
Re: [gdbserver] x86 agent expression bytecode compiler (speed up conditional tracepoints)



This is way cool, but at some point I've gotta remark --


gdbserver has sure come a long way from the simple remote debug agent
that it started out to be.  Is anybody at all worried about the
ever-increasing complexification of gdbserver?

Pedro Alves wrote:
This adds a bytecode compiler to the Linux x86 ports of
gdbserver that turns agent expression bytecode into native
code.  This greatly speeds up conditional tracepoints, by
around an order of magnitude: conditions that reference no
more than a few variables (memory/registers) evaluate in
around 100ns or less on a typical modern x86 box.

--
Pedro Alves

2010-06-07  Stan Shebs  <stan@codesourcery.com>
            Pedro Alves  <pedro@codesourcery.com>

        Bytecode compiler.

        * linux-x86-low.c: Include limits.h.
        (add_insns): New.
        (always_true): New.
        (EMIT_ASM): New.
        (EMIT_ASM32): New.
        (amd64_emit_prologue, amd64_emit_epilogue, amd64_emit_add)
        (amd64_emit_sub, amd64_emit_mul, amd64_emit_lsh)
        (amd64_emit_rsh_signed, amd64_emit_rsh_unsigned, amd64_emit_ext)
        (amd64_emit_log_not, amd64_emit_bit_and, amd64_emit_bit_or)
        (amd64_emit_bit_xor, amd64_emit_bit_not, amd64_emit_equal)
        (amd64_emit_less_signed, amd64_emit_less_unsigned, amd64_emit_ref)
        (amd64_emit_if_goto, amd64_emit_goto, amd64_write_goto_address)
        (amd64_emit_const, amd64_emit_call, amd64_emit_reg)
        (amd64_emit_pop, amd64_emit_stack_flush, amd64_emit_zero_ext)
        (amd64_emit_swap, amd64_emit_stack_adjust, amd64_emit_int_call_1)
        (amd64_emit_void_call_2): New.
        (amd64_emit_ops): New.
        (i386_emit_prologue, i386_emit_epilogue, i386_emit_add)
        (i386_emit_sub, i386_emit_mul, i386_emit_lsh, i386_emit_rsh_signed)
        (i386_emit_rsh_unsigned, i386_emit_ext, i386_emit_log_not)
        (i386_emit_bit_and, i386_emit_bit_or, i386_emit_bit_xor)
        (i386_emit_bit_not, i386_emit_equal, i386_emit_less_signed)
        (i386_emit_less_unsigned, i386_emit_ref, i386_emit_if_goto)
        (i386_emit_goto, i386_write_goto_address, i386_emit_const)
        (i386_emit_call, i386_emit_reg, i386_emit_pop)
        (i386_emit_stack_flush, i386_emit_zero_ext, i386_emit_swap)
        (i386_emit_stack_adjust, i386_emit_int_call_1)
        (i386_emit_void_call_2): New.
        (i386_emit_ops): New.
        (x86_emit_ops): New.
        (the_low_target): Install x86_emit_ops.
        * server.h (struct emit_ops): New.
        (get_raw_reg_func_addr): Declare.
        (current_insn_ptr, emit_error): Declare.
        * tracepoint.c (get_raw_reg, get_trace_state_variable_value)
        (set_trace_state_variable_value): New defines.
        (struct ipa_sym_addresses): New fields addr_get_raw_reg,
        addr_get_trace_state_variable_value and
        addr_set_trace_state_variable_value.
        (symbol_list): New fields for get_raw_reg,
        get_trace_state_variable_value and set_trace_state_variable_value.
        (condfn): New typedef.
        (struct tracepoint): New field `compiled_cond'.
        (do_action_at_tracepoint): Clear compiled_cond.
        (get_trace_state_variable_value, set_trace_state_variable_value):
        Export in the IPA.
        (condition_true_at_tracepoint): If there's a compiled condition,
        run that.
        (current_insn_ptr, emit_error): New globals.
        (struct bytecode_address): New.
        (get_raw_reg_func_addr): New.
        (emit_prologue, emit_epilogue, emit_add, emit_sub, emit_mul)
        (emit_lsh, emit_rsh_signed, emit_rsh_unsigned, emit_ext)
        (emit_log_not, emit_bit_and, emit_bit_or, emit_bit_xor)
        (emit_bit_not, emit_equal, emit_less_signed, emit_less_unsigned)
        (emit_ref, emit_if_goto, emit_goto, write_goto_address, emit_const)
        (emit_reg, emit_pop, emit_stack_flush, emit_zero_ext, emit_swap)
        (emit_stack_adjust, emit_int_call_1, emit_void_call_2): New.
        (compile_tracepoint_condition, compile_bytecodes): New.
        * target.h (emit_ops): Forward declare.
        (struct target_ops): New field emit_ops.
        (target_emit_ops): New.
        * linux-amd64-ipa.c (gdb_agent_get_raw_reg): New.
        * linux-i386-ipa.c (gdb_agent_get_raw_reg): New.
        * linux-low.c (linux_emit_ops): New.
        (linux_target_ops): Install it.
        * linux-low.h (struct linux_target_ops): New field emit_ops.

---
 gdb/gdbserver/linux-amd64-ipa.c |    9
 gdb/gdbserver/linux-i386-ipa.c  |   15
 gdb/gdbserver/linux-low.c       |   12
 gdb/gdbserver/linux-low.h       |    4
 gdb/gdbserver/linux-x86-low.c   | 1032 +++++++++++++++++++++++++++++++++++++++-
 gdb/gdbserver/server.h          |   50 +
 gdb/gdbserver/target.h          |    8
 gdb/gdbserver/tracepoint.c      |  624 ++++++++++++++++++++++++
 8 files changed, 1749 insertions(+), 5 deletions(-)

Index: src/gdb/gdbserver/linux-x86-low.c
===================================================================
--- src.orig/gdb/gdbserver/linux-x86-low.c      2010-06-07 16:13:07.000000000 +0100
+++ src/gdb/gdbserver/linux-x86-low.c   2010-06-07 16:43:25.000000000 +0100
@@ -20,6 +20,7 @@

 #include <stddef.h>
 #include <signal.h>
+#include <limits.h>
 #include "server.h"
 #include "linux-low.h"
 #include "i387-fp.h"
@@ -1470,6 +1471,1034 @@ x86_install_fast_tracepoint_jump_pad (CO
                                                adjusted_insn_addr_end);
 }

+static void
+add_insns (unsigned char *start, int len)
+{
+  CORE_ADDR buildaddr = current_insn_ptr;
+
+  if (debug_threads)
+    fprintf (stderr, "Adding %d bytes of insn at %s\n",
+            len, paddress (buildaddr));
+
+  append_insns (&buildaddr, len, start);
+  current_insn_ptr = buildaddr;
+}
+
+/* A function used to trick optimizers.  */
+
+int
+always_true (void)
+{
+  return 1;
+}
+
+/* Our general strategy for emitting code is to avoid specifying raw
+   bytes whenever possible, and instead copy a block of inline asm
+   that is embedded in the function.  This is a little messy, because
+   we need to keep the compiler from discarding what looks like dead
+   code, plus suppress various warnings.  */
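+
+/* For example, EMIT_ASM (amd64_pop, "pop %rax") assembles the
+   instruction between the global labels start_amd64_pop and
+   end_amd64_pop; at run time, add_insns copies the bytes between
+   those labels into the buffer at current_insn_ptr, while the
+   always_true goto keeps the inline asm from ever being executed in
+   place.  */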
+
+#define EMIT_ASM(NAME,INSNS)                                           \
+  { extern unsigned char start_ ## NAME, end_ ## NAME;                 \
+    add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);      \
+    if (always_true ())                                                \
+      goto skipover ## NAME;                                           \
+    __asm__ ("start_" #NAME ":\n\t" INSNS "\n\tend_" #NAME ":\n\t");   \
+    skipover ## NAME:                                                  \
+    ; }
+
+
+#ifdef __x86_64__
+
+#define EMIT_ASM32(NAME,INSNS)                                         \
+  { extern unsigned char start_ ## NAME, end_ ## NAME;                 \
+    add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);      \
+    if (always_true ())                                                \
+      goto skipover ## NAME;                                           \
+    __asm__ (".code32\n\tstart_" #NAME ":\n\t" INSNS "\n\tend_" #NAME ":\n" \
+            "\t.code64\n\t");                                          \
+    skipover ## NAME:                                                  \
+    ; }
+
+#else
+
+#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
+
+#endif
+
+#ifdef __x86_64__
+
+static void
+amd64_emit_prologue (void)
+{
+  EMIT_ASM (amd64_prologue,
+           "pushq %rbp\n\t"
+           "movq %rsp,%rbp\n\t"
+           "sub $0x20,%rsp\n\t"
+           "movq %rdi,-8(%rbp)\n\t"
+           "movq %rsi,-16(%rbp)");
+}
+
+
+static void
+amd64_emit_epilogue (void)
+{
+  EMIT_ASM (amd64_epilogue,
+           "movq -16(%rbp),%rdi\n\t"
+           "movq %rax,(%rdi)\n\t"
+           "xor %rax,%rax\n\t"
+           "leave\n\t"
+           "ret");
+}
+
+static void
+amd64_emit_add (void)
+{
+  EMIT_ASM (amd64_add,
+           "add (%rsp),%rax\n\t"
+           "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_sub (void)
+{
+  EMIT_ASM (amd64_sub,
+           "sub %rax,(%rsp)\n\t"
+           "pop %rax");
+}
+
+static void
+amd64_emit_mul (void)
+{
+  emit_error = 1;
+}
+
+static void
+amd64_emit_lsh (void)
+{
+  emit_error = 1;
+}
+
+static void
+amd64_emit_rsh_signed (void)
+{
+  emit_error = 1;
+}
+
+static void
+amd64_emit_rsh_unsigned (void)
+{
+  emit_error = 1;
+}
+
+static void
+amd64_emit_ext (int arg)
+{
+  switch (arg)
+    {
+    case 8:
+      EMIT_ASM (amd64_ext_8,
+               "cbtw\n\t"
+               "cwtl\n\t"
+               "cltq");
+      break;
+    case 16:
+      EMIT_ASM (amd64_ext_16,
+               "cwtl\n\t"
+               "cltq");
+      break;
+    case 32:
+      EMIT_ASM (amd64_ext_32,
+               "cltq");
+      break;
+    default:
+      emit_error = 1;
+    }
+}
+
+static void
+amd64_emit_log_not (void)
+{
+  EMIT_ASM (amd64_log_not,
+           "test %rax,%rax\n\t"
+           "sete %cl\n\t"
+           "movzbq %cl,%rax");
+}
+
+static void
+amd64_emit_bit_and (void)
+{
+  EMIT_ASM (amd64_and,
+           "and (%rsp),%rax\n\t"
+           "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_bit_or (void)
+{
+  EMIT_ASM (amd64_or,
+           "or (%rsp),%rax\n\t"
+           "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_bit_xor (void)
+{
+  EMIT_ASM (amd64_xor,
+           "xor (%rsp),%rax\n\t"
+           "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_bit_not (void)
+{
+  EMIT_ASM (amd64_bit_not,
+           "xorq $0xffffffffffffffff,%rax");
+}
+
+static void
+amd64_emit_equal (void)
+{
+  EMIT_ASM (amd64_equal,
+           "cmp %rax,(%rsp)\n\t"
+           "je .Lamd64_equal_true\n\t"
+           "xor %rax,%rax\n\t"
+           "jmp .Lamd64_equal_end\n\t"
+           ".Lamd64_equal_true:\n\t"
+           "mov $0x1,%rax\n\t"
+           ".Lamd64_equal_end:\n\t"
+           "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_less_signed (void)
+{
+  EMIT_ASM (amd64_less_signed,
+           "cmp %rax,(%rsp)\n\t"
+           "jl .Lamd64_less_signed_true\n\t"
+           "xor %rax,%rax\n\t"
+           "jmp .Lamd64_less_signed_end\n\t"
+           ".Lamd64_less_signed_true:\n\t"
+           "mov $1,%rax\n\t"
+           ".Lamd64_less_signed_end:\n\t"
+           "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_less_unsigned (void)
+{
+  EMIT_ASM (amd64_less_unsigned,
+           "cmp %rax,(%rsp)\n\t"
+           "jb .Lamd64_less_unsigned_true\n\t"
+           "xor %rax,%rax\n\t"
+           "jmp .Lamd64_less_unsigned_end\n\t"
+           ".Lamd64_less_unsigned_true:\n\t"
+           "mov $1,%rax\n\t"
+           ".Lamd64_less_unsigned_end:\n\t"
+           "lea 0x8(%rsp),%rsp");
+}
+
+static void
+amd64_emit_ref (int size)
+{
+  switch (size)
+    {
+    case 1:
+      EMIT_ASM (amd64_ref1,
+               "movb (%rax),%al");
+      break;
+    case 2:
+      EMIT_ASM (amd64_ref2,
+               "movw (%rax),%ax");
+      break;
+    case 4:
+      EMIT_ASM (amd64_ref4,
+               "movl (%rax),%eax");
+      break;
+    case 8:
+      EMIT_ASM (amd64_ref8,
+               "movq (%rax),%rax");
+      break;
+    }
+}
+
+static void
+amd64_emit_if_goto (int *offset_p, int *size_p)
+{
+  EMIT_ASM (amd64_if_goto,
+           "mov %rax,%rcx\n\t"
+           "pop %rax\n\t"
+           "cmp $0,%rcx\n\t"
+           ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
+  if (offset_p)
+    *offset_p = 10;
+  if (size_p)
+    *size_p = 4;
+}
+
+static void
+amd64_emit_goto (int *offset_p, int *size_p)
+{
+  EMIT_ASM (amd64_goto,
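+           /* jmp <rel32>, displacement patched in later.  */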
+           ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
+  if (offset_p)
+    *offset_p = 1;
+  if (size_p)
+    *size_p = 4;
+}
+
+static void
+amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
+{
+  int diff = (to - (from + size));
+  unsigned char buf[sizeof (int)];
+
+  if (size != 4)
+    {
+      emit_error = 1;
+      return;
+    }
+
+  memcpy (buf, &diff, sizeof (int));
+  write_inferior_memory (from, buf, sizeof (int));
+}
+
+static void
+amd64_emit_const (int64_t num)
+{
+  unsigned char buf[16];
+  int i;
+  CORE_ADDR buildaddr = current_insn_ptr;
+
+  i = 0;
+  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
+  *((int64_t *) (&buf[i])) = num;
+  i += 8;
+  append_insns (&buildaddr, i, buf);
+  current_insn_ptr = buildaddr;
+}
+
+static void
+amd64_emit_call (CORE_ADDR fn)
+{
+  unsigned char buf[16];
+  int i;
+  CORE_ADDR buildaddr;
+  int64_t offset64;
+
+  /* The destination function, being in the shared library, may be
+     more than 31 bits away from the compiled code pad.  */
+
+  buildaddr = current_insn_ptr;
+
+  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
+
+  i = 0;
+
+  if (offset64 > INT_MAX || offset64 < INT_MIN)
+    {
+      /* Offset is too large for a direct call, which we avoid when
+        possible since the indirect form needs a register.  Use %r10:
+        since it is call-clobbered, we don't have to push/pop it.  */
+      buf[i++] = 0x49; /* mov $fn,%r10 */
+      buf[i++] = 0xba;
+      memcpy (buf + i, &fn, 8);
+      i += 8;
+      buf[i++] = 0x41; /* callq *%r10 */
+      buf[i++] = 0xff;
+      buf[i++] = 0xd2;
+    }
+  else
+    {
+      int offset32 = offset64; /* We know this can't overflow here.  */
+      buf[i++] = 0xe8; /* call <reladdr> */
+      memcpy (buf + i, &offset32, 4);
+      i += 4;
+    }
+
+  append_insns (&buildaddr, i, buf);
+  current_insn_ptr = buildaddr;
+}
+
+static void
+amd64_emit_reg (int reg)
+{
+  unsigned char buf[16];
+  int i;
+  CORE_ADDR buildaddr;
+
+  /* Assume raw_regs is still in %rdi.  */
+  buildaddr = current_insn_ptr;
+  i = 0;
+  buf[i++] = 0xbe; /* mov $<n>,%esi */
+  *((int *) (&buf[i])) = reg;
+  i += 4;
+  append_insns (&buildaddr, i, buf);
+  current_insn_ptr = buildaddr;
+  amd64_emit_call (get_raw_reg_func_addr ());
+}
+
+static void
+amd64_emit_pop (void)
+{
+  EMIT_ASM (amd64_pop,
+           "pop %rax");
+}
+
+static void
+amd64_emit_stack_flush (void)
+{
+  EMIT_ASM (amd64_stack_flush,
+           "push %rax");
+}
+
+static void
+amd64_emit_zero_ext (int arg)
+{
+  switch (arg)
+    {
+    case 8:
+      EMIT_ASM (amd64_zero_ext_8,
+               "and $0xff,%rax");
+      break;
+    case 16:
+      EMIT_ASM (amd64_zero_ext_16,
+               "and $0xffff,%rax");
+      break;
+    case 32:
+      EMIT_ASM (amd64_zero_ext_32,
+               "mov $0xffffffff,%rcx\n\t"
+               "and %rcx,%rax");
+      break;
+    default:
+      emit_error = 1;
+    }
+}
+
+static void
+amd64_emit_swap (void)
+{
+  EMIT_ASM (amd64_swap,
+           "mov %rax,%rcx\n\t"
+           "pop %rax\n\t"
+           "push %rcx");
+}
+
+static void
+amd64_emit_stack_adjust (int n)
+{
+  unsigned char buf[16];
+  int i;
+  CORE_ADDR buildaddr = current_insn_ptr;
+
+  i = 0;
+  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
+  buf[i++] = 0x8d;
+  buf[i++] = 0x64;
+  buf[i++] = 0x24;
+  /* This only handles adjustments up to 16, but we don't expect any more.  */
+  buf[i++] = n * 8;
+  append_insns (&buildaddr, i, buf);
+  current_insn_ptr = buildaddr;
+}
+
+/* FN's prototype is `LONGEST(*fn)(int)'.  */
+
+static void
+amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
+{
+  unsigned char buf[16];
+  int i;
+  CORE_ADDR buildaddr;
+
+  buildaddr = current_insn_ptr;
+  i = 0;
+  buf[i++] = 0xbf; /* movl $<n>,%edi */
+  *((int *) (&buf[i])) = arg1;
+  i += 4;
+  append_insns (&buildaddr, i, buf);
+  current_insn_ptr = buildaddr;
+  amd64_emit_call (fn);
+}
+
+/* FN's prototype is `void(*fn)(int,int64_t)'.  */
+
+static void
+amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
+{
+  unsigned char buf[16];
+  int i;
+  CORE_ADDR buildaddr;
+
+  buildaddr = current_insn_ptr;
+  i = 0;
+  buf[i++] = 0xbf; /* movl $<n>,%edi */
+  *((int *) (&buf[i])) = arg1;
+  i += 4;
+  append_insns (&buildaddr, i, buf);
+  current_insn_ptr = buildaddr;
+  EMIT_ASM (amd64_void_call_2_a,
+           /* Save away a copy of the stack top.  */
+           "push %rax\n\t"
+           /* Also pass top as the second argument.  */
+           "mov %rax,%rsi");
+  amd64_emit_call (fn);
+  EMIT_ASM (amd64_void_call_2_b,
+           /* Restore the stack top, %rax may have been trashed.  */
+           "pop %rax");
+}
+
+struct emit_ops amd64_emit_ops =
+  {
+    amd64_emit_prologue,
+    amd64_emit_epilogue,
+    amd64_emit_add,
+    amd64_emit_sub,
+    amd64_emit_mul,
+    amd64_emit_lsh,
+    amd64_emit_rsh_signed,
+    amd64_emit_rsh_unsigned,
+    amd64_emit_ext,
+    amd64_emit_log_not,
+    amd64_emit_bit_and,
+    amd64_emit_bit_or,
+    amd64_emit_bit_xor,
+    amd64_emit_bit_not,
+    amd64_emit_equal,
+    amd64_emit_less_signed,
+    amd64_emit_less_unsigned,
+    amd64_emit_ref,
+    amd64_emit_if_goto,
+    amd64_emit_goto,
+    amd64_write_goto_address,
+    amd64_emit_const,
+    amd64_emit_call,
+    amd64_emit_reg,
+    amd64_emit_pop,
+    amd64_emit_stack_flush,
+    amd64_emit_zero_ext,
+    amd64_emit_swap,
+    amd64_emit_stack_adjust,
+    amd64_emit_int_call_1,
+    amd64_emit_void_call_2
+  };
+
+#endif /* __x86_64__ */
+
+static void
+i386_emit_prologue (void)
+{
+  EMIT_ASM32 (i386_prologue,
+           "push %ebp\n\t"
+           "mov %esp,%ebp");
+  /* At this point, the raw regs base address is at 8(%ebp), and the
+     value pointer is at 12(%ebp).  */
+}
+
+static void
+i386_emit_epilogue (void)
+{
+  EMIT_ASM32 (i386_epilogue,
+           "mov 12(%ebp),%ecx\n\t"
+           "mov %eax,(%ecx)\n\t"
+           "mov %ebx,0x4(%ecx)\n\t"
+           "xor %eax,%eax\n\t"
+           "pop %ebp\n\t"
+           "ret");
+}
+
+static void
+i386_emit_add (void)
+{
+  EMIT_ASM32 (i386_add,
+           "add (%esp),%eax\n\t"
+           "adc 0x4(%esp),%ebx\n\t"
+           "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_sub (void)
+{
+  EMIT_ASM32 (i386_sub,
+           "subl %eax,(%esp)\n\t"
+           "sbbl %ebx,4(%esp)\n\t"
+           "pop %eax\n\t"
+           "pop %ebx\n\t");
+}
+
+static void
+i386_emit_mul (void)
+{
+  emit_error = 1;
+}
+
+static void
+i386_emit_lsh (void)
+{
+  emit_error = 1;
+}
+
+static void
+i386_emit_rsh_signed (void)
+{
+  emit_error = 1;
+}
+
+static void
+i386_emit_rsh_unsigned (void)
+{
+  emit_error = 1;
+}
+
+static void
+i386_emit_ext (int arg)
+{
+  switch (arg)
+    {
+    case 8:
+      EMIT_ASM32 (i386_ext_8,
+               "cbtw\n\t"
+               "cwtl\n\t"
+               "movl %eax,%ebx\n\t"
+               "sarl $31,%ebx");
+      break;
+    case 16:
+      EMIT_ASM32 (i386_ext_16,
+               "cwtl\n\t"
+               "movl %eax,%ebx\n\t"
+               "sarl $31,%ebx");
+      break;
+    case 32:
+      EMIT_ASM32 (i386_ext_32,
+               "movl %eax,%ebx\n\t"
+               "sarl $31,%ebx");
+      break;
+    default:
+      emit_error = 1;
+    }
+}
+
+static void
+i386_emit_log_not (void)
+{
+  EMIT_ASM32 (i386_log_not,
+           "or %ebx,%eax\n\t"
+           "test %eax,%eax\n\t"
+           "sete %cl\n\t"
+           "xor %ebx,%ebx\n\t"
+           "movzbl %cl,%eax");
+}
+
+static void
+i386_emit_bit_and (void)
+{
+  EMIT_ASM32 (i386_and,
+           "and (%esp),%eax\n\t"
+           "and 0x4(%esp),%ebx\n\t"
+           "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_bit_or (void)
+{
+  EMIT_ASM32 (i386_or,
+           "or (%esp),%eax\n\t"
+           "or 0x4(%esp),%ebx\n\t"
+           "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_bit_xor (void)
+{
+  EMIT_ASM32 (i386_xor,
+           "xor (%esp),%eax\n\t"
+           "xor 0x4(%esp),%ebx\n\t"
+           "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_bit_not (void)
+{
+  EMIT_ASM32 (i386_bit_not,
+           "xor $0xffffffff,%eax\n\t"
+           "xor $0xffffffff,%ebx\n\t");
+}
+
+static void
+i386_emit_equal (void)
+{
+  EMIT_ASM32 (i386_equal,
+           "cmpl %ebx,4(%esp)\n\t"
+           "jne .Li386_equal_false\n\t"
+           "cmpl %eax,(%esp)\n\t"
+           "je .Li386_equal_true\n\t"
+           ".Li386_equal_false:\n\t"
+           "xor %eax,%eax\n\t"
+           "jmp .Li386_equal_end\n\t"
+           ".Li386_equal_true:\n\t"
+           "mov $1,%eax\n\t"
+           ".Li386_equal_end:\n\t"
+           "xor %ebx,%ebx\n\t"
+           "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_less_signed (void)
+{
+  EMIT_ASM32 (i386_less_signed,
+           "cmpl %ebx,4(%esp)\n\t"
+           "jl .Li386_less_signed_true\n\t"
+           "jne .Li386_less_signed_false\n\t"
+           "cmpl %eax,(%esp)\n\t"
+           "jl .Li386_less_signed_true\n\t"
+           ".Li386_less_signed_false:\n\t"
+           "xor %eax,%eax\n\t"
+           "jmp .Li386_less_signed_end\n\t"
+           ".Li386_less_signed_true:\n\t"
+           "mov $1,%eax\n\t"
+           ".Li386_less_signed_end:\n\t"
+           "xor %ebx,%ebx\n\t"
+           "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_less_unsigned (void)
+{
+  EMIT_ASM32 (i386_less_unsigned,
+           "cmpl %ebx,4(%esp)\n\t"
+           "jb .Li386_less_unsigned_true\n\t"
+           "jne .Li386_less_unsigned_false\n\t"
+           "cmpl %eax,(%esp)\n\t"
+           "jb .Li386_less_unsigned_true\n\t"
+           ".Li386_less_unsigned_false:\n\t"
+           "xor %eax,%eax\n\t"
+           "jmp .Li386_less_unsigned_end\n\t"
+           ".Li386_less_unsigned_true:\n\t"
+           "mov $1,%eax\n\t"
+           ".Li386_less_unsigned_end:\n\t"
+           "xor %ebx,%ebx\n\t"
+           "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_ref (int size)
+{
+  switch (size)
+    {
+    case 1:
+      EMIT_ASM32 (i386_ref1,
+               "movb (%eax),%al");
+      break;
+    case 2:
+      EMIT_ASM32 (i386_ref2,
+               "movw (%eax),%ax");
+      break;
+    case 4:
+      EMIT_ASM32 (i386_ref4,
+               "movl (%eax),%eax");
+      break;
+    case 8:
+      EMIT_ASM32 (i386_ref8,
+               "movl 4(%eax),%ebx\n\t"
+               "movl (%eax),%eax");
+      break;
+    }
+}
+
+static void
+i386_emit_if_goto (int *offset_p, int *size_p)
+{
+  EMIT_ASM32 (i386_if_goto,
+           "mov %eax,%ecx\n\t"
+           "or %ebx,%ecx\n\t"
+           "pop %eax\n\t"
+           "pop %ebx\n\t"
+           "cmpl $0,%ecx\n\t"
+           /* Don't trust the assembler to choose the right jump */
+           ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
+
+  if (offset_p)
+    *offset_p = 11; /* be sure that this matches the sequence above */
+  if (size_p)
+    *size_p = 4;
+}
+
+static void
+i386_emit_goto (int *offset_p, int *size_p)
+{
+  EMIT_ASM32 (i386_goto,
+           /* Don't trust the assembler to choose the right jump */
+           ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
+  if (offset_p)
+    *offset_p = 1;
+  if (size_p)
+    *size_p = 4;
+}
+
+static void
+i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
+{
+  int diff = (to - (from + size));
+  unsigned char buf[sizeof (int)];
+
+  /* We're only doing 4-byte sizes at the moment.  */
+  if (size != 4)
+    {
+      emit_error = 1;
+      return;
+    }
+
+  memcpy (buf, &diff, sizeof (int));
+  write_inferior_memory (from, buf, sizeof (int));
+}
+
+static void
+i386_emit_const (int64_t num)
+{
+  unsigned char buf[16];
+  int i, hi;
+  CORE_ADDR buildaddr = current_insn_ptr;
+
+  i = 0;
+  buf[i++] = 0xb8; /* mov $<n>,%eax */
+  *((int *) (&buf[i])) = (num & 0xffffffff);
+  i += 4;
+  hi = ((num >> 32) & 0xffffffff);
+  if (hi)
+    {
+      buf[i++] = 0xbb; /* mov $<n>,%ebx */
+      *((int *) (&buf[i])) = hi;
+      i += 4;
+    }
+  else
+    {
+      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
+    }
+  append_insns (&buildaddr, i, buf);
+  current_insn_ptr = buildaddr;
+}
+
+static void
+i386_emit_call (CORE_ADDR fn)
+{
+  unsigned char buf[16];
+  int i, offset;
+  CORE_ADDR buildaddr;
+
+  buildaddr = current_insn_ptr;
+  i = 0;
+  buf[i++] = 0xe8; /* call <reladdr> */
+  offset = ((int) fn) - (buildaddr + 5);
+  memcpy (buf + 1, &offset, 4);
+  append_insns (&buildaddr, 5, buf);
+  current_insn_ptr = buildaddr;
+}
+
+static void
+i386_emit_reg (int reg)
+{
+  unsigned char buf[16];
+  int i;
+  CORE_ADDR buildaddr;
+
+  EMIT_ASM32 (i386_reg_a,
+           "sub $0x8,%esp");
+  buildaddr = current_insn_ptr;
+  i = 0;
+  buf[i++] = 0xb8; /* mov $<n>,%eax */
+  *((int *) (&buf[i])) = reg;
+  i += 4;
+  append_insns (&buildaddr, i, buf);
+  current_insn_ptr = buildaddr;
+  EMIT_ASM32 (i386_reg_b,
+           "mov %eax,4(%esp)\n\t"
+           "mov 8(%ebp),%eax\n\t"
+           "mov %eax,(%esp)");
+  i386_emit_call (get_raw_reg_func_addr ());
+  EMIT_ASM32 (i386_reg_c,
+           "xor %ebx,%ebx\n\t"
+           "lea 0x8(%esp),%esp");
+}
+
+static void
+i386_emit_pop (void)
+{
+  EMIT_ASM32 (i386_pop,
+           "pop %eax\n\t"
+           "pop %ebx");
+}
+
+static void
+i386_emit_stack_flush (void)
+{
+  EMIT_ASM32 (i386_stack_flush,
+           "push %ebx\n\t"
+           "push %eax");
+}
+
+static void
+i386_emit_zero_ext (int arg)
+{
+  switch (arg)
+    {
+    case 8:
+      EMIT_ASM32 (i386_zero_ext_8,
+               "and $0xff,%eax\n\t"
+               "xor %ebx,%ebx");
+      break;
+    case 16:
+      EMIT_ASM32 (i386_zero_ext_16,
+               "and $0xffff,%eax\n\t"
+               "xor %ebx,%ebx");
+      break;
+    case 32:
+      EMIT_ASM32 (i386_zero_ext_32,
+               "xor %ebx,%ebx");
+      break;
+    default:
+      emit_error = 1;
+    }
+}
+
+static void
+i386_emit_swap (void)
+{
+  EMIT_ASM32 (i386_swap,
+           "mov %eax,%ecx\n\t"
+           "mov %ebx,%edx\n\t"
+           "pop %eax\n\t"
+           "pop %ebx\n\t"
+           "push %edx\n\t"
+           "push %ecx");
+}
+
+static void
+i386_emit_stack_adjust (int n)
+{
+  unsigned char buf[16];
+  int i;
+  CORE_ADDR buildaddr = current_insn_ptr;
+
+  i = 0;
+  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
+  buf[i++] = 0x64;
+  buf[i++] = 0x24;
+  buf[i++] = n * 8;
+  append_insns (&buildaddr, i, buf);
+  current_insn_ptr = buildaddr;
+}
+
+/* FN's prototype is `LONGEST(*fn)(int)'.  */
+
+static void
+i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
+{
+  unsigned char buf[16];
+  int i;
+  CORE_ADDR buildaddr;
+
+  EMIT_ASM32 (i386_int_call_1_a,
+           /* Reserve a bit of stack space.  */
+           "sub $0x8,%esp");
+  /* Put the one argument on the stack.  */
+  buildaddr = current_insn_ptr;
+  i = 0;
+  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
+  buf[i++] = 0x04;
+  buf[i++] = 0x24;
+  *((int *) (&buf[i])) = arg1;
+  i += 4;
+  append_insns (&buildaddr, i, buf);
+  current_insn_ptr = buildaddr;
+  i386_emit_call (fn);
+  EMIT_ASM32 (i386_int_call_1_c,
+           "mov %edx,%ebx\n\t"
+           "lea 0x8(%esp),%esp");
+}
+
+/* FN's prototype is `void(*fn)(int,int64_t)'.  */
+
+static void
+i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
+{
+  unsigned char buf[16];
+  int i;
+  CORE_ADDR buildaddr;
+
+  EMIT_ASM32 (i386_void_call_2_a,
+           /* Preserve %eax only; we don't have to worry about %ebx.  */
+           "push %eax\n\t"
+           /* Reserve a bit of stack space for arguments.  */
+           "sub $0x10,%esp\n\t"
+           /* Copy "top" to the second argument position.  (Note that
+              we can't assume function won't scribble on its
+              arguments, so don't try to restore from this.)  */
+           "mov %eax,4(%esp)\n\t"
+           "mov %ebx,8(%esp)");
+  /* Put the first argument on the stack.  */
+  buildaddr = current_insn_ptr;
+  i = 0;
+  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
+  buf[i++] = 0x04;
+  buf[i++] = 0x24;
+  *((int *) (&buf[i])) = arg1;
+  i += 4;
+  append_insns (&buildaddr, i, buf);
+  current_insn_ptr = buildaddr;
+  i386_emit_call (fn);
+  EMIT_ASM32 (i386_void_call_2_b,
+           "lea 0x10(%esp),%esp\n\t"
+           /* Restore original stack top.  */
+           "pop %eax");
+}
+
+struct emit_ops i386_emit_ops =
+  {
+    i386_emit_prologue,
+    i386_emit_epilogue,
+    i386_emit_add,
+    i386_emit_sub,
+    i386_emit_mul,
+    i386_emit_lsh,
+    i386_emit_rsh_signed,
+    i386_emit_rsh_unsigned,
+    i386_emit_ext,
+    i386_emit_log_not,
+    i386_emit_bit_and,
+    i386_emit_bit_or,
+    i386_emit_bit_xor,
+    i386_emit_bit_not,
+    i386_emit_equal,
+    i386_emit_less_signed,
+    i386_emit_less_unsigned,
+    i386_emit_ref,
+    i386_emit_if_goto,
+    i386_emit_goto,
+    i386_write_goto_address,
+    i386_emit_const,
+    i386_emit_call,
+    i386_emit_reg,
+    i386_emit_pop,
+    i386_emit_stack_flush,
+    i386_emit_zero_ext,
+    i386_emit_swap,
+    i386_emit_stack_adjust,
+    i386_emit_int_call_1,
+    i386_emit_void_call_2
+  };
+
+static struct emit_ops *
+x86_emit_ops (void)
+{
+#ifdef __x86_64__
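+  /* If register 0 (%rax/%eax) is 8 bytes, the inferior is 64-bit.  */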
+  int use_64bit = register_size (0) == 8;
+
+  if (use_64bit)
+    return &amd64_emit_ops;
+  else
+#endif
+    return &i386_emit_ops;
+}
+
 /* This is initialized assuming an amd64 target.
    x86_arch_setup will correct it for i386 or amd64 targets.  */

@@ -1504,5 +2533,6 @@ struct linux_target_ops the_low_target =
   x86_linux_process_qsupported,
   x86_supports_tracepoints,
   x86_get_thread_area,
-  x86_install_fast_tracepoint_jump_pad
+  x86_install_fast_tracepoint_jump_pad,
+  x86_emit_ops
 };
Index: src/gdb/gdbserver/server.h
===================================================================
--- src.orig/gdb/gdbserver/server.h     2010-06-07 16:13:07.000000000 +0100
+++ src/gdb/gdbserver/server.h  2010-06-07 16:13:24.000000000 +0100
@@ -573,6 +573,56 @@ void supply_fast_tracepoint_registers (s
 void stop_tracing (void);
 #endif

+/* Bytecode compilation function vector.  */
+
+struct emit_ops
+{
+  void (*emit_prologue) (void);
+  void (*emit_epilogue) (void);
+  void (*emit_add) (void);
+  void (*emit_sub) (void);
+  void (*emit_mul) (void);
+  void (*emit_lsh) (void);
+  void (*emit_rsh_signed) (void);
+  void (*emit_rsh_unsigned) (void);
+  void (*emit_ext) (int arg);
+  void (*emit_log_not) (void);
+  void (*emit_bit_and) (void);
+  void (*emit_bit_or) (void);
+  void (*emit_bit_xor) (void);
+  void (*emit_bit_not) (void);
+  void (*emit_equal) (void);
+  void (*emit_less_signed) (void);
+  void (*emit_less_unsigned) (void);
+  void (*emit_ref) (int size);
+  void (*emit_if_goto) (int *offset_p, int *size_p);
+  void (*emit_goto) (int *offset_p, int *size_p);
+  void (*write_goto_address) (CORE_ADDR from, CORE_ADDR to, int size);
+  void (*emit_const) (int64_t num);
+  void (*emit_call) (CORE_ADDR fn);
+  void (*emit_reg) (int reg);
+  void (*emit_pop) (void);
+  void (*emit_stack_flush) (void);
+  void (*emit_zero_ext) (int arg);
+  void (*emit_swap) (void);
+  void (*emit_stack_adjust) (int n);
+
+  /* Emit code for a generic function that takes one fixed integer
+     argument and returns a 64-bit int (for instance, tsv getter).  */
+  void (*emit_int_call_1) (CORE_ADDR fn, int arg1);
+
+  /* Emit code for a generic function that takes one fixed integer
+     argument and a 64-bit int from the top of the stack, and returns
+     nothing (for instance, tsv setter).  */
+  void (*emit_void_call_2) (CORE_ADDR fn, int arg1);
+};
+
+/* Returns the address of the get_raw_reg function in the IPA.  */
+CORE_ADDR get_raw_reg_func_addr (void);
+
+extern CORE_ADDR current_insn_ptr;
+extern int emit_error;
+
 /* Version information, from version.c.  */
 extern const char version[];
 extern const char host_name[];
Index: src/gdb/gdbserver/tracepoint.c
===================================================================
--- src.orig/gdb/gdbserver/tracepoint.c 2010-06-07 16:13:07.000000000 +0100
+++ src/gdb/gdbserver/tracepoint.c      2010-06-07 16:42:28.000000000 +0100
@@ -132,6 +132,9 @@ trace_vdebug (const char *fmt, ...)
 # define traceframe_write_count gdb_agent_traceframe_write_count
 # define traceframes_created gdb_agent_traceframes_created
 # define trace_state_variables gdb_agent_trace_state_variables
+# define get_raw_reg gdb_agent_get_raw_reg
+# define get_trace_state_variable_value gdb_agent_get_trace_state_variable_value
+# define set_trace_state_variable_value gdb_agent_set_trace_state_variable_value
 #endif

 #ifndef IN_PROCESS_AGENT
@@ -162,6 +165,9 @@ struct ipa_sym_addresses
   CORE_ADDR addr_traceframe_write_count;
   CORE_ADDR addr_traceframes_created;
   CORE_ADDR addr_trace_state_variables;
+  CORE_ADDR addr_get_raw_reg;
+  CORE_ADDR addr_get_trace_state_variable_value;
+  CORE_ADDR addr_set_trace_state_variable_value;
 };

 #define STRINGIZE_1(STR) #STR
@@ -200,6 +206,9 @@ static struct
   IPA_SYM(traceframe_write_count),
   IPA_SYM(traceframes_created),
   IPA_SYM(trace_state_variables),
+  IPA_SYM(get_raw_reg),
+  IPA_SYM(get_trace_state_variable_value),
+  IPA_SYM(set_trace_state_variable_value),
 };

 struct ipa_sym_addresses ipa_sym_addrs;
@@ -564,6 +573,9 @@ enum tracepoint_type

struct tracepoint_hit_ctx;

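+/* A compiled condition function: it takes the tracepoint hit context
+   and a pointer through which to return the condition's value, and
+   returns an expression evaluation error code, mirroring
+   eval_agent_expr.  */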
+typedef enum eval_result_type (*condfn) (struct tracepoint_hit_ctx *,
+                                        ULONGEST *);
+
 /* The definition of a tracepoint.  */

 /* Tracepoints may have multiple locations, each at a different
@@ -612,6 +624,8 @@ struct tracepoint
      Note that while-stepping steps are not counted as "hits".  */
   long hit_count;

+  CORE_ADDR compiled_cond;
+
   /* Link to the next tracepoint in the list.  */
   struct tracepoint *next;

@@ -1189,6 +1203,8 @@ static void collect_data_at_tracepoint (
 static void collect_data_at_step (struct tracepoint_hit_ctx *ctx,
                                  CORE_ADDR stop_pc,
                                  struct tracepoint *tpoint, int current_step);
+static void compile_tracepoint_condition (struct tracepoint *tpoint,
+                                         CORE_ADDR *jump_entry);
 #endif
 static void do_action_at_tracepoint (struct tracepoint_hit_ctx *ctx,
                                     CORE_ADDR stop_pc,
@@ -1610,6 +1626,7 @@ add_tracepoint (int num, CORE_ADDR addr)
   tpoint->type = trap_tracepoint;
   tpoint->orig_size = -1;
   tpoint->source_strings = NULL;
+  tpoint->compiled_cond = 0;
   tpoint->handle = NULL;
   tpoint->next = NULL;

@@ -1854,7 +1871,7 @@ create_trace_state_variable (int num, in
   return tsv;
 }

-static LONGEST
+IP_AGENT_EXPORT LONGEST
 get_trace_state_variable_value (int num)
 {
   struct trace_state_variable *tsv;
@@ -1880,7 +1897,7 @@ get_trace_state_variable_value (int num)
   return tsv->value;
 }

-static void
+IP_AGENT_EXPORT void
 set_trace_state_variable_value (int num, LONGEST val)
 {
   struct trace_state_variable *tsv;
@@ -3906,7 +3923,10 @@ condition_true_at_tracepoint (struct tra
   ULONGEST value = 0;
   enum eval_result_type err;

-  err = eval_agent_expr (ctx, NULL, tpoint->cond, &value);
+  if (tpoint->compiled_cond)
+    err = ((condfn) (uintptr_t) (tpoint->compiled_cond)) (ctx, &value);
+  else
+    err = eval_agent_expr (ctx, NULL, tpoint->cond, &value);

   if (err != expr_eval_no_error)
     {
@@ -4944,6 +4964,582 @@ gdb_collect (struct tracepoint *tpoint,

#ifndef IN_PROCESS_AGENT

+/* Bytecode compilation.  */
+
+CORE_ADDR current_insn_ptr;
+
+int emit_error;
+
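+/* Map from bytecode PCs to compiled-code addresses, so that a second
+   pass can patch goto instructions with their real destinations.  */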
+struct bytecode_address
+{
+  int pc;
+  CORE_ADDR address;
+  int goto_pc;
+  /* Offset and size of field to be modified in the goto block.  */
+  int from_offset, from_size;
+  struct bytecode_address *next;
+} *bytecode_address_table;
+
+CORE_ADDR
+get_raw_reg_func_addr (void)
+{
+  return ipa_sym_addrs.addr_get_raw_reg;
+}
+
+static void
+emit_prologue (void)
+{
+  target_emit_ops ()->emit_prologue ();
+}
+
+static void
+emit_epilogue (void)
+{
+  target_emit_ops ()->emit_epilogue ();
+}
+
+static void
+emit_add (void)
+{
+  target_emit_ops ()->emit_add ();
+}
+
+static void
+emit_sub (void)
+{
+  target_emit_ops ()->emit_sub ();
+}
+
+static void
+emit_mul (void)
+{
+  target_emit_ops ()->emit_mul ();
+}
+
+static void
+emit_lsh (void)
+{
+  target_emit_ops ()->emit_lsh ();
+}
+
+static void
+emit_rsh_signed (void)
+{
+  target_emit_ops ()->emit_rsh_signed ();
+}
+
+static void
+emit_rsh_unsigned (void)
+{
+  target_emit_ops ()->emit_rsh_unsigned ();
+}
+
+static void
+emit_ext (int arg)
+{
+  target_emit_ops ()->emit_ext (arg);
+}
+
+static void
+emit_log_not (void)
+{
+  target_emit_ops ()->emit_log_not ();
+}
+
+static void
+emit_bit_and (void)
+{
+  target_emit_ops ()->emit_bit_and ();
+}
+
+static void
+emit_bit_or (void)
+{
+  target_emit_ops ()->emit_bit_or ();
+}
+
+static void
+emit_bit_xor (void)
+{
+  target_emit_ops ()->emit_bit_xor ();
+}
+
+static void
+emit_bit_not (void)
+{
+  target_emit_ops ()->emit_bit_not ();
+}
+
+static void
+emit_equal (void)
+{
+  target_emit_ops ()->emit_equal ();
+}
+
+static void
+emit_less_signed (void)
+{
+  target_emit_ops ()->emit_less_signed ();
+}
+
+static void
+emit_less_unsigned (void)
+{
+  target_emit_ops ()->emit_less_unsigned ();
+}
+
+static void
+emit_ref (int size)
+{
+  target_emit_ops ()->emit_ref (size);
+}
+
+static void
+emit_if_goto (int *offset_p, int *size_p)
+{
+  target_emit_ops ()->emit_if_goto (offset_p, size_p);
+}
+
+static void
+emit_goto (int *offset_p, int *size_p)
+{
+  target_emit_ops ()->emit_goto (offset_p, size_p);
+}
+
+static void
+write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
+{
+  target_emit_ops ()->write_goto_address (from, to, size);
+}
+
+static void
+emit_const (int64_t num)
+{
+  target_emit_ops ()->emit_const (num);
+}
+
+static void
+emit_reg (int reg)
+{
+  target_emit_ops ()->emit_reg (reg);
+}
+
+static void
+emit_pop (void)
+{
+  target_emit_ops ()->emit_pop ();
+}
+
+static void
+emit_stack_flush (void)
+{
+  target_emit_ops ()->emit_stack_flush ();
+}
+
+static void
+emit_zero_ext (int arg)
+{
+  target_emit_ops ()->emit_zero_ext (arg);
+}
+
+static void
+emit_swap (void)
+{
+  target_emit_ops ()->emit_swap ();
+}
+
+static void
+emit_stack_adjust (int n)
+{
+  target_emit_ops ()->emit_stack_adjust (n);
+}
+
+/* FN's prototype is `LONGEST(*fn)(int)'.  */
+
+static void
+emit_int_call_1 (CORE_ADDR fn, int arg1)
+{
+  target_emit_ops ()->emit_int_call_1 (fn, arg1);
+}
+
+/* FN's prototype is `void(*fn)(int,int64_t)'.  */
+
+static void
+emit_void_call_2 (CORE_ADDR fn, int arg1)
+{
+  target_emit_ops ()->emit_void_call_2 (fn, arg1);
+}
+
+static enum eval_result_type compile_bytecodes (struct agent_expr *aexpr);
+
+static void
+compile_tracepoint_condition (struct tracepoint *tpoint, CORE_ADDR *jump_entry)
+{
+  CORE_ADDR entry_point = *jump_entry;
+  enum eval_result_type err;
+
+  trace_debug ("Starting condition compilation for tracepoint %d\n",
+              tpoint->number);
+
+  /* Initialize the global pointer to the code being built.  */
+  current_insn_ptr = *jump_entry;
+
+  emit_prologue ();
+
+  err = compile_bytecodes (tpoint->cond);
+
+  if (err == expr_eval_no_error)
+    {
+      emit_epilogue ();
+
+      /* Record the beginning of the compiled code.  */
+      tpoint->compiled_cond = entry_point;
+
+      trace_debug ("Condition compilation for tracepoint %d complete\n",
+                  tpoint->number);
+    }
+  else
+    {
+      /* Leave the unfinished code in situ, but don't point to it.  */
+
+      tpoint->compiled_cond = 0;
+
+      trace_debug ("Condition compilation for tracepoint %d failed, "
+                  "error code %d",
+                  tpoint->number, err);
+    }
+
+  /* Update the code pointer passed in.  Note that we do this even if
+     the compile fails, so that we can look at the partial results
+     instead of letting them be overwritten.  */
+  *jump_entry = current_insn_ptr;
+
+  /* Leave a gap, to aid dump decipherment.  */
+  *jump_entry += 16;
+}
+
+/* Given an agent expression, turn it into native code.  */
+
+static enum eval_result_type
+compile_bytecodes (struct agent_expr *aexpr)
+{
+  int pc = 0;
+  int done = 0;
+  unsigned char op;
+  int arg;
+  /* This is only used to build 64-bit value for constants.  */
+  ULONGEST top;
+  struct bytecode_address *aentry, *aentry2;
+
+#define UNHANDLED                                      \
+  do                                                   \
+    {                                                  \
+      trace_debug ("Cannot compile op 0x%x\n", op);    \
+      return expr_eval_unhandled_opcode;               \
+    } while (0)
+
+  if (aexpr->length == 0)
+    {
+      trace_debug ("empty agent expression\n");
+      return expr_eval_empty_expression;
+    }
+
+  bytecode_address_table = NULL;
+
+  while (!done)
+    {
+      op = aexpr->bytes[pc];
+
+      trace_debug ("About to compile op 0x%x, pc=%d\n", op, pc);
+
+      /* Record the compiled-code address of the bytecode, for use by
+        jump instructions.  */
+      aentry = xmalloc (sizeof (struct bytecode_address));
+      aentry->pc = pc;
+      aentry->address = current_insn_ptr;
+      aentry->goto_pc = -1;
+      aentry->from_offset = aentry->from_size = 0;
+      aentry->next = bytecode_address_table;
+      bytecode_address_table = aentry;
+
+      ++pc;
+
+      emit_error = 0;
+
+      switch (op)
+       {
+       case gdb_agent_op_add:
+         emit_add ();
+         break;
+
+       case gdb_agent_op_sub:
+         emit_sub ();
+         break;
+
+       case gdb_agent_op_mul:
+         emit_mul ();
+         break;
+
+       case gdb_agent_op_div_signed:
+         UNHANDLED;
+         break;
+
+       case gdb_agent_op_div_unsigned:
+         UNHANDLED;
+         break;
+
+       case gdb_agent_op_rem_signed:
+         UNHANDLED;
+         break;
+
+       case gdb_agent_op_rem_unsigned:
+         UNHANDLED;
+         break;
+
+       case gdb_agent_op_lsh:
+         emit_lsh ();
+         break;
+
+       case gdb_agent_op_rsh_signed:
+         emit_rsh_signed ();
+         break;
+
+       case gdb_agent_op_rsh_unsigned:
+         emit_rsh_unsigned ();
+         break;
+
+       case gdb_agent_op_trace:
+         UNHANDLED;
+         break;
+
+       case gdb_agent_op_trace_quick:
+         UNHANDLED;
+         break;
+
+       case gdb_agent_op_log_not:
+         emit_log_not ();
+         break;
+
+       case gdb_agent_op_bit_and:
+         emit_bit_and ();
+         break;
+
+       case gdb_agent_op_bit_or:
+         emit_bit_or ();
+         break;
+
+       case gdb_agent_op_bit_xor:
+         emit_bit_xor ();
+         break;
+
+       case gdb_agent_op_bit_not:
+         emit_bit_not ();
+         break;
+
+       case gdb_agent_op_equal:
+         emit_equal ();
+         break;
+
+       case gdb_agent_op_less_signed:
+         emit_less_signed ();
+         break;
+
+       case gdb_agent_op_less_unsigned:
+         emit_less_unsigned ();
+         break;
+
+       case gdb_agent_op_ext:
+         arg = aexpr->bytes[pc++];
+         if (arg < (sizeof (LONGEST) * 8))
+           emit_ext (arg);
+         break;
+
+       case gdb_agent_op_ref8:
+         emit_ref (1);
+         break;
+
+       case gdb_agent_op_ref16:
+         emit_ref (2);
+         break;
+
+       case gdb_agent_op_ref32:
+         emit_ref (4);
+         break;
+
+       case gdb_agent_op_ref64:
+         emit_ref (8);
+         break;
+
+       case gdb_agent_op_if_goto:
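+         /* The destination PC operand is encoded big-endian.  */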
+         arg = aexpr->bytes[pc++];
+         arg = (arg << 8) + aexpr->bytes[pc++];
+         aentry->goto_pc = arg;
+         emit_if_goto (&(aentry->from_offset), &(aentry->from_size));
+         break;
+
+       case gdb_agent_op_goto:
+         arg = aexpr->bytes[pc++];
+         arg = (arg << 8) + aexpr->bytes[pc++];
+         aentry->goto_pc = arg;
+         emit_goto (&(aentry->from_offset), &(aentry->from_size));
+         break;
+
+       case gdb_agent_op_const8:
+         emit_stack_flush ();
+         top = aexpr->bytes[pc++];
+         emit_const (top);
+         break;
+
+       case gdb_agent_op_const16:
+         emit_stack_flush ();
+         top = aexpr->bytes[pc++];
+         top = (top << 8) + aexpr->bytes[pc++];
+         emit_const (top);
+         break;
+
+       case gdb_agent_op_const32:
+         emit_stack_flush ();
+         top = aexpr->bytes[pc++];
+         top = (top << 8) + aexpr->bytes[pc++];
+         top = (top << 8) + aexpr->bytes[pc++];
+         top = (top << 8) + aexpr->bytes[pc++];
+         emit_const (top);
+         break;
+
+       case gdb_agent_op_const64:
+         emit_stack_flush ();
+         top = aexpr->bytes[pc++];
+         top = (top << 8) + aexpr->bytes[pc++];
+         top = (top << 8) + aexpr->bytes[pc++];
+         top = (top << 8) + aexpr->bytes[pc++];
+         top = (top << 8) + aexpr->bytes[pc++];
+         top = (top << 8) + aexpr->bytes[pc++];
+         top = (top << 8) + aexpr->bytes[pc++];
+         top = (top << 8) + aexpr->bytes[pc++];
+         emit_const (top);
+         break;
+
+       case gdb_agent_op_reg:
+         emit_stack_flush ();
+         arg = aexpr->bytes[pc++];
+         arg = (arg << 8) + aexpr->bytes[pc++];
+         emit_reg (arg);
+         break;
+
+       case gdb_agent_op_end:
+         trace_debug ("At end of expression\n");
+
+         /* Assume there is one stack element left, and that it is
+            cached in "top" where emit_epilogue can get to it.  */
+         emit_stack_adjust (1);
+
+         done = 1;
+         break;
+
+       case gdb_agent_op_dup:
+         /* In our design, dup is equivalent to stack flushing.  */
+         emit_stack_flush ();
+         break;
+
+       case gdb_agent_op_pop:
+         emit_pop ();
+         break;
+
+       case gdb_agent_op_zero_ext:
+         arg = aexpr->bytes[pc++];
+         if (arg < (sizeof (LONGEST) * 8))
+           emit_zero_ext (arg);
+         break;
+
+       case gdb_agent_op_swap:
+         emit_swap ();
+         break;
+
+       case gdb_agent_op_getv:
+         emit_stack_flush ();
+         arg = aexpr->bytes[pc++];
+         arg = (arg << 8) + aexpr->bytes[pc++];
+         emit_int_call_1 (ipa_sym_addrs.addr_get_trace_state_variable_value,
+                          arg);
+         break;
+
+       case gdb_agent_op_setv:
+         arg = aexpr->bytes[pc++];
+         arg = (arg << 8) + aexpr->bytes[pc++];
+         emit_void_call_2 (ipa_sym_addrs.addr_set_trace_state_variable_value,
+                           arg);
+         break;
+
+       case gdb_agent_op_tracev:
+         UNHANDLED;
+         break;
+
+         /* GDB never (currently) generates any of these ops.  */
+       case gdb_agent_op_float:
+       case gdb_agent_op_ref_float:
+       case gdb_agent_op_ref_double:
+       case gdb_agent_op_ref_long_double:
+       case gdb_agent_op_l_to_d:
+       case gdb_agent_op_d_to_l:
+       case gdb_agent_op_trace16:
+         UNHANDLED;
+         break;
+
+       default:
+         trace_debug ("Agent expression op 0x%x not recognized\n", op);
+         /* Don't struggle on, things will just get worse.  */
+         return expr_eval_unrecognized_opcode;
+       }
+
+      /* This catches errors that occur in target-specific code
+        emission.  */
+      if (emit_error)
+       {
+         trace_debug ("Error %d while emitting code for %s\n",
+                      emit_error, gdb_agent_op_names[op]);
+         return expr_eval_unhandled_opcode;
+       }
+
+      trace_debug ("Op %s compiled\n", gdb_agent_op_names[op]);
+    }
+
+  /* Now fill in real addresses as goto destinations.  */
+  for (aentry = bytecode_address_table; aentry; aentry = aentry->next)
+    {
+      int written = 0;
+
+      if (aentry->goto_pc < 0)
+       continue;
+
+      /* Find the location that we are going to, and call back into
+        target-specific code to write the actual address or
+        displacement.  */
+      for (aentry2 = bytecode_address_table; aentry2; aentry2 = aentry2->next)
+       {
+         if (aentry2->pc == aentry->goto_pc)
+           {
+             trace_debug ("Want to jump from %s to %s\n",
+                          paddress (aentry->address),
+                          paddress (aentry2->address));
+             write_goto_address (aentry->address + aentry->from_offset,
+                                 aentry2->address, aentry->from_size);
+             written = 1;
+             break;
+           }
+       }
+
+      /* Error out if we didn't find a destination.  */
+      if (!written)
+       {
+         trace_debug ("Destination of goto %d not found\n",
+                      aentry->goto_pc);
+         return expr_eval_invalid_goto;
+       }
+    }
+
+  return expr_eval_no_error;
+}
+
 /* We'll need to adjust these when we consider bi-arch setups, and big
    endian machines.  */

@@ -5022,6 +5618,28 @@ download_tracepoints (void)
       if (tpoint->type != fast_tracepoint)
        continue;

+      /* Maybe download a compiled condition.  */
+      if (tpoint->cond != NULL && target_emit_ops () != NULL)
+       {
+         CORE_ADDR jentry, jump_entry;
+
+         jentry = jump_entry = get_jump_space_head ();
+
+         if (tpoint->cond != NULL)
+           {
+             /* Pad to 8-byte alignment. (needed?)  */
+             /* Actually this should be left for the target to
+                decide.  */
+             jentry = UALIGN (jentry, 8);
+
+             compile_tracepoint_condition (tpoint, &jentry);
+           }
+
+         /* Pad to 8-byte alignment.  */
+         jentry = UALIGN (jentry, 8);
+         claim_jump_space (jentry - jump_entry);
+       }
+
       target_tracepoint = *tpoint;

       prev_tpptr = tpptr;
Index: src/gdb/gdbserver/target.h
===================================================================
--- src.orig/gdb/gdbserver/target.h     2010-06-07 16:13:07.000000000 +0100
+++ src/gdb/gdbserver/target.h  2010-06-07 16:13:24.000000000 +0100
@@ -22,6 +22,8 @@
 #ifndef TARGET_H
 #define TARGET_H

+struct emit_ops;
+
 /* Ways to "resume" a thread.  */

 enum resume_kind
@@ -349,6 +351,10 @@ struct target_ops
                                           ULONGEST *jjump_pad_insn_size,
                                           CORE_ADDR *adjusted_insn_addr,
                                           CORE_ADDR *adjusted_insn_addr_end);
+
+  /* Return the bytecode operations vector for the current inferior.
+     Returns NULL if bytecode compilation is not supported.  */
+  struct emit_ops *(*emit_ops) (void);
 };

 extern struct target_ops *the_target;
@@ -452,6 +458,8 @@ void set_target_ops (struct target_ops *
                                                   adjusted_insn_addr,  \
                                                   adjusted_insn_addr_end)

+#define target_emit_ops() (*the_target->emit_ops) ()
+
 /* Start non-stop mode, returns 0 on success, -1 on failure.   */

 int start_non_stop (int nonstop);
Index: src/gdb/gdbserver/linux-amd64-ipa.c
===================================================================
--- src.orig/gdb/gdbserver/linux-amd64-ipa.c    2010-06-07 16:13:07.000000000 +0100
+++ src/gdb/gdbserver/linux-amd64-ipa.c 2010-06-07 16:13:24.000000000 +0100
@@ -66,6 +66,15 @@ supply_fast_tracepoint_registers (struct
                     ((char *) buf) + x86_64_ft_collect_regmap[i]);
 }

+ULONGEST __attribute__ ((visibility("default"), used))
+gdb_agent_get_raw_reg (const unsigned char *raw_regs, int regnum)
+{
+  if (regnum >= X86_64_NUM_FT_COLLECT_GREGS)
+    return 0;
+
+  return *(ULONGEST *) (raw_regs + x86_64_ft_collect_regmap[regnum]);
+}
+
 /* This is only needed because reg-i386-linux-lib.o references it.  We
    may use it proper at some point.  */
 const char *gdbserver_xmltarget;
Index: src/gdb/gdbserver/linux-low.c
===================================================================
--- src.orig/gdb/gdbserver/linux-low.c  2010-06-07 16:13:07.000000000 +0100
+++ src/gdb/gdbserver/linux-low.c       2010-06-07 16:13:24.000000000 +0100
@@ -5054,6 +5054,15 @@ linux_install_fast_tracepoint_jump_pad (
      adjusted_insn_addr, adjusted_insn_addr_end);
 }

+static struct emit_ops *
+linux_emit_ops (void)
+{
+  if (the_low_target.emit_ops != NULL)
+    return (*the_low_target.emit_ops) ();
+  else
+    return NULL;
+}
+
 static struct target_ops linux_target_ops = {
   linux_create_inferior,
   linux_attach,
@@ -5109,7 +5118,8 @@ static struct target_ops linux_target_op
   linux_unpause_all,
   linux_cancel_breakpoints,
   linux_stabilize_threads,
-  linux_install_fast_tracepoint_jump_pad
+  linux_install_fast_tracepoint_jump_pad,
+  linux_emit_ops
 };

 static void
Index: src/gdb/gdbserver/linux-low.h
===================================================================
--- src.orig/gdb/gdbserver/linux-low.h  2010-06-07 16:13:07.000000000 +0100
+++ src/gdb/gdbserver/linux-low.h       2010-06-07 16:13:24.000000000 +0100
@@ -136,6 +136,10 @@ struct linux_target_ops
                                           ULONGEST *jjump_pad_insn_size,
                                           CORE_ADDR *adjusted_insn_addr,
                                           CORE_ADDR *adjusted_insn_addr_end);
+
+  /* Return the bytecode operations vector for the current inferior.
+     Returns NULL if bytecode compilation is not supported.  */
+  struct emit_ops *(*emit_ops) (void);
 };

 extern struct linux_target_ops the_low_target;
Index: src/gdb/gdbserver/linux-i386-ipa.c
===================================================================
--- src.orig/gdb/gdbserver/linux-i386-ipa.c     2010-06-07 16:43:41.000000000 +0100
+++ src/gdb/gdbserver/linux-i386-ipa.c  2010-06-07 16:44:45.000000000 +0100
@@ -95,6 +95,21 @@ supply_fast_tracepoint_registers (struct
     }
 }

+ULONGEST __attribute__ ((visibility("default"), used))
+gdb_agent_get_raw_reg (unsigned char *raw_regs, int regnum)
+{
+  /* This should maybe be allowed to return an error code, or perhaps
+     better, have the emit_reg detect this, and emit a constant zero,
+     or something.  */
+
+  if (regnum >= i386_num_regs)
+    return 0;
+  else if (regnum >= I386_CS_REGNUM && regnum <= I386_GS_REGNUM)
+    return *(short *) (raw_regs + i386_ft_collect_regmap[regnum]);
+  else
+    return *(int *) (raw_regs + i386_ft_collect_regmap[regnum]);
+}
+
 /* This is only needed because reg-i386-linux-lib.o references it.  We
    may use it proper at some point.  */
 const char *gdbserver_xmltarget;

