[patch v5 19/21] btrace, gdbserver: read branch trace incrementally


Read branch trace data incrementally and extend the current trace rather than
discarding it and reading the entire trace buffer each time.

If the branch trace buffer has overflowed, we cannot extend the current trace,
so we discard it and start anew by reading the entire branch trace buffer.
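
A note for reviewers, not part of the patch itself: the overflow detection
in linux_read_btrace comes down to comparing the perf data_head/data_tail
byte counters against the size of the circular buffer.  A minimal
standalone sketch of that arithmetic, with made-up sample values:

#include <stdio.h>
#include <stddef.h>

/* DATA_HEAD and DATA_TAIL are monotonically growing byte counters; the
   buffer itself is circular and BUFFER_SIZE bytes long.  Store the number
   of new bytes in *SIZE and return 0, or return -1 if the delta can no
   longer be recovered and a full read is required.  */
static int
delta_size (unsigned long data_head, unsigned long data_tail,
            size_t buffer_size, size_t *size)
{
  /* The counter ran backwards; give up.  */
  if (data_head < data_tail)
    return -1;

  /* More new bytes than the buffer holds: the oldest part of the delta
     has already been overwritten.  */
  *size = data_head - data_tail;
  if (buffer_size < *size)
    return -1;

  return 0;
}

int
main (void)
{
  size_t size;

  /* 300 new bytes in a 4096-byte buffer: the delta can be read.  */
  printf ("%d\n", delta_size (1300, 1000, 4096, &size));

  /* 5000 new bytes in a 4096-byte buffer: overflowed, fall back to a
     full read.  */
  printf ("%d\n", delta_size (6000, 1000, 4096, &size));

  return 0;
}

Everything beyond that check falls back to a full (btrace_read_all) read,
as described above.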

Reviewed-by: Eli Zaretskii  <eliz@gnu.org>
CC: Pedro Alves  <palves@redhat.com>
2013-09-19  Markus Metzger  <markus.t.metzger@intel.com>

	* common/linux-btrace.c (perf_event_read_bts, linux_read_btrace):
	Support delta reads.
	* common/linux-btrace.h (linux_read_btrace): Change parameters
	and return type to allow error reporting.
	* common/btrace-common.h (btrace_read_type)<btrace_read_delta>:
	New.
	(btrace_block)<begin>: Comment on BEGIN == 0.
	* btrace.c (btrace_compute_ftrace): Start from the end of
	the current trace.
	(btrace_stitch_trace, btrace_clear_history): New.
	(btrace_fetch): Read delta trace, return if replaying.
	(btrace_clear): Move clear history code to btrace_clear_history.
	(parse_xml_btrace): Throw an error if parsing failed.
	* target.h (struct target_ops)<to_read_btrace>: Change parameters
	and return type to allow error reporting.
	(target_read_btrace): Change parameters and return type to allow
	error reporting.
	* target.c (target_read_btrace): Update.
	* remote.c (remote_read_btrace): Support delta reads.  Pass
	errors on.

gdbserver/
	* target.h (target_ops)<read_btrace>: Change parameters and
	return type to allow error reporting.
	* server.c (handle_qxfer_btrace): Support delta reads.  Pass
	trace reading errors on.
	* linux-low.c (linux_low_read_btrace): Pass trace reading
	errors on.


---
 gdb/NEWS                   |    4 +
 gdb/btrace.c               |  147 ++++++++++++++++++++++++++++++++++++++-----
 gdb/common/btrace-common.h |   10 +++-
 gdb/common/linux-btrace.c  |   89 +++++++++++++++++++--------
 gdb/common/linux-btrace.h  |   10 ++-
 gdb/doc/gdb.texinfo        |    8 +++
 gdb/gdbserver/linux-low.c  |   22 +++++--
 gdb/gdbserver/server.c     |   11 +++-
 gdb/gdbserver/target.h     |    6 +-
 gdb/remote.c               |   23 ++++---
 gdb/target.c               |    9 ++-
 gdb/target.h               |   14 +++--
 12 files changed, 278 insertions(+), 75 deletions(-)

diff --git a/gdb/NEWS b/gdb/NEWS
index 4b41e87..6005b89 100644
--- a/gdb/NEWS
+++ b/gdb/NEWS
@@ -149,6 +149,10 @@ qXfer:libraries-svr4:read's annex
   necessary for library list updating, resulting in significant
   speedup.
 
+qXfer:btrace:read's annex
+  The qXfer:btrace:read packet supports a new annex 'delta' to read
+  branch trace incrementally.
+
 * New features in the GDB remote stub, GDBserver
 
   ** GDBserver now supports target-assisted range stepping.  Currently
diff --git a/gdb/btrace.c b/gdb/btrace.c
index 7029034..82cb134 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -599,9 +599,9 @@ btrace_compute_ftrace (struct btrace_thread_info *btinfo,
   DEBUG ("compute ftrace");
 
   gdbarch = target_gdbarch ();
-  begin = NULL;
-  end = NULL;
-  level = INT_MAX;
+  begin = btinfo->begin;
+  end = btinfo->end;
+  level = begin != NULL ? -btinfo->level : INT_MAX;
   blk = VEC_length (btrace_block_s, btrace);
 
   while (blk != 0)
@@ -717,27 +717,149 @@ btrace_teardown (struct thread_info *tp)
   btrace_clear (tp);
 }
 
+/* Adjust the block trace in order to stitch old and new trace together.
+   Return 0 on success, -1 otherwise.  */
+
+static int
+btrace_stitch_trace (VEC (btrace_block_s) **btrace,
+		     const struct btrace_thread_info *btinfo)
+{
+  struct btrace_function *end;
+  struct btrace_insn *insn;
+  btrace_block_s *block;
+
+  /* If we don't have trace, there's nothing to do.  */
+  if (VEC_empty (btrace_block_s, *btrace))
+    return 0;
+
+  end = btinfo->end;
+  gdb_assert (end != NULL);
+
+  block = VEC_last (btrace_block_s, *btrace);
+  insn = VEC_last (btrace_insn_s, end->insn);
+
+  /* If the current PC at the end of the block is the same as in our current
+     trace, there are two explanations:
+       1. we executed the instruction and some branch brought us back.
+       2. we have not made any progress.
+     In the first case, the delta trace vector should contain at least two
+     entries.
+     In the second case, the delta trace vector should contain exactly one
+     entry for the partial block containing the current PC.  Remove it.  */
+  if (block->end == insn->pc && VEC_length (btrace_block_s, *btrace) == 1)
+    {
+      VEC_pop (btrace_block_s, *btrace);
+      return 0;
+    }
+
+  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (insn),
+	 core_addr_to_string_nz (block->end));
+
+  /* Do a simple sanity check to make sure we don't accidentally end up
+     with a bad block.  This should not occur in practice.  */
+  if (block->end < insn->pc)
+    {
+      warning (_("Error while trying to read delta trace.  Falling back to "
+		 "a full read."));
+      return -1;
+    }
+
+  /* We adjust the last block to start at the end of our current trace.  */
+  gdb_assert (block->begin == 0);
+  block->begin = insn->pc;
+
+  /* We simply pop the last insn so we can insert it again as part of
+     the normal branch trace computation.
+     Since instruction iterators are based on indices in the instructions
+     vector, we don't leave any pointers dangling.  */
+  DEBUG ("pruning insn at %s for stitching", ftrace_print_insn_addr (insn));
+
+  VEC_pop (btrace_insn_s, end->insn);
+
+  /* The instructions vector may become empty temporarily if this has
+     been the only instruction in this function segment.
+     This violates the invariant but will be remedied shortly.  */
+  return 0;
+}
+
+/* Clear the branch trace histories in BTINFO.  */
+
+static void
+btrace_clear_history (struct btrace_thread_info *btinfo)
+{
+  xfree (btinfo->insn_history);
+  xfree (btinfo->call_history);
+  xfree (btinfo->replay);
+
+  btinfo->insn_history = NULL;
+  btinfo->call_history = NULL;
+  btinfo->replay = NULL;
+}
+
 /* See btrace.h.  */
 
 void
 btrace_fetch (struct thread_info *tp)
 {
   struct btrace_thread_info *btinfo;
+  struct btrace_target_info *tinfo;
   VEC (btrace_block_s) *btrace;
   struct cleanup *cleanup;
+  int errcode;
 
   DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
 
+  btrace = NULL;
   btinfo = &tp->btrace;
-  if (btinfo->target == NULL)
+  tinfo = btinfo->target;
+  if (tinfo == NULL)
+    return;
+
+  /* There's no way we could get new trace while replaying.
+     On the other hand, delta trace would return a partial record with the
+     current PC, which is the replay PC, not the last PC, as expected.  */
+  if (btinfo->replay != NULL)
     return;
 
-  btrace = target_read_btrace (btinfo->target, btrace_read_new);
   cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
 
+  /* Let's first try to extend the trace we already have.  */
+  if (btinfo->end != NULL)
+    {
+      errcode = target_read_btrace (&btrace, tinfo, btrace_read_delta);
+      if (errcode == 0)
+	{
+	  /* Success.  Let's try to stitch the traces together.  */
+	  errcode = btrace_stitch_trace (&btrace, btinfo);
+	}
+      else
+	{
+	  /* We failed to read delta trace.  Let's try to read new trace.  */
+	  errcode = target_read_btrace (&btrace, tinfo, btrace_read_new);
+
+	  /* If we got any new trace, discard what we have.  */
+	  if (errcode == 0 && !VEC_empty (btrace_block_s, btrace))
+	    btrace_clear (tp);
+	}
+
+      /* If we were not able to read the trace, we start over.  */
+      if (errcode != 0)
+	{
+	  btrace_clear (tp);
+	  errcode = target_read_btrace (&btrace, tinfo, btrace_read_all);
+	}
+    }
+  else
+    errcode = target_read_btrace (&btrace, tinfo, btrace_read_all);
+
+  /* If we were not able to read the branch trace, signal an error.  */
+  if (errcode != 0)
+    error (_("Failed to read branch trace."));
+
+  /* Compute the trace, provided we have any.  */
   if (!VEC_empty (btrace_block_s, btrace))
     {
-      btrace_clear (tp);
+      btrace_clear_history (btinfo);
       btrace_compute_ftrace (btinfo, btrace);
     }
 
@@ -772,13 +894,7 @@ btrace_clear (struct thread_info *tp)
   btinfo->begin = NULL;
   btinfo->end = NULL;
 
-  xfree (btinfo->insn_history);
-  xfree (btinfo->call_history);
-  xfree (btinfo->replay);
-
-  btinfo->insn_history = NULL;
-  btinfo->call_history = NULL;
-  btinfo->replay = NULL;
+  btrace_clear_history (btinfo);
 }
 
 /* See btrace.h.  */
@@ -870,10 +986,7 @@ parse_xml_btrace (const char *buffer)
   errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
 				 buffer, &btrace);
   if (errcode != 0)
-    {
-      do_cleanups (cleanup);
-      return NULL;
-    }
+    error (_("Error parsing branch trace."));
 
   /* Keep parse results.  */
   discard_cleanups (cleanup);
diff --git a/gdb/common/btrace-common.h b/gdb/common/btrace-common.h
index b157c7c..2a9f46f 100644
--- a/gdb/common/btrace-common.h
+++ b/gdb/common/btrace-common.h
@@ -42,7 +42,9 @@
    asynchronous, e.g. interrupts.  */
 struct btrace_block
 {
-  /* The address of the first byte of the first instruction in the block.  */
+  /* The address of the first byte of the first instruction in the block.
+     The address may be zero if we do not know the beginning of this block,
+     such as for the first block in a delta trace.  */
   CORE_ADDR begin;
 
   /* The address of the first byte of the last instruction in the block.  */
@@ -67,7 +69,11 @@ enum btrace_read_type
   btrace_read_all,
 
   /* Send all available trace, if it changed.  */
-  btrace_read_new
+  btrace_read_new,
+
+  /* Send the trace since the last request.  This will fail if the trace
+     buffer overflowed.  */
+  btrace_read_delta
 };
 
 #endif /* BTRACE_COMMON_H */
diff --git a/gdb/common/linux-btrace.c b/gdb/common/linux-btrace.c
index 20e43e9..353869d 100644
--- a/gdb/common/linux-btrace.c
+++ b/gdb/common/linux-btrace.c
@@ -169,11 +169,11 @@ perf_event_sample_ok (const struct perf_event_sample *sample)
 
 static VEC (btrace_block_s) *
 perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
-		     const uint8_t *end, const uint8_t *start)
+		     const uint8_t *end, const uint8_t *start, size_t size)
 {
   VEC (btrace_block_s) *btrace = NULL;
   struct perf_event_sample sample;
-  size_t read = 0, size = (end - begin);
+  size_t read = 0;
   struct btrace_block block = { 0, 0 };
   struct regcache *regcache;
 
@@ -249,6 +249,13 @@ perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
       block.end = psample->bts.from;
     }
 
+  /* Push the last block (i.e. the first one of inferior execution), as well.
+     We don't know where it ends, but we know where it starts.  If we're
+     reading delta trace, we can fill in the start address later on.
+     Otherwise we will prune it.  */
+  block.begin = 0;
+  VEC_safe_push (btrace_block_s, btrace, &block);
+
   return btrace;
 }
 
@@ -501,21 +508,24 @@ linux_btrace_has_changed (struct btrace_target_info *tinfo)
 
 /* See linux-btrace.h.  */
 
-VEC (btrace_block_s) *
-linux_read_btrace (struct btrace_target_info *tinfo,
+int
+linux_read_btrace (VEC (btrace_block_s) **btrace,
+		   struct btrace_target_info *tinfo,
 		   enum btrace_read_type type)
 {
-  VEC (btrace_block_s) *btrace = NULL;
   volatile struct perf_event_mmap_page *header;
   const uint8_t *begin, *end, *start;
-  unsigned long data_head, retries = 5;
-  size_t buffer_size;
+  unsigned long data_head, data_tail, retries = 5;
+  size_t buffer_size, size;
 
+  /* For delta reads, we return at least the partial last block containing
+     the current PC.  */
   if (type == btrace_read_new && !linux_btrace_has_changed (tinfo))
-    return NULL;
+    return 0;
 
   header = perf_event_header (tinfo);
   buffer_size = perf_event_buffer_size (tinfo);
+  data_tail = tinfo->data_head;
 
   /* We may need to retry reading the trace.  See below.  */
   while (retries--)
@@ -523,23 +533,45 @@ linux_read_btrace (struct btrace_target_info *tinfo,
       data_head = header->data_head;
 
       /* Delete any leftover trace from the previous iteration.  */
-      VEC_free (btrace_block_s, btrace);
+      VEC_free (btrace_block_s, *btrace);
 
-      /* If there's new trace, let's read it.  */
-      if (data_head != tinfo->data_head)
+      if (type == btrace_read_delta)
 	{
-	  /* Data_head keeps growing; the buffer itself is circular.  */
-	  begin = perf_event_buffer_begin (tinfo);
-	  start = begin + data_head % buffer_size;
-
-	  if (data_head <= buffer_size)
-	    end = start;
-	  else
-	    end = perf_event_buffer_end (tinfo);
+	  /* Determine the number of bytes to read and check for buffer
+	     overflows.  */
+
+	  /* Check for data head overflows.  We might be able to recover from
+	     those but they are very unlikely and it's not really worth the
+	     effort, I think.  */
+	  if (data_head < data_tail)
+	    return -EOVERFLOW;
+
+	  /* If the buffer is smaller than the trace delta, we overflowed.  */
+	  size = data_head - data_tail;
+	  if (buffer_size < size)
+	    return -EOVERFLOW;
+	}
+      else
+	{
+	  /* Read the entire buffer.  */
+	  size = buffer_size;
 
-	  btrace = perf_event_read_bts (tinfo, begin, end, start);
+	  /* Adjust the size if the buffer has not overflowed, yet.  */
+	  if (data_head < size)
+	    size = data_head;
 	}
 
+      /* Data_head keeps growing; the buffer itself is circular.  */
+      begin = perf_event_buffer_begin (tinfo);
+      start = begin + data_head % buffer_size;
+
+      if (data_head <= buffer_size)
+	end = start;
+      else
+	end = perf_event_buffer_end (tinfo);
+
+      *btrace = perf_event_read_bts (tinfo, begin, end, start, size);
+
       /* The stopping thread notifies its ptracer before it is scheduled out.
 	 On multi-core systems, the debugger might therefore run while the
 	 kernel might be writing the last branch trace records.
@@ -551,7 +583,13 @@ linux_read_btrace (struct btrace_target_info *tinfo,
 
   tinfo->data_head = data_head;
 
-  return btrace;
+  /* Prune the incomplete last block (i.e. the first one of inferior execution)
+     if we're not doing a delta read.  There is no way of filling in its zeroed
+     BEGIN element.  */
+  if (!VEC_empty (btrace_block_s, *btrace) && type != btrace_read_delta)
+    VEC_pop (btrace_block_s, *btrace);
+
+  return 0;
 }
 
 #else /* !HAVE_LINUX_PERF_EVENT_H */
@@ -577,16 +615,17 @@ linux_enable_btrace (ptid_t ptid)
 int
 linux_disable_btrace (struct btrace_target_info *tinfo)
 {
-  return ENOSYS;
+  return -ENOSYS;
 }
 
 /* See linux-btrace.h.  */
 
-VEC (btrace_block_s) *
-linux_read_btrace (struct btrace_target_info *tinfo,
+int
+linux_read_btrace (VEC (btrace_block_s) **btrace,
+		   struct btrace_target_info *tinfo,
 		   enum btrace_read_type type)
 {
-  return NULL;
+  return -ENOSYS;
 }
 
 #endif /* !HAVE_LINUX_PERF_EVENT_H */
diff --git a/gdb/common/linux-btrace.h b/gdb/common/linux-btrace.h
index d4e8402..ea86319 100644
--- a/gdb/common/linux-btrace.h
+++ b/gdb/common/linux-btrace.h
@@ -70,8 +70,12 @@ extern struct btrace_target_info *linux_enable_btrace (ptid_t ptid);
 /* Disable branch tracing and deallocate @tinfo.  */
 extern int linux_disable_btrace (struct btrace_target_info *tinfo);
 
-/* Read branch trace data.  */
-extern VEC (btrace_block_s) *linux_read_btrace (struct btrace_target_info *,
-						enum btrace_read_type);
+/* Read branch trace data for the thread indicated by BTINFO into BTRACE
+   using the TYPE read method.
+   The branch trace will start with the most recent block and continue
+   towards older blocks.  */
+extern int linux_read_btrace (VEC (btrace_block_s) **btrace,
+			      struct btrace_target_info *btinfo,
+			      enum btrace_read_type type);
 
 #endif /* LINUX_BTRACE_H */
diff --git a/gdb/doc/gdb.texinfo b/gdb/doc/gdb.texinfo
index 3449d52..1098b60 100644
--- a/gdb/doc/gdb.texinfo
+++ b/gdb/doc/gdb.texinfo
@@ -39260,6 +39260,14 @@ Returns all available branch trace.
 @item new
 Returns all available branch trace if the branch trace changed since
 the last read request.
+
+@item delta
+Returns the new branch trace since the last read request.  Adds a new
+block to the end of the trace that begins at zero and ends at the source
+location of the first branch in the trace buffer.  This extra block is
+used to stitch traces together.
+
+If the trace buffer overflowed, returns an error indicating the overflow.
 @end table
 
 This packet is not probed by default; the remote stub must request it
diff --git a/gdb/gdbserver/linux-low.c b/gdb/gdbserver/linux-low.c
index 799fcc9..273cfdf 100644
--- a/gdb/gdbserver/linux-low.c
+++ b/gdb/gdbserver/linux-low.c
@@ -5722,17 +5722,27 @@ linux_low_enable_btrace (ptid_t ptid)
   return tinfo;
 }
 
-/* Read branch trace data as btrace xml document.  */
+/* The read_btrace target method.  */
 
-static void
+static int
 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
 		       int type)
 {
   VEC (btrace_block_s) *btrace;
   struct btrace_block *block;
-  int i;
+  int i, errcode;
+
+  btrace = NULL;
+  errcode = linux_read_btrace (&btrace, tinfo, type);
+  if (errcode != 0)
+    {
+      if (errcode == -EOVERFLOW)
+	buffer_grow_str0 (buffer, "E.Overflow.");
+      else
+	buffer_grow_str0 (buffer, "E.Generic Error.");
 
-  btrace = linux_read_btrace (tinfo, type);
+      return -1;
+    }
 
   buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
   buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
@@ -5741,9 +5751,11 @@ linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
     buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
 		       paddress (block->begin), paddress (block->end));
 
-  buffer_grow_str (buffer, "</btrace>\n");
+  buffer_grow_str0 (buffer, "</btrace>\n");
 
   VEC_free (btrace_block_s, btrace);
+
+  return 0;
 }
 #endif /* HAVE_LINUX_BTRACE */
 
diff --git a/gdb/gdbserver/server.c b/gdb/gdbserver/server.c
index 4de20d5..ce84a61 100644
--- a/gdb/gdbserver/server.c
+++ b/gdb/gdbserver/server.c
@@ -1348,7 +1348,7 @@ handle_qxfer_btrace (const char *annex,
 {
   static struct buffer cache;
   struct thread_info *thread;
-  int type;
+  int type, result;
 
   if (the_target->read_btrace == NULL || writebuf != NULL)
     return -2;
@@ -1380,6 +1380,8 @@ handle_qxfer_btrace (const char *annex,
     type = btrace_read_all;
   else if (strcmp (annex, "new") == 0)
     type = btrace_read_new;
+  else if (strcmp (annex, "delta") == 0)
+    type = btrace_read_delta;
   else
     {
       strcpy (own_buf, "E.Bad annex.");
@@ -1390,7 +1392,12 @@ handle_qxfer_btrace (const char *annex,
     {
       buffer_free (&cache);
 
-      target_read_btrace (thread->btrace, &cache, type);
+      result = target_read_btrace (thread->btrace, &cache, type);
+      if (result != 0)
+	{
+	  memcpy (own_buf, cache.buffer, cache.used_size);
+	  return -3;
+	}
     }
   else if (offset > cache.used_size)
     {
diff --git a/gdb/gdbserver/target.h b/gdb/gdbserver/target.h
index c5e6fee..0fa83fc 100644
--- a/gdb/gdbserver/target.h
+++ b/gdb/gdbserver/target.h
@@ -360,8 +360,10 @@ struct target_ops
   int (*disable_btrace) (struct btrace_target_info *tinfo);
 
   /* Read branch trace data into buffer.  We use an int to specify the type
-     to break a cyclic dependency.  */
-  void (*read_btrace) (struct btrace_target_info *, struct buffer *, int type);
+     to break a cyclic dependency.
+     Return 0 on success; print an error message into BUFFER and return -1,
+     otherwise.  */
+  int (*read_btrace) (struct btrace_target_info *, struct buffer *, int type);
 
   /* Return true if target supports range stepping.  */
   int (*supports_range_stepping) (void);
diff --git a/gdb/remote.c b/gdb/remote.c
index 33ca9f3..69289a7 100644
--- a/gdb/remote.c
+++ b/gdb/remote.c
@@ -11365,13 +11365,14 @@ remote_teardown_btrace (struct btrace_target_info *tinfo)
 
 /* Read the branch trace.  */
 
-static VEC (btrace_block_s) *
-remote_read_btrace (struct btrace_target_info *tinfo,
+static int
+remote_read_btrace (VEC (btrace_block_s) **btrace,
+		    struct btrace_target_info *tinfo,
 		    enum btrace_read_type type)
 {
   struct packet_config *packet = &remote_protocol_packets[PACKET_qXfer_btrace];
   struct remote_state *rs = get_remote_state ();
-  VEC (btrace_block_s) *btrace = NULL;
+  struct cleanup *cleanup;
   const char *annex;
   char *xml;
 
@@ -11390,6 +11391,9 @@ remote_read_btrace (struct btrace_target_info *tinfo,
     case btrace_read_new:
       annex = "new";
       break;
+    case btrace_read_delta:
+      annex = "delta";
+      break;
     default:
       internal_error (__FILE__, __LINE__,
 		      _("Bad branch tracing read type: %u."),
@@ -11398,15 +11402,14 @@ remote_read_btrace (struct btrace_target_info *tinfo,
 
   xml = target_read_stralloc (&current_target,
                               TARGET_OBJECT_BTRACE, annex);
-  if (xml != NULL)
-    {
-      struct cleanup *cleanup = make_cleanup (xfree, xml);
+  if (xml == NULL)
+    return -1;
 
-      btrace = parse_xml_btrace (xml);
-      do_cleanups (cleanup);
-    }
+  cleanup = make_cleanup (xfree, xml);
+  *btrace = parse_xml_btrace (xml);
+  do_cleanups (cleanup);
 
-  return btrace;
+  return 0;
 }
 
 static int
diff --git a/gdb/target.c b/gdb/target.c
index c97dcfb..06fb617 100644
--- a/gdb/target.c
+++ b/gdb/target.c
@@ -4199,18 +4199,19 @@ target_teardown_btrace (struct btrace_target_info *btinfo)
 
 /* See target.h.  */
 
-VEC (btrace_block_s) *
-target_read_btrace (struct btrace_target_info *btinfo,
+int
+target_read_btrace (VEC (btrace_block_s) **btrace,
+		    struct btrace_target_info *btinfo,
 		    enum btrace_read_type type)
 {
   struct target_ops *t;
 
   for (t = current_target.beneath; t != NULL; t = t->beneath)
     if (t->to_read_btrace != NULL)
-      return t->to_read_btrace (btinfo, type);
+      return t->to_read_btrace (btrace, btinfo, type);
 
   tcomplain ();
-  return NULL;
+  return ENOSYS;
 }
 
 /* See target.h.  */
diff --git a/gdb/target.h b/gdb/target.h
index b86ea4e..28fcf57 100644
--- a/gdb/target.h
+++ b/gdb/target.h
@@ -823,9 +823,12 @@ struct target_ops
        be attempting to talk to a remote target.  */
     void (*to_teardown_btrace) (struct btrace_target_info *tinfo);
 
-    /* Read branch trace data.  */
-    VEC (btrace_block_s) *(*to_read_btrace) (struct btrace_target_info *,
-					     enum btrace_read_type);
+    /* Read branch trace data into DATA.  The vector is cleared before any
+       new data is added.
+       Returns 0 on success, a negative errno code otherwise.  */
+    int (*to_read_btrace) (VEC (btrace_block_s) **data,
+			   struct btrace_target_info *,
+			   enum btrace_read_type);
 
     /* Stop trace recording.  */
     void (*to_stop_recording) (void);
@@ -1955,8 +1958,9 @@ extern void target_disable_btrace (struct btrace_target_info *btinfo);
 extern void target_teardown_btrace (struct btrace_target_info *btinfo);
 
 /* See to_read_btrace in struct target_ops.  */
-extern VEC (btrace_block_s) *target_read_btrace (struct btrace_target_info *,
-						 enum btrace_read_type);
+extern int target_read_btrace (VEC (btrace_block_s) **,
+			       struct btrace_target_info *,
+			       enum btrace_read_type);
 
 /* See to_stop_recording in struct target_ops.  */
 extern void target_stop_recording (void);
-- 
1.7.1
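
An aside on btrace_stitch_trace above, for illustration only (this is not
GDB code): a delta read ends with an open block whose begin is zero, and
GDB closes that block with the address of the last instruction it has
already decoded, falling back to a full read if the addresses do not line
up.  A standalone sketch with made-up addresses:

#include <stdio.h>
#include <stddef.h>

struct block { unsigned long begin, end; };

/* BLOCKS[NBLOCKS - 1] is the oldest block of a delta read; its begin is
   zero because the target does not know where that block started.
   LAST_PC is the address of the last instruction already decoded.
   Return 0 on success, -1 if the delta cannot be stitched and a full
   re-read is needed.  */
static int
stitch (struct block *blocks, size_t nblocks, unsigned long last_pc)
{
  struct block *oldest;

  if (nblocks == 0)
    return 0;

  oldest = &blocks[nblocks - 1];

  /* A block that ends before the point where the old trace stopped
     cannot be a continuation of it.  */
  if (oldest->end < last_pc)
    return -1;

  /* Close the open block: it continues right where the old trace
     stopped.  */
  oldest->begin = last_pc;
  return 0;
}

int
main (void)
{
  /* Newest block first, oldest (open) block last, as delivered by the
     target.  */
  struct block delta[] = { { 0x4005d0, 0x4005f0 }, { 0, 0x4005c8 } };

  if (stitch (delta, 2, 0x4005b0) == 0)
    printf ("stitched oldest block: 0x%lx-0x%lx\n",
            delta[1].begin, delta[1].end);

  return 0;
}

In the patch itself the last decoded instruction is additionally popped
from the existing trace so that it is re-decoded as part of the stitched
block, and a single block that ends exactly at the old PC is discarded as
"no progress"; both details are omitted in the sketch.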

