This is the mail archive of the
gdb-patches@sourceware.org
mailing list for the GDB project.
[PATCH 08/11] [SQUASH] btrace: Adjust struct btrace_function::{flow,segment}.
- From: Tim Wiederhake <tim dot wiederhake at intel dot com>
- To: gdb-patches at sourceware dot org
- Cc: markus dot t dot metzger at intel dot com
- Date: Fri, 17 Feb 2017 14:26:26 +0100
- Subject: [PATCH 08/11] [SQUASH] btrace: Adjust struct btrace_function::{flow,segment}.
- Authentication-results: sourceware.org; auth=none
- References: <1487337989-6367-1-git-send-email-tim.wiederhake@intel.com>
This patch stands alone for easier review and is meant to be squashed together
with the other patches in this series before committing. The ChangeLog entry will be added to the squashed commit.
2017-02-17 Tim Wiederhake <tim.wiederhake@intel.com>
---
gdb/btrace.c | 80 +++++++++++++++++++++++++------------------
gdb/btrace.h | 20 ++++-------
gdb/python/py-record-btrace.c | 8 ++---
gdb/record-btrace.c | 5 +--
4 files changed, 60 insertions(+), 53 deletions(-)
diff --git a/gdb/btrace.c b/gdb/btrace.c
index 880a703..701daa3 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -231,7 +231,6 @@ ftrace_new_function (struct btrace_thread_info *btinfo,
bfun->msym = mfun;
bfun->sym = fun;
- bfun->flow.prev = prev;
if (prev == NULL)
{
@@ -241,9 +240,6 @@ ftrace_new_function (struct btrace_thread_info *btinfo,
}
else
{
- gdb_assert (prev->flow.next == NULL);
- prev->flow.next = bfun;
-
bfun->number = prev->number + 1;
bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
bfun->level = prev->level;
@@ -277,16 +273,24 @@ ftrace_fixup_caller (struct btrace_thread_info *btinfo,
struct btrace_function *caller,
enum btrace_function_flag flags)
{
- struct btrace_function *prev, *next;
+ unsigned int prev, next;
+ prev = bfun->prev_segment;
+ next = bfun->next_segment;
ftrace_update_caller (bfun, caller, flags);
/* Update all function segments belonging to the same function. */
- for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
- ftrace_update_caller (prev, caller, flags);
+ for (; prev != 0; prev = bfun->prev_segment)
+ {
+ bfun = ftrace_find_call_by_number (btinfo, prev);
+ ftrace_update_caller (bfun, caller, flags);
+ }
- for (next = bfun->segment.next; next != NULL; next = next->segment.next)
- ftrace_update_caller (next, caller, flags);
+ for (; next != 0; next = bfun->next_segment)
+ {
+ bfun = ftrace_find_call_by_number (btinfo, next);
+ ftrace_update_caller (bfun, caller, flags);
+ }
}
/* Add a new function segment for a call.
@@ -408,10 +412,10 @@ ftrace_new_return (struct btrace_thread_info *btinfo,
{
/* The caller of PREV is the preceding btrace function segment in this
function instance. */
- gdb_assert (caller->segment.next == NULL);
+ gdb_assert (caller->next_segment == 0);
- caller->segment.next = bfun;
- bfun->segment.prev = caller;
+ caller->next_segment = bfun->number;
+ bfun->prev_segment = caller->number;
/* Maintain the function level. */
bfun->level = caller->level;
@@ -682,7 +686,8 @@ ftrace_match_backtrace (struct btrace_thread_info *btinfo,
/* Add ADJUSTMENT to the level of BFUN and succeeding function segments. */
static void
-ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
+ftrace_fixup_level (struct btrace_thread_info *btinfo,
+ struct btrace_function *bfun, int adjustment)
{
if (adjustment == 0)
return;
@@ -690,8 +695,11 @@ ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
DEBUG_FTRACE ("fixup level (%+d)", adjustment);
ftrace_debug (bfun, "..bfun");
- for (; bfun != NULL; bfun = bfun->flow.next)
- bfun->level += adjustment;
+ while (bfun != NULL)
+ {
+ bfun->level += adjustment;
+ bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
+ }
}
/* Recompute the global level offset. Traverse the function trace and compute
@@ -718,8 +726,11 @@ ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
end = NULL;
level = INT_MAX;
- for (; bfun != end; bfun = bfun->flow.next)
- level = std::min (level, bfun->level);
+ while (bfun != end)
+ {
+ level = std::min (level, bfun->level);
+ bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
+ }
DEBUG_FTRACE ("setting global level offset: %d", -level);
btinfo->level = -level;
@@ -738,14 +749,14 @@ ftrace_connect_bfun (struct btrace_thread_info *btinfo,
ftrace_debug (next, "..next");
/* The function segments are not yet connected. */
- gdb_assert (prev->segment.next == NULL);
- gdb_assert (next->segment.prev == NULL);
+ gdb_assert (prev->next_segment == 0);
+ gdb_assert (next->prev_segment == 0);
- prev->segment.next = next;
- next->segment.prev = prev;
+ prev->next_segment = next->number;
+ next->prev_segment = prev->number;
/* We may have moved NEXT to a different function level. */
- ftrace_fixup_level (next, prev->level - next->level);
+ ftrace_fixup_level (btinfo, next, prev->level - next->level);
/* If we run out of back trace for one, let's use the other's. */
if (prev->up == 0)
@@ -816,7 +827,8 @@ ftrace_connect_bfun (struct btrace_thread_info *btinfo,
Otherwise we will fix up CALLER's level when we connect it
to PREV's caller in the next iteration. */
- ftrace_fixup_level (caller, prev->level - caller->level - 1);
+ ftrace_fixup_level (btinfo, caller,
+ prev->level - caller->level - 1);
break;
}
@@ -912,7 +924,7 @@ ftrace_bridge_gap (struct btrace_thread_info *btinfo,
To catch this, we already fix up the level here where we can start at RHS
instead of at BEST_R. We will ignore the level fixup when connecting
BEST_L to BEST_R as they will already be on the same level. */
- ftrace_fixup_level (rhs, best_l->level - best_r->level);
+ ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);
ftrace_connect_backtrace (btinfo, best_l, best_r);
@@ -925,12 +937,14 @@ ftrace_bridge_gap (struct btrace_thread_info *btinfo,
static void
btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
{
+ struct btrace_thread_info *btinfo;
VEC (bfun_s) *remaining;
struct cleanup *old_chain;
int min_matches;
DEBUG ("bridge gaps");
+ btinfo = &tp->btrace;
remaining = NULL;
old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);
@@ -959,20 +973,20 @@ btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
all but the leftmost gap in such a sequence.
Also ignore gaps at the beginning of the trace. */
- lhs = gap->flow.prev;
+ lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
if (lhs == NULL || lhs->errcode != 0)
continue;
/* Skip gaps to the right. */
- for (rhs = gap->flow.next; rhs != NULL; rhs = rhs->flow.next)
- if (rhs->errcode == 0)
- break;
+ rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
+ while (rhs != NULL && rhs->errcode != 0)
+ rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);
/* Ignore gaps at the end of the trace. */
if (rhs == NULL)
continue;
- bridged = ftrace_bridge_gap (&tp->btrace, lhs, rhs, min_matches);
+ bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);
/* Keep track of gaps we were not able to bridge and try again.
If we just pushed them to the end of GAPS we would risk an
@@ -1002,7 +1016,7 @@ btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
/* We may omit this in some cases. Not sure it is worth the extra
complication, though. */
- ftrace_compute_global_level_offset (&tp->btrace);
+ ftrace_compute_global_level_offset (btinfo);
}
/* Compute the function branch trace from BTS trace. */
@@ -2371,7 +2385,7 @@ btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
const struct btrace_function *next;
- next = bfun->flow.next;
+ next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
if (next == NULL)
break;
@@ -2401,7 +2415,7 @@ btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
const struct btrace_function *next;
- next = bfun->flow.next;
+ next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
if (next == NULL)
{
/* We stepped past the last function.
@@ -2450,7 +2464,7 @@ btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
const struct btrace_function *prev;
- prev = bfun->flow.prev;
+ prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
if (prev == NULL)
break;
diff --git a/gdb/btrace.h b/gdb/btrace.h
index 2b28ff8..8f8a7fa 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -83,13 +83,6 @@ struct btrace_insn
typedef struct btrace_insn btrace_insn_s;
DEF_VEC_O (btrace_insn_s);
-/* A doubly-linked list of branch trace function segments. */
-struct btrace_func_link
-{
- struct btrace_function *prev;
- struct btrace_function *next;
-};
-
/* Flags for btrace function segments. */
enum btrace_function_flag
{
@@ -144,13 +137,12 @@ struct btrace_function
struct minimal_symbol *msym;
struct symbol *sym;
- /* The previous and next segment belonging to the same function.
- If a function calls another function, the former will have at least
- two segments: one before the call and another after the return. */
- struct btrace_func_link segment;
-
- /* The previous and next function in control flow order. */
- struct btrace_func_link flow;
+ /* The function segment numbers of the previous and next segment belonging to
+ the same function. If a function calls another function, the former will
+ have at least two segments: one before the call and another after the
+ return. Will be zero if there is no such function segment. */
+ unsigned int prev_segment;
+ unsigned int next_segment;
/* The function segment number of the directly preceding function segment in
a (fake) call stack. Will be zero if there is no such function segment in
diff --git a/gdb/python/py-record-btrace.c b/gdb/python/py-record-btrace.c
index 14ad5b7..2c8132f 100644
--- a/gdb/python/py-record-btrace.c
+++ b/gdb/python/py-record-btrace.c
@@ -478,10 +478,10 @@ btpy_call_prev_sibling (PyObject *self, void *closure)
if (func == NULL)
Py_RETURN_NONE;
- if (func->segment.prev == NULL)
+ if (func->prev_segment == 0)
Py_RETURN_NONE;
- return btpy_call_new (obj->ptid, func->segment.prev->number);
+ return btpy_call_new (obj->ptid, func->prev_segment);
}
/* Implementation of BtraceFunctionCall.next_sibling [BtraceFunctionCall].
@@ -500,10 +500,10 @@ btpy_call_next_sibling (PyObject *self, void *closure)
if (func == NULL)
Py_RETURN_NONE;
- if (func->segment.next == NULL)
+ if (func->next_segment == 0)
Py_RETURN_NONE;
- return btpy_call_new (obj->ptid, func->segment.next->number);
+ return btpy_call_new (obj->ptid, func->next_segment);
}
/* Python rich compare function to allow for equality and inequality checks
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index 791963c..7ba3844 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -1605,8 +1605,9 @@ record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
bfun = cache->bfun;
gdb_assert (bfun != NULL);
- while (bfun->segment.prev != NULL)
- bfun = bfun->segment.prev;
+ while (bfun->prev_segment != 0)
+ bfun = VEC_index (btrace_fun_s, cache->tp->btrace.functions,
+ bfun->prev_segment - 1);
code = get_frame_func (this_frame);
special = bfun->number;
--
2.7.4