From: Tim Wiederhake <tim.wiederhake@intel.com>
To: gdb-patches@sourceware.org
Cc: markus.t.metzger@intel.com
Subject: [PATCH 06/11] [SQUASH] btrace: Save function calls in a vector.
Date: Fri, 17 Feb 2017 13:27:00 -0000
Message-Id: <1487337989-6367-7-git-send-email-tim.wiederhake@intel.com>
In-Reply-To: <1487337989-6367-1-git-send-email-tim.wiederhake@intel.com>
References: <1487337989-6367-1-git-send-email-tim.wiederhake@intel.com>

This patch stands alone for easier review and is meant to be squashed
together for committing.  ChangeLog will be added to the squashed commit.

2017-02-17  Tim Wiederhake  <tim.wiederhake@intel.com>

---
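Note for review (this note and the sketch below are not part of the patch
itself): the change turns btinfo->functions from a vector of pointers to
heap-allocated struct btrace_function segments into a vector that stores the
segments by value and looks them up by index, with segment number N living at
index N - 1.  Growing such a vector may reallocate its storage, which is why
ftrace_new_function now documents that it invalidates held pointers and why
btrace_enable / btrace_fetch temporarily VEC_reserve a large capacity.  The
following minimal standalone C sketch only illustrates that storage scheme;
the names (segment, segment_vec) are made up for illustration and it
deliberately does not use GDB's VEC API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for struct btrace_function; only the fields needed here.  */
struct segment
{
  unsigned int number;		/* 1-based segment number.  */
  unsigned int insn_offset;	/* Number of the segment's first instruction.  */
};

/* A growable vector holding segments by value.  */
struct segment_vec
{
  struct segment *data;
  size_t len;
  size_t cap;
};

/* Append a zero-initialized segment and return a pointer to it.  Growing
   the vector may reallocate the storage, which invalidates all previously
   returned pointers -- the same caveat the patch documents for
   ftrace_new_function.  */

static struct segment *
segment_vec_push (struct segment_vec *vec)
{
  if (vec->len == vec->cap)
    {
      vec->cap = vec->cap == 0 ? 8 : vec->cap * 2;
      vec->data = realloc (vec->data, vec->cap * sizeof (*vec->data));
      if (vec->data == NULL)
	abort ();
    }

  struct segment *seg = &vec->data[vec->len++];
  memset (seg, 0, sizeof (*seg));
  seg->number = (unsigned int) vec->len;	/* Numbering starts at 1...  */
  return seg;
}

/* ...so segment NUMBER lives at vector index NUMBER - 1.  */

static struct segment *
segment_by_number (struct segment_vec *vec, unsigned int number)
{
  if (number == 0 || number > vec->len)
    return NULL;

  return &vec->data[number - 1];
}

int
main (void)
{
  struct segment_vec vec = { NULL, 0, 0 };

  for (unsigned int i = 0; i < 3; ++i)
    segment_vec_push (&vec)->insn_offset = i * 10 + 1;

  struct segment *seg = segment_by_number (&vec, 2);
  printf ("segment %u starts at instruction %u\n", seg->number,
	  seg->insn_offset);

  free (vec.data);
  return 0;
}

With this layout the instruction and call iterators can address a segment
directly by its index instead of chasing pointers.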
 gdb/btrace.c        | 87 +++++++++++++++++++++++++++++------------------------
 gdb/btrace.h        | 12 +++++---
 gdb/record-btrace.c |  2 +-
 3 files changed, 56 insertions(+), 45 deletions(-)

diff --git a/gdb/btrace.c b/gdb/btrace.c
index 1e110cc..cc22737 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -204,17 +204,20 @@ ftrace_function_switched (const struct btrace_function *bfun,
 
 /* Allocate and initialize a new branch trace function segment at the end of
    the trace.  MFUN and FUN are the symbol information we have for this
-   function.  */
+   function.  This invalidates all struct btrace_function pointers held.  */
 
 static struct btrace_function *
 ftrace_new_function (struct btrace_thread_info *btinfo,
                      struct minimal_symbol *mfun,
                      struct symbol *fun)
 {
-  struct btrace_function *prev, *bfun;
+  struct btrace_function *prev = NULL, *bfun;
 
-  prev = btinfo->end;
-  bfun = XCNEW (struct btrace_function);
+  if (!VEC_empty (btrace_fun_s, btinfo->functions))
+    prev = VEC_last (btrace_fun_s, btinfo->functions);
+
+  bfun = VEC_safe_push (btrace_fun_s, btinfo->functions, NULL);
+  memset (bfun, 0, sizeof (*bfun));
 
   bfun->msym = mfun;
   bfun->sym = fun;
@@ -1490,6 +1493,10 @@ btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
   DEBUG ("enable thread %s (%s)", print_thread_id (tp),
          target_pid_to_str (tp->ptid));
 
+  /* Temporarily prevent resizing the vector until reworking struct
+     btrace_function is complete.  */
+  VEC_reserve (btrace_fun_s, tp->btrace.functions, 1000000);
+
   tp->btrace.target = target_enable_btrace (tp->ptid, conf);
 
   /* We're done if we failed to enable tracing.  */
@@ -1550,6 +1557,7 @@ btrace_disable (struct thread_info *tp)
   btp->target = NULL;
 
   btrace_clear (tp);
+  VEC_free (btrace_fun_s, btp->functions);
 }
 
 /* See btrace.h.  */
@@ -1804,6 +1812,11 @@ btrace_fetch (struct thread_info *tp)
   /* We should not be called on running or exited threads.  */
   gdb_assert (can_access_registers_ptid (tp->ptid));
 
+  /* Temporarily prevent resizing the vector until reworking struct
+     btrace_function is complete.  */
+  if (btinfo->functions == NULL)
+    VEC_reserve (btrace_fun_s, btinfo->functions, 1000000);
+
   btrace_data_init (&btrace);
   cleanup = make_cleanup_btrace_data (&btrace);
 
@@ -1843,19 +1856,13 @@ btrace_fetch (struct thread_info *tp)
   /* Compute the trace, provided we have any.  */
   if (!btrace_data_empty (&btrace))
     {
-      struct btrace_function *bfun;
-
       /* Store the raw trace data.  The stored data will be cleared in
          btrace_clear, so we always append the new trace.  */
       btrace_data_append (&btinfo->data, &btrace);
       btrace_maint_clear (btinfo);
-      VEC_truncate (btrace_fun_p, btinfo->functions, 0);
 
       btrace_clear_history (btinfo);
       btrace_compute_ftrace (tp, &btrace);
-
-      for (bfun = btinfo->begin; bfun != NULL; bfun = bfun->flow.next)
-        VEC_safe_push (btrace_fun_p, btinfo->functions, bfun);
     }
 
   do_cleanups (cleanup);
@@ -1868,6 +1875,7 @@ btrace_clear (struct thread_info *tp)
 {
   struct btrace_thread_info *btinfo;
   struct btrace_function *it, *trash;
+  unsigned int length, i;
 
   DEBUG ("clear thread %s (%s)", print_thread_id (tp),
          target_pid_to_str (tp->ptid));
@@ -1878,17 +1886,18 @@ btrace_clear (struct thread_info *tp)
 
   btinfo = &tp->btrace;
 
-  VEC_free (btrace_fun_p, btinfo->functions);
-
-  it = btinfo->begin;
-  while (it != NULL)
+  length = VEC_length (btrace_fun_s, btinfo->functions);
+  for (i = 0; i < length; ++i)
     {
-      trash = it;
-      it = it->flow.next;
+      struct btrace_function *bfun;
+
+      bfun = VEC_index (btrace_fun_s, btinfo->functions, i);
 
-      xfree (trash);
+      VEC_free (btrace_insn_s, bfun->insn);
     }
 
+  VEC_truncate (btrace_fun_s, btinfo->functions, 0);
+
   btinfo->begin = NULL;
   btinfo->end = NULL;
   btinfo->ngaps = 0;
@@ -2237,7 +2246,7 @@ btrace_insn_get (const struct btrace_insn_iterator *it)
   unsigned int index, end;
 
   index = it->insn_index;
-  bfun = VEC_index (btrace_fun_p, it->btinfo->functions, it->call_index);
+  bfun = VEC_index (btrace_fun_s, it->btinfo->functions, it->call_index);
 
   /* Check if the iterator points to a gap in the trace.  */
   if (bfun->errcode != 0)
@@ -2258,7 +2267,7 @@ btrace_insn_get_error (const struct btrace_insn_iterator *it)
 {
   const struct btrace_function *bfun;
 
-  bfun = VEC_index (btrace_fun_p, it->btinfo->functions, it->call_index);
+  bfun = VEC_index (btrace_fun_s, it->btinfo->functions, it->call_index);
   return bfun->errcode;
 }
 
@@ -2269,7 +2278,7 @@ btrace_insn_number (const struct btrace_insn_iterator *it)
 {
   const struct btrace_function *bfun;
 
-  bfun = VEC_index (btrace_fun_p, it->btinfo->functions, it->call_index);
+  bfun = VEC_index (btrace_fun_s, it->btinfo->functions, it->call_index);
   return bfun->insn_offset + it->insn_index;
 }
 
@@ -2324,7 +2333,7 @@ btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
   const struct btrace_function *bfun;
   unsigned int index, steps;
 
-  bfun = VEC_index (btrace_fun_p, it->btinfo->functions, it->call_index);
+  bfun = VEC_index (btrace_fun_s, it->btinfo->functions, it->call_index);
   steps = 0;
   index = it->insn_index;
 
@@ -2406,7 +2415,7 @@ btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
   const struct btrace_function *bfun;
   unsigned int index, steps;
 
-  bfun = VEC_index (btrace_fun_p, it->btinfo->functions, it->call_index);
+  bfun = VEC_index (btrace_fun_s, it->btinfo->functions, it->call_index);
   steps = 0;
   index = it->insn_index;
 
@@ -2489,16 +2498,16 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it,
   const struct btrace_function *bfun;
   unsigned int upper, lower;
 
-  if (VEC_empty (btrace_fun_p, btinfo->functions))
+  if (VEC_empty (btrace_fun_s, btinfo->functions))
     return 0;
 
   lower = 0;
-  bfun = VEC_index (btrace_fun_p, btinfo->functions, lower);
+  bfun = VEC_index (btrace_fun_s, btinfo->functions, lower);
   if (number < bfun->insn_offset)
     return 0;
 
-  upper = VEC_length (btrace_fun_p, btinfo->functions) - 1;
-  bfun = VEC_index (btrace_fun_p, btinfo->functions, upper);
+  upper = VEC_length (btrace_fun_s, btinfo->functions) - 1;
+  bfun = VEC_index (btrace_fun_s, btinfo->functions, upper);
   if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
     return 0;
 
@@ -2507,7 +2516,7 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it,
     {
       const unsigned int average = lower + (upper - lower) / 2;
 
-      bfun = VEC_index (btrace_fun_p, btinfo->functions, average);
+      bfun = VEC_index (btrace_fun_s, btinfo->functions, average);
 
       if (number < bfun->insn_offset)
         {
@@ -2538,10 +2547,10 @@ btrace_ends_with_single_insn (const struct btrace_thread_info* btinfo)
 {
   const btrace_function *bfun;
 
-  if (VEC_empty (btrace_fun_p, btinfo->functions))
+  if (VEC_empty (btrace_fun_s, btinfo->functions))
     return 0;
 
-  bfun = VEC_last (btrace_fun_p, btinfo->functions);
+  bfun = VEC_last (btrace_fun_s, btinfo->functions);
   return ftrace_call_num_insn (bfun) == 1;
 }
 
@@ -2550,10 +2559,10 @@ btrace_ends_with_single_insn (const struct btrace_thread_info* btinfo)
 const struct btrace_function *
 btrace_call_get (const struct btrace_call_iterator *it)
 {
-  if (it->call_index >= VEC_length (btrace_fun_p, it->btinfo->functions))
+  if (it->call_index >= VEC_length (btrace_fun_s, it->btinfo->functions))
     return NULL;
 
-  return VEC_index (btrace_fun_p, it->btinfo->functions, it->call_index);
+  return VEC_index (btrace_fun_s, it->btinfo->functions, it->call_index);
 }
 
 /* See btrace.h.  */
@@ -2561,7 +2570,7 @@ btrace_call_get (const struct btrace_call_iterator *it)
 unsigned int
 btrace_call_number (const struct btrace_call_iterator *it)
 {
-  const unsigned int length = VEC_length (btrace_fun_p, it->btinfo->functions);
+  const unsigned int length = VEC_length (btrace_fun_s, it->btinfo->functions);
 
   if ((it->call_index == length) && btrace_ends_with_single_insn (it->btinfo))
     return length;
@@ -2575,13 +2584,13 @@ void
 btrace_call_begin (struct btrace_call_iterator *it,
                    const struct btrace_thread_info *btinfo)
 {
-  if (VEC_empty (btrace_fun_p, btinfo->functions))
+  if (VEC_empty (btrace_fun_s, btinfo->functions))
     error (_("No trace."));
 
   it->btinfo = btinfo;
   it->call_index = 0;
 
-  if ((VEC_length (btrace_fun_p, it->btinfo->functions) == 1)
+  if ((VEC_length (btrace_fun_s, it->btinfo->functions) == 1)
       && (btrace_ends_with_single_insn (btinfo)))
     it->call_index = 1;
 }
@@ -2592,11 +2601,11 @@ void
 btrace_call_end (struct btrace_call_iterator *it,
                  const struct btrace_thread_info *btinfo)
 {
-  if (VEC_empty (btrace_fun_p, btinfo->functions))
+  if (VEC_empty (btrace_fun_s, btinfo->functions))
     error (_("No trace."));
 
   it->btinfo = btinfo;
-  it->call_index = VEC_length (btrace_fun_p, it->btinfo->functions);
+  it->call_index = VEC_length (btrace_fun_s, it->btinfo->functions);
 }
 
 /* See btrace.h.  */
@@ -2604,7 +2613,7 @@ btrace_call_end (struct btrace_call_iterator *it,
 unsigned int
 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
 {
-  const unsigned int length = VEC_length (btrace_fun_p, it->btinfo->functions);
+  const unsigned int length = VEC_length (btrace_fun_s, it->btinfo->functions);
 
   if (it->call_index + stride < length - 1)
     {
@@ -2634,7 +2643,7 @@ btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
 unsigned int
 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
 {
-  const unsigned int length = VEC_length (btrace_fun_p, it->btinfo->functions);
+  const unsigned int length = VEC_length (btrace_fun_s, it->btinfo->functions);
   int steps;
 
   if (length == 0 || stride == 0)
@@ -2677,7 +2686,7 @@ btrace_find_call_by_number (struct btrace_call_iterator *it,
                             const struct btrace_thread_info *btinfo,
                             unsigned int number)
 {
-  const unsigned int length = VEC_length (btrace_fun_p, btinfo->functions);
+  const unsigned int length = VEC_length (btrace_fun_s, btinfo->functions);
 
   if ((number == 0) || (number > length))
     return 0;
diff --git a/gdb/btrace.h b/gdb/btrace.h
index 53df6e9..92435e7 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -187,8 +187,9 @@ struct btrace_function
   btrace_function_flags flags;
 };
 
-typedef struct btrace_function *btrace_fun_p;
-DEF_VEC_P (btrace_fun_p);
+/* A vector of branch trace function segments.  */
+typedef struct btrace_function btrace_fun_s;
+DEF_VEC_O (btrace_fun_s);
 
 /* A branch trace instruction iterator.  */
 struct btrace_insn_iterator
@@ -343,9 +344,10 @@ struct btrace_thread_info
   struct btrace_function *begin;
   struct btrace_function *end;
 
-  /* Vector of pointer to decoded function segments.  These are in execution
-     order with the first element == BEGIN and the last element == END.  */
-  VEC (btrace_fun_p) *functions;
+  /* Vector of decoded function call segments in execution flow order.  Note
+     that the numbering for btrace function segments starts with 1, so
+     function call segment i will be at index (i - 1).  */
+  VEC (btrace_fun_s) *functions;
 
   /* The function level offset.  When added to each function's LEVEL,
      this normalizes the function levels such that the smallest level
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index 83e65e7..87fbcba 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -1692,7 +1692,7 @@ record_btrace_frame_sniffer (const struct frame_unwind *self,
 
       replay = tp->btrace.replay;
       if (replay != NULL)
-        bfun = VEC_index (btrace_fun_p, tp->btrace.functions,
+        bfun = VEC_index (btrace_fun_s, tp->btrace.functions,
                           replay->call_index);
     }
   else
-- 
2.7.4