| author | Vincent Ambo <tazjin@google.com> | 2020-05-27T00·26+0100 |
|---|---|---|
| committer | Vincent Ambo <tazjin@google.com> | 2020-05-27T00·26+0100 |
| commit | afe04691aca3f669f517adaeb5bd4a87a481fb4a (patch) | |
| tree | 60ae6c91a3959b2f6486256e26ff126e598d1e5d /src/stacktrace_x86-inl.h | |
Squashed 'third_party/glog/' content from commit 9ef754a3023
git-subtree-dir: third_party/glog
git-subtree-split: 9ef754a3023e6fd10f20fe53dfca96dd898182e3
Diffstat (limited to 'src/stacktrace_x86-inl.h')
-rw-r--r-- | src/stacktrace_x86-inl.h | 146 |
1 file changed, 146 insertions, 0 deletions
diff --git a/src/stacktrace_x86-inl.h b/src/stacktrace_x86-inl.h
new file mode 100644
index 000000000000..3b8d5a8282d5
--- /dev/null
+++ b/src/stacktrace_x86-inl.h
@@ -0,0 +1,146 @@
+// Copyright (c) 2000 - 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Produce stack trace
+
+#include <stdint.h>   // for uintptr_t
+
+#include "utilities.h"   // for OS_* macros
+
+#if !defined(OS_WINDOWS)
+#include <unistd.h>
+#include <sys/mman.h>
+#endif
+
+#include <stdio.h>  // for NULL
+#include "stacktrace.h"
+
+_START_GOOGLE_NAMESPACE_
+
+// Given a pointer to a stack frame, locate and return the calling
+// stack frame, or return NULL if no stack frame can be found. Perform sanity
+// checks (the strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template<bool STRICT_UNWINDING>
+static void **NextStackFrame(void **old_sp) {
+  void **new_sp = (void **) *old_sp;
+
+  // Check that the transition from frame pointer old_sp to frame
+  // pointer new_sp isn't clearly bogus
+  if (STRICT_UNWINDING) {
+    // With the stack growing downwards, older stack frame must be
+    // at a greater address than the current one.
+    if (new_sp <= old_sp) return NULL;
+    // Assume stack frames larger than 100,000 bytes are bogus.
+    if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return NULL;
+  } else {
+    // In the non-strict mode, allow discontiguous stack frames.
+    // (alternate-signal-stacks for example).
+    if (new_sp == old_sp) return NULL;
+    // And allow frames up to about 1MB.
+    if ((new_sp > old_sp)
+        && ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return NULL;
+  }
+  if ((uintptr_t)new_sp & (sizeof(void *) - 1)) return NULL;
+#ifdef __i386__
+  // On 64-bit machines, the stack pointer can be very close to
+  // 0xffffffff, so we explicitly check for a pointer into the
+  // last two pages in the address space
+  if ((uintptr_t)new_sp >= 0xffffe000) return NULL;
+#endif
+#if !defined(OS_WINDOWS)
+  if (!STRICT_UNWINDING) {
+    // Lax sanity checks cause a crash in 32-bit tcmalloc/crash_reason_test
+    // on AMD-based machines with VDSO-enabled kernels.
+    // Make an extra sanity check to ensure new_sp is readable.
+    // Note: NextStackFrame<false>() is only called while the program
+    //       is already on its last leg, so it's ok to be slow here.
+    static int page_size = getpagesize();
+    void *new_sp_aligned = (void *)((uintptr_t)new_sp & ~(page_size - 1));
+    if (msync(new_sp_aligned, page_size, MS_ASYNC) == -1)
+      return NULL;
+  }
+#endif
+  return new_sp;
+}
+
+// If you change this function, also change GetStackFrames below.
+int GetStackTrace(void** result, int max_depth, int skip_count) {
+  void **sp;
+
+#ifdef __GNUC__
+#if __GNUC__ * 100 + __GNUC_MINOR__ >= 402
+#define USE_BUILTIN_FRAME_ADDRESS
+#endif
+#endif
+
+#ifdef USE_BUILTIN_FRAME_ADDRESS
+  sp = reinterpret_cast<void**>(__builtin_frame_address(0));
+#elif defined(__i386__)
+  // Stack frame format:
+  //    sp[0]   pointer to previous frame
+  //    sp[1]   caller address
+  //    sp[2]   first argument
+  //    ...
+  sp = (void **)&result - 2;
+#elif defined(__x86_64__)
+  // __builtin_frame_address(0) can return the wrong address on gcc-4.1.0-k8
+  unsigned long rbp;
+  // Move the value of the register %rbp into the local variable rbp.
+  // We need 'volatile' to prevent this instruction from getting moved
+  // around during optimization to before function prologue is done.
+  // An alternative way to achieve this
+  // would be (before this __asm__ instruction) to call Noop() defined as
+  //   static void Noop() __attribute__ ((noinline));  // prevent inlining
+  //   static void Noop() { asm(""); }                 // prevent optimizing-away
+  __asm__ volatile ("mov %%rbp, %0" : "=r" (rbp));
+  // Arguments are passed in registers on x86-64, so we can't just
+  // offset from &result
+  sp = (void **) rbp;
+#endif
+
+  int n = 0;
+  while (sp && n < max_depth) {
+    if (*(sp+1) == (void *)0) {
+      // In 64-bit code, we often see a frame that
+      // points to itself and has a return address of 0.
+      break;
+    }
+    if (skip_count > 0) {
+      skip_count--;
+    } else {
+      result[n++] = *(sp+1);
+    }
+    // Use strict unwinding rules.
+    sp = NextStackFrame<true>(sp);
+  }
+  return n;
+}
+
+_END_GOOGLE_NAMESPACE_
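For reference, the sketch below shows how the `GetStackTrace()` entry point added by this commit might be driven from other code in the glog tree. It is a minimal illustration, not part of the commit: the internal `"stacktrace.h"` header being on the include path, the `google` namespace (the usual expansion of `_START_GOOGLE_NAMESPACE_`), and the `DumpCallers()` helper are all assumptions made for the example.

```cpp
// Hypothetical caller, assuming it is compiled inside the glog tree where the
// internal "stacktrace.h" header is available and the GOOGLE namespace macros
// expand to `namespace google`.
#include <cstdio>

#include "stacktrace.h"  // declares GetStackTrace() (internal glog header)

// Capture up to 32 raw return addresses from the current call stack and
// print them. skip_count = 1 drops the DumpCallers() frame itself.
void DumpCallers() {
  void* frames[32];
  const int depth = google::GetStackTrace(frames, 32, 1);
  for (int i = 0; i < depth; ++i) {
    // Each entry is an unsymbolized program counter; mapping it back to a
    // function name is a separate step (e.g. glog's symbolization support).
    std::printf("#%d %p\n", i, frames[i]);
  }
}
```

Because `GetStackTrace()` walks the frame-pointer chain via `NextStackFrame<true>()`, a caller like this only gets sensible results when the binary keeps frame pointers (i.e. is not built with `-fomit-frame-pointer`).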