From: Stefan Monnier
Date: Fri, 10 Mar 2023 20:30:20 +0000 (-0500)
Subject: src/profiler.c: Share more code between CPU and Memory profilers
X-Git-Url: http://git.eshelyaron.com/gitweb/?a=commitdiff_plain;h=83be04c66ffaec86aee136b9a94979169d1ba68d;p=emacs.git

src/profiler.c: Share more code between CPU and Memory profilers

* src/profiler.c (struct profiler_log): New type.
(make_log): Use it.
(cpu, memory): New vars to replace cpu_log, memory_log, cpu_gc_count,
and mem_gc_count.
(add_sample): New function, extracted from `handle_profiler_signal`.
(handle_profiler_signal, malloc_probe): Use it.
(Fprofiler_cpu_start, Fprofiler_memory_start): Adjust call to `make_log`.
(export_log): New function, extracted from `Fprofiler_cpu_log`.
(Fprofiler_cpu_log, Fprofiler_memory_log): Use it.
(syms_of_profiler, syms_of_profiler_for_pdumper): Adjust to new
`cpu` and `memory` vars.
---

diff --git a/src/profiler.c b/src/profiler.c
index b96f7211934..d5a5a2cf5f3 100644
--- a/src/profiler.c
+++ b/src/profiler.c
@@ -49,7 +49,12 @@ static const struct hash_table_test hashtest_profiler =
    hashfn_profiler,
   };
 
-static Lisp_Object
+struct profiler_log {
+  Lisp_Object log;
+  EMACS_INT gc_count;
+};
+
+static struct profiler_log
 make_log (void)
 {
   /* We use a standard Elisp hash-table object, but we use it in
@@ -60,11 +65,13 @@ make_log (void)
     = clip_to_bounds (0, profiler_log_size, MOST_POSITIVE_FIXNUM);
   ptrdiff_t max_stack_depth
     = clip_to_bounds (0, profiler_max_stack_depth, PTRDIFF_MAX);;
-  Lisp_Object log = make_hash_table (hashtest_profiler, heap_size,
-                                     DEFAULT_REHASH_SIZE,
-                                     DEFAULT_REHASH_THRESHOLD,
-                                     Qnil, false);
-  struct Lisp_Hash_Table *h = XHASH_TABLE (log);
+  struct profiler_log log
+    = { make_hash_table (hashtest_profiler, heap_size,
+                         DEFAULT_REHASH_SIZE,
+                         DEFAULT_REHASH_THRESHOLD,
+                         Qnil, false),
+        0 };
+  struct Lisp_Hash_Table *h = XHASH_TABLE (log.log);
 
   /* What is special about our hash-tables is that the values are
      pre-filled with the vectors we'll use as keys.  */
@@ -222,13 +229,10 @@ static enum profiler_cpu_running
   profiler_cpu_running;
 
 /* Hash-table log of CPU profiler.  */
-static Lisp_Object cpu_log;
+static struct profiler_log cpu;
 
-/* Separate counter for the time spent in the GC.  */
-static EMACS_INT cpu_gc_count;
-
-/* Separate counter for the memory allocations during GC.  */
-static EMACS_INT mem_gc_count;
+/* Hash-table log of Memory profiler.  */
+static struct profiler_log memory;
 
 /* The current sampling interval in nanoseconds.  */
 static EMACS_INT current_sampling_interval;
@@ -236,30 +240,37 @@ static EMACS_INT current_sampling_interval;
 /* Signal handler for sampling profiler.  */
 
 static void
-handle_profiler_signal (int signal)
+add_sample (struct profiler_log *log, EMACS_INT count)
 {
-  if (EQ (backtrace_top_function (), QAutomatic_GC))
+  if (EQ (backtrace_top_function (), QAutomatic_GC)) /* bug#60237 */
     /* Special case the time-count inside GC because the hash-table
        code is not prepared to be used while the GC is running.
       More specifically it uses ASIZE at many places where it does
       not expect the ARRAY_MARK_FLAG to be set.  We could try and
       harden the hash-table code, but it doesn't seem worth the
       effort.  */
-    cpu_gc_count = saturated_add (cpu_gc_count, 1);
+    log->gc_count = saturated_add (log->gc_count, count);
   else
     {
-      EMACS_INT count = 1;
+      eassert (HASH_TABLE_P (log->log));
+      record_backtrace (XHASH_TABLE (log->log), count);
+    }
+}
+
+
+static void
+handle_profiler_signal (int signal)
+{
+  EMACS_INT count = 1;
 #if defined HAVE_ITIMERSPEC && defined HAVE_TIMER_GETOVERRUN
-      if (profiler_timer_ok)
-        {
-          int overruns = timer_getoverrun (profiler_timer);
-          eassert (overruns >= 0);
-          count += overruns;
-        }
-#endif
-      eassert (HASH_TABLE_P (cpu_log));
-      record_backtrace (XHASH_TABLE (cpu_log), count);
+  if (profiler_timer_ok)
+    {
+      int overruns = timer_getoverrun (profiler_timer);
+      eassert (overruns >= 0);
+      count += overruns;
     }
+#endif
+  add_sample (&cpu, count);
 }
 
 static void
@@ -346,11 +357,8 @@ See also `profiler-log-size' and `profiler-max-stack-depth'.  */)
   if (profiler_cpu_running)
     error ("CPU profiler is already running");
 
-  if (NILP (cpu_log))
-    {
-      cpu_gc_count = 0;
-      cpu_log = make_log ();
-    }
+  if (NILP (cpu.log))
+    cpu = make_log ();
 
   int status = setup_cpu_timer (sampling_interval);
   if (status < 0)
@@ -412,6 +420,21 @@ DEFUN ("profiler-cpu-running-p",
   return profiler_cpu_running ? Qt : Qnil;
 }
 
+static Lisp_Object
+export_log (struct profiler_log *log)
+{
+  Lisp_Object result = log->log;
+  Fputhash (CALLN (Fvector, QAutomatic_GC, Qnil),
+            make_fixnum (log->gc_count),
+            result);
+  /* Here we're making the log visible to Elisp, so it's not safe any
+     more for our use afterwards since we can't rely on its special
+     pre-allocated keys anymore.  So we have to allocate a new one.  */
+  if (profiler_cpu_running)
+    *log = make_log ();
+  return result;
+}
+
 DEFUN ("profiler-cpu-log", Fprofiler_cpu_log, Sprofiler_cpu_log,
        0, 0, 0,
        doc: /* Return the current cpu profiler log.
@@ -421,16 +444,7 @@ of functions, where the last few elements may be nil.
 Before returning, a new log is allocated for future samples.  */)
   (void)
 {
-  Lisp_Object result = cpu_log;
-  /* Here we're making the log visible to Elisp, so it's not safe any
-     more for our use afterwards since we can't rely on its special
-     pre-allocated keys anymore.  So we have to allocate a new one.  */
-  cpu_log = profiler_cpu_running ? make_log () : Qnil;
-  Fputhash (CALLN (Fvector, QAutomatic_GC, Qnil),
-            make_fixnum (cpu_gc_count),
-            result);
-  cpu_gc_count = 0;
-  return result;
+  return (export_log (&cpu));
 }
 
 #endif /* PROFILER_CPU_SUPPORT */
@@ -439,8 +453,6 @@ Before returning, a new log is allocated for future samples.  */)
 /* True if memory profiler is running.  */
 bool profiler_memory_running;
 
-static Lisp_Object memory_log;
-
 DEFUN ("profiler-memory-start", Fprofiler_memory_start, Sprofiler_memory_start,
        0, 0, 0,
        doc: /* Start/restart the memory profiler.
@@ -453,11 +465,8 @@ See also `profiler-log-size' and `profiler-max-stack-depth'.  */)
   if (profiler_memory_running)
     error ("Memory profiler is already running");
 
-  if (NILP (memory_log))
-    {
-      mem_gc_count = 0;
-      memory_log = make_log ();
-    }
+  if (NILP (memory.log))
+    memory = make_log ();
 
   profiler_memory_running = true;
 
@@ -496,16 +505,7 @@ of functions, where the last few elements may be nil.
 Before returning, a new log is allocated for future samples.  */)
   (void)
 {
-  Lisp_Object result = memory_log;
-  /* Here we're making the log visible to Elisp , so it's not safe any
-     more for our use afterwards since we can't rely on its special
-     pre-allocated keys anymore.  So we have to allocate a new one.  */
-  memory_log = profiler_memory_running ? make_log () : Qnil;
-  Fputhash (CALLN (Fvector, QAutomatic_GC, Qnil),
-            make_fixnum (mem_gc_count),
-            result);
-  mem_gc_count = 0;
-  return result;
+  return (export_log (&memory));
 }
 
 
@@ -515,19 +515,7 @@ Before returning, a new log is allocated for future samples.  */)
 void
 malloc_probe (size_t size)
 {
-  if (EQ (backtrace_top_function (), QAutomatic_GC)) /* bug#60237 */
-    /* Special case the malloc-count inside GC because the hash-table
-       code is not prepared to be used while the GC is running.
-       E.g. it uses ASIZE at many places where it does not expect
-       the ARRAY_MARK_FLAG to be set and in anyn case it'd modify the
-       heap behind the GC's back.  */
-    mem_gc_count = saturated_add (mem_gc_count, size);
-  else
-    {
-      eassert (HASH_TABLE_P (memory_log));
-      record_backtrace (XHASH_TABLE (memory_log),
-                        min (size, MOST_POSITIVE_FIXNUM));
-    }
+  add_sample (&memory, min (size, MOST_POSITIVE_FIXNUM));
 }
 
 DEFUN ("function-equal", Ffunction_equal, Sfunction_equal, 2, 2, 0,
@@ -612,16 +600,16 @@ to make room for new entries.  */);
 
 #ifdef PROFILER_CPU_SUPPORT
   profiler_cpu_running = NOT_RUNNING;
-  cpu_log = Qnil;
-  staticpro (&cpu_log);
+  cpu.log = Qnil;
+  staticpro (&cpu.log);
   defsubr (&Sprofiler_cpu_start);
   defsubr (&Sprofiler_cpu_stop);
   defsubr (&Sprofiler_cpu_running_p);
   defsubr (&Sprofiler_cpu_log);
 #endif
   profiler_memory_running = false;
-  memory_log = Qnil;
-  staticpro (&memory_log);
+  memory.log = Qnil;
+  staticpro (&memory.log);
  defsubr (&Sprofiler_memory_start);
  defsubr (&Sprofiler_memory_stop);
  defsubr (&Sprofiler_memory_running_p);
@@ -636,16 +624,16 @@ syms_of_profiler_for_pdumper (void)
   if (dumped_with_pdumper_p ())
     {
 #ifdef PROFILER_CPU_SUPPORT
-      cpu_log = Qnil;
+      cpu.log = Qnil;
 #endif
-      memory_log = Qnil;
+      memory.log = Qnil;
     }
   else
     {
 #ifdef PROFILER_CPU_SUPPORT
-      eassert (NILP (cpu_log));
+      eassert (NILP (cpu.log));
 #endif
-      eassert (NILP (memory_log));
+      eassert (NILP (memory.log));
     }
 }
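The change boils down to one pattern: each profiler keeps a struct profiler_log (its hash-table log plus a counter for samples taken while GC runs), every sample is funnelled through the shared add_sample, and export_log hands the log to Lisp and allocates a fresh one.  The standalone C sketch below illustrates only that shape; it is not the Emacs code, and its names and types are illustrative stand-ins (plain counters instead of the Lisp hash table, record_backtrace, and the QAutomatic_GC check).

/* Standalone sketch of the shared-log pattern: the CPU sampler and the
   malloc probe both funnel their counts through one add_sample () on
   their own per-profiler struct.  Counters stand in for the real
   Lisp hash-table log; in_gc stands in for "GC is running".  */
#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>

struct profiler_log {
  long long samples;   /* stand-in for the hash-table log */
  long long gc_count;  /* samples accumulated while GC runs */
};

static struct profiler_log cpu;     /* plays the role of `cpu` */
static struct profiler_log memory;  /* plays the role of `memory` */
static bool in_gc;                  /* stand-in for the QAutomatic_GC check */

/* Single code path shared by both profilers, like add_sample above.  */
static void
add_sample (struct profiler_log *log, long long count)
{
  if (in_gc)
    log->gc_count += count;   /* don't touch the log during GC */
  else
    log->samples += count;    /* record_backtrace () in the real code */
}

/* CPU side: what the sampling signal handler would call.  */
static void
handle_profiler_signal (void)
{
  add_sample (&cpu, 1);
}

/* Memory side: what the malloc probe would call.  */
static void
malloc_probe (size_t size)
{
  add_sample (&memory, (long long) size);
}

/* export_log analogue: hand the accumulated data out and start afresh.  */
static struct profiler_log
export_log (struct profiler_log *log)
{
  struct profiler_log result = *log;
  *log = (struct profiler_log) { 0, 0 };
  return result;
}

int
main (void)
{
  handle_profiler_signal ();   /* one CPU sample */
  malloc_probe (128);          /* allocation outside GC */
  in_gc = true;
  malloc_probe (64);           /* allocation during GC: goes to gc_count */
  struct profiler_log m = export_log (&memory);
  printf ("memory: %lld logged, %lld during GC\n", m.samples, m.gc_count);
  return 0;
}

Passing the log as an explicit parameter, rather than reading a per-profiler global, is what lets a single helper serve both profilers and lets export_log reset whichever log it was handed.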