Diffstat (limited to 'src/profiler.c')
-rw-r--r--  src/profiler.c | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/src/profiler.c b/src/profiler.c
index 312574d7526..4c7812aa778 100644
--- a/src/profiler.c
+++ b/src/profiler.c
@@ -55,7 +55,7 @@ make_log (EMACS_INT heap_size, EMACS_INT max_stack_depth)
ptrdiff_t i = ASIZE (h->key_and_value) >> 1;
while (i > 0)
set_hash_key_slot (h, --i,
- Fmake_vector (make_number (max_stack_depth), Qnil));
+ Fmake_vector (make_fixnum (max_stack_depth), Qnil));
return log;
}
@@ -158,13 +158,13 @@ record_backtrace (log_t *log, EMACS_INT count)
{
EMACS_INT old_val = XINT (HASH_VALUE (log, j));
EMACS_INT new_val = saturated_add (old_val, count);
- set_hash_value_slot (log, j, make_number (new_val));
+ set_hash_value_slot (log, j, make_fixnum (new_val));
}
else
{ /* BEWARE! hash_put in general can allocate memory.
But currently it only does that if log->next_free is -1. */
eassert (0 <= log->next_free);
- ptrdiff_t j = hash_put (log, backtrace, make_number (count), hash);
+ ptrdiff_t j = hash_put (log, backtrace, make_fixnum (count), hash);
/* Let's make sure we've put `backtrace' right where it
already was to start with. */
eassert (index == j);
@@ -266,7 +266,7 @@ setup_cpu_timer (Lisp_Object sampling_interval)
struct timespec interval;
int billion = 1000000000;

- if (! RANGED_INTEGERP (1, sampling_interval,
+ if (! RANGED_FIXNUMP (1, sampling_interval,
(TYPE_MAXIMUM (time_t) < EMACS_INT_MAX / billion
? ((EMACS_INT) TYPE_MAXIMUM (time_t) * billion
+ (billion - 1))
@@ -422,8 +422,8 @@ Before returning, a new log is allocated for future samples. */)
cpu_log = (profiler_cpu_running
? make_log (profiler_log_size, profiler_max_stack_depth)
: Qnil);
- Fputhash (Fmake_vector (make_number (1), QAutomatic_GC),
- make_number (cpu_gc_count),
+ Fputhash (Fmake_vector (make_fixnum (1), QAutomatic_GC),
+ make_fixnum (cpu_gc_count),
result);
cpu_gc_count = 0;
return result;