Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions include/rtdef.h
Original file line number Diff line number Diff line change
Expand Up @@ -935,6 +935,8 @@ struct rt_thread
#ifdef RT_USING_CPU_USAGE_TRACER
rt_ubase_t user_time; /**< Ticks on user */
rt_ubase_t system_time; /**< Ticks on system */
rt_ubase_t total_time_prev; /**< Previous total ticks snapshot */
rt_uint8_t cpu_usage; /**< Recent CPU usage in percent */
#endif /* RT_USING_CPU_USAGE_TRACER */

#ifdef RT_USING_MEM_PROTECTION
Expand Down
11 changes: 11 additions & 0 deletions src/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -199,6 +199,17 @@ config RT_USING_CPU_USAGE_TRACER
percentage information through the list thread command.
It will automatically integrate with the scheduler to track thread execution time.

if RT_USING_CPU_USAGE_TRACER
config RT_CPU_USAGE_CALC_INTERVAL_MS
int "CPU usage sampling interval (ms)"
default 200
range 50 5000
help
Sampling window for thread CPU usage display.
A shorter interval updates faster but fluctuates more.
A longer interval is smoother but has higher display latency.
endif

menu "kservice options"
config RT_USING_TINY_FFS
bool "Enable kservice to use tiny finding first bit set method"
Expand Down
179 changes: 151 additions & 28 deletions src/kservice.c
Original file line number Diff line number Diff line change
Expand Up @@ -572,49 +572,172 @@ rt_err_t rt_backtrace_thread(rt_thread_t thread)
}

#ifdef RT_USING_CPU_USAGE_TRACER

/* Fallback when the Kconfig option is not defined (e.g. non-Kconfig builds). */
#ifndef RT_CPU_USAGE_CALC_INTERVAL_MS
#define RT_CPU_USAGE_CALC_INTERVAL_MS 200U
#endif

/* Sampling window converted from milliseconds to ticks, rounded up so the
 * window is never zero ticks even for coarse tick rates. */
#define RT_CPU_USAGE_CALC_INTERVAL_TICK \
    ((RT_TICK_PER_SECOND * RT_CPU_USAGE_CALC_INTERVAL_MS + 999U) / 1000U)

/* Tick at which the last sampling window was closed. */
static rt_tick_t _cpu_usage_sample_tick;
/* RT_TRUE once the first snapshot has been taken (lazy init on first query). */
static rt_bool_t _cpu_usage_inited = RT_FALSE;
/* Per-CPU user/system/idle counters captured at the end of the last window.
 * NOTE(review): this state is read-modify-written without a dedicated lock;
 * concurrent callers of rt_thread_get_usage() could race on it — confirm
 * callers are serialized (e.g. only the shell's list_thread path). */
static struct rt_cpu_usage_stats _cpu_usage_prev_cpu_stat[RT_CPUS_NR];

/*
* Calculate total CPU-time delta for this sampling window and
* refresh per-CPU snapshots.
*
* Each counter delta is computed in rt_ubase_t width first, so wrap-around
* on 32-bit targets is handled naturally by unsigned arithmetic.
*/
static rt_uint64_t _cpu_usage_calc_total_delta(void)
{
rt_uint64_t total_delta = 0;
int i;

for (i = 0; i < RT_CPUS_NR; i++)
{
rt_cpu_t pcpu = rt_cpu_index(i);
rt_ubase_t user_now = pcpu->cpu_stat.user;
rt_ubase_t system_now = pcpu->cpu_stat.system;
rt_ubase_t idle_now = pcpu->cpu_stat.idle;

/* Per-counter delta first to avoid overflow artifacts after sum. */
rt_ubase_t user_delta = (rt_ubase_t)(user_now - _cpu_usage_prev_cpu_stat[i].user);
rt_ubase_t system_delta = (rt_ubase_t)(system_now - _cpu_usage_prev_cpu_stat[i].system);
rt_ubase_t idle_delta = (rt_ubase_t)(idle_now - _cpu_usage_prev_cpu_stat[i].idle);

total_delta += (rt_uint64_t)user_delta;
total_delta += (rt_uint64_t)system_delta;
total_delta += (rt_uint64_t)idle_delta;

_cpu_usage_prev_cpu_stat[i].user = user_now;
_cpu_usage_prev_cpu_stat[i].system = system_now;
_cpu_usage_prev_cpu_stat[i].idle = idle_now;
}

return total_delta;
}

/*
 * One-time lazy initialization of the usage bookkeeping: clear every
 * thread's per-window state, zero the per-CPU snapshots (so the first
 * window effectively spans since boot) and arm the sampling timestamp.
 */
static void _cpu_usage_snapshot_init(void)
{
    struct rt_object_information *info;
    rt_list_t *head;
    rt_list_t *iter;
    rt_base_t level;
    int cpu;

    info = rt_object_get_information(RT_Object_Class_Thread);
    head = &info->object_list;

    /* Reset the window state of every thread that already exists. */
    level = rt_spin_lock_irqsave(&info->spinlock);
    for (iter = head->next; iter != head; iter = iter->next)
    {
        struct rt_thread *thread =
            (struct rt_thread *)rt_list_entry(iter, struct rt_object, list);

        thread->total_time_prev = 0U;
        thread->cpu_usage = 0U;
    }
    rt_spin_unlock_irqrestore(&info->spinlock, level);

    /* Zero per-CPU snapshots so the first delta counts time since boot. */
    for (cpu = 0; cpu < RT_CPUS_NR; cpu++)
    {
        _cpu_usage_prev_cpu_stat[cpu].user = 0U;
        _cpu_usage_prev_cpu_stat[cpu].system = 0U;
        _cpu_usage_prev_cpu_stat[cpu].idle = 0U;
    }

    _cpu_usage_sample_tick = rt_tick_get();
    _cpu_usage_inited = RT_TRUE;
}

/*
 * Recompute every thread's cpu_usage for the window that just closed and
 * advance each thread's runtime snapshot.
 *
 * total_delta is the combined user/system/idle CPU-time of all CPUs over
 * the window; each thread's share of it becomes its percentage.
 */
static void _cpu_usage_refresh_threads(rt_uint64_t total_delta)
{
    struct rt_object_information *info;
    rt_list_t *head;
    rt_list_t *iter;
    rt_base_t level;

    info = rt_object_get_information(RT_Object_Class_Thread);
    head = &info->object_list;

    level = rt_spin_lock_irqsave(&info->spinlock);
    for (iter = head->next; iter != head; iter = iter->next)
    {
        struct rt_thread *thread =
            (struct rt_thread *)rt_list_entry(iter, struct rt_object, list);
        rt_ubase_t run_total = (rt_ubase_t)(thread->user_time + thread->system_time);
        rt_uint64_t run_delta =
            (rt_uint64_t)(rt_ubase_t)(run_total - thread->total_time_prev);

        if (total_delta > 0U)
        {
            rt_uint64_t percent = (run_delta * 100U) / total_delta;

            /* Clamp: sampling races could nudge a share above 100. */
            thread->cpu_usage = (rt_uint8_t)((percent > 100U) ? 100U : percent);
        }
        else
        {
            /* Empty window: nothing to attribute, report zero. */
            thread->cpu_usage = 0U;
        }

        thread->total_time_prev = run_total;
    }
    rt_spin_unlock_irqrestore(&info->spinlock, level);
}

/*
 * Close the current sampling window and refresh all per-thread usage
 * figures, but only when at least RT_CPU_USAGE_CALC_INTERVAL_TICK ticks
 * have elapsed since the last refresh. The very first call initializes
 * the bookkeeping and refreshes unconditionally.
 */
static void _cpu_usage_update(void)
{
    rt_tick_t now;
    rt_uint64_t window_total;
    rt_bool_t first_run = RT_FALSE;

    if (_cpu_usage_inited == RT_FALSE)
    {
        _cpu_usage_snapshot_init();
        first_run = RT_TRUE;
    }

    now = rt_tick_get();
    if (!first_run &&
        (rt_tick_get_delta(_cpu_usage_sample_tick) < RT_CPU_USAGE_CALC_INTERVAL_TICK))
    {
        /* Window still open: keep the cached per-thread values. */
        return;
    }

    window_total = _cpu_usage_calc_total_delta();
    _cpu_usage_refresh_threads(window_total);
    _cpu_usage_sample_tick = now;
}

/**
 * @brief Get thread CPU usage percentage in the recent sampling window
 *
 * This function returns per-thread CPU usage based on delta runtime in the
 * latest sampling window, rather than cumulative runtime since boot.
 *
 * @param thread Pointer to the thread object. Must not be NULL.
 *
 * @return The CPU usage percentage as an integer value (0-100).
 *         If the sampling interval has not elapsed yet, the previously
 *         cached value is returned (the initial value is 0).
 *
 * @note This function requires RT_USING_CPU_USAGE_TRACER to be enabled.
 * @note The percentage is calculated as
 *       (thread_time_delta * 100) / total_time_delta,
 *       where total_time_delta is the sum of user/system/idle deltas of all CPUs.
 * @note Sampling interval can be tuned with RT_CPU_USAGE_CALC_INTERVAL_MS.
 * @note If thread is NULL, an assertion will be triggered in debug builds.
 */
rt_uint8_t rt_thread_get_usage(rt_thread_t thread)
{
    RT_ASSERT(thread != RT_NULL);

    /* Refresh window statistics if the sampling interval has elapsed;
     * otherwise the cached per-thread value read below is still current. */
    _cpu_usage_update();

    return thread->cpu_usage;
}
#endif /* RT_USING_CPU_USAGE_TRACER */

Expand Down
7 changes: 7 additions & 0 deletions src/thread.c
Original file line number Diff line number Diff line change
Expand Up @@ -277,6 +277,13 @@ static rt_err_t _thread_init(struct rt_thread *thread,
thread->system_time = 0;
#endif

#ifdef RT_USING_CPU_USAGE_TRACER
thread->user_time = 0;
thread->system_time = 0;
thread->total_time_prev = 0;
thread->cpu_usage = 0;
#endif /* RT_USING_CPU_USAGE_TRACER */

#ifdef RT_USING_PTHREADS
thread->pthread_data = RT_NULL;
#endif /* RT_USING_PTHREADS */
Expand Down
Loading