FIX freq and cpu calculation errors
commit da9a6b0c46
parent e7d5b63c38

@@ -6,6 +6,7 @@ local cpu = require 'sys'
 local pure = require 'pure'

 local __math_floor = math.floor
+local __string_format = string.format

 return function(update_freq, main_state, config, common, width, point)
    local dial_inner_radius = 30
@@ -89,6 +90,27 @@ return function(update_freq, main_state, config, common, width, point)
       }
    end

+   local read_load = function(core_topology, phy_core_id, thread_id)
+      local i = core_topology[phy_core_id].cpus[thread_id].lgl_cpu_id
+      return mod_state[i].percent_active * 100
+   end
+
+   local get_load_functions = function(cores, nthreads, core_topology)
+      if nthreads == 1 then
+         local update = function(c)
+            dial.set(cores[c].loads, read_load(core_topology, c, 1))
+         end
+         return update, dial.draw_static, dial.draw_dynamic
+      else
+         local update = function(c)
+            for t = 1, nthreads do
+               compound_dial.set(cores[c].loads, t, read_load(core_topology, c, t))
+            end
+         end
+         return update, compound_dial.draw_static, compound_dial.draw_dynamic
+      end
+   end
+
    local mk_core_group = function(group_config, y)
       local nthreads = group_config.threads
       local core_topology = topology[nthreads]
@@ -98,32 +120,8 @@ return function(update_freq, main_state, config, common, width, point)
          create_core, core_cols, y, nthreads, group_config.padding
       )
       local cores = pure.map_n(_create_core, ncores)
-      local group_loads = mod_state[nthreads]
-      local update_loads
-      local draw_static_loads
-      local draw_dynamic_loads
-      if nthreads == 1 then
-         update_loads = function(c)
-            dial.set(
-               cores[c].loads,
-               group_loads[c][1].percent_active * 100
-            )
-         end
-         draw_static_loads = dial.draw_static
-         draw_dynamic_loads = dial.draw_dynamic
-      else
-         update_loads = function(c)
-            for t = 1, nthreads do
-               compound_dial.set(
-                  cores[c].loads,
-                  t,
-                  group_loads[c][t].percent_active * 100
-               )
-            end
-         end
-         draw_static_loads = compound_dial.draw_static
-         draw_dynamic_loads = compound_dial.draw_dynamic
-      end
+      local update_loads, draw_static_loads, draw_dynamic_loads =
+         get_load_functions(cores, nthreads, core_topology)
       local update = function()
          for c = 1, ncores do
            local temp = __math_floor(
@@ -159,13 +157,23 @@ return function(update_freq, main_state, config, common, width, point)
    -- HWP status

    local mk_hwp_freq = function(y)
-      local hwp_paths = cpu.get_hwp_paths()
+      local hwp_paths = cpu.get_hwp_paths(topology)
+      local freq_labels
+      local cpu_group_map = cpu.topology_to_cpu_map(topology)
+      local format_label = function(group_id)
+         return __string_format('Ave Freq (%i)', group_id)
+      end
+      if #topology == 1 then
+         freq_labels = {'Ave Freq'}
+      else
+         freq_labels = pure.map_n(format_label, #topology)
+      end
       local cpu_status = common.make_text_rows(
          point.x,
          y,
          width,
          text_spacing,
-         {'HWP Preference', 'Ave Freq'}
+         pure.concat({'HWP Preference'}, freq_labels)
       )
       local update = function()
         -- For some reason this call is slow (querying anything with pstate in
@@ -174,13 +182,18 @@ return function(update_freq, main_state, config, common, width, point)
          if main_state.trigger10 == 0 then
            common.text_rows_set(cpu_status, 1, cpu.read_hwp(hwp_paths))
         end
-        common.text_rows_set(cpu_status, 2, cpu.read_freq())
+        local ave_freqs = cpu.read_ave_freqs(topology, cpu_group_map)
+        local i = 2
+        for group_id, _ in pairs(topology) do
+           common.text_rows_set(cpu_status, i, ave_freqs[group_id])
+           i = i + 1
+        end
      end
      local static = pure.partial(common.text_rows_draw_static, cpu_status)
      local dynamic = pure.partial(common.text_rows_draw_dynamic, cpu_status)
      return common.mk_acc(
         width,
-        text_spacing,
+        text_spacing * #topology,
         update,
         static,
         dynamic
@@ -203,12 +216,8 @@ return function(update_freq, main_state, config, common, width, point)
      )
      local update = function()
         local s = 0
-        for g = 1, #mod_state do
-           for c = 1, #mod_state[g] do
-              for t = 1, #mod_state[g][c] do
-                 s = s + mod_state[g][c][t].percent_active
-              end
-           end
+        for i = 1, ncpus do
+           s = s + mod_state[i].percent_active
         end
         common.tagged_percent_timeseries_set(total_load, s / ncpus * 100)
      end
@@ -269,10 +278,10 @@ return function(update_freq, main_state, config, common, width, point)
      point = point,
      width = width,
      set_state = update_state,
-     top = {
-        table.unpack(pure.map(core_group_section, config.core_groups)),
-        -- {mk_hwp_freq, config.show_stats, sep_spacing},
-     },
+     top = pure.concat(
+        pure.map(core_group_section, config.core_groups),
+        {{mk_hwp_freq, config.show_stats, sep_spacing}}
+     ),
      common.mk_section(
         sep_spacing,
         {mk_load_plot, config.show_plot, geo.table.sec_break},
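
The widget-side change above reads per-thread load through the topology table instead of through a nested group/core/thread state: read_load resolves a (physical core, thread) pair to a logical CPU id and indexes the now-flat mod_state directly. Below is a minimal standalone sketch of that lookup, not part of the commit; the two-core/two-thread topology, the lgl_cpu_id assignments, and the load numbers are made up (in the widget they come from the sys module's topology and load state):

-- hypothetical flat load state, keyed by logical CPU id (lgl_cpu_id)
local mod_state = {
   {percent_active = 0.10}, -- lgl_cpu_id 1
   {percent_active = 0.20}, -- lgl_cpu_id 2
   {percent_active = 0.30}, -- lgl_cpu_id 3
   {percent_active = 0.40}, -- lgl_cpu_id 4
}

-- hypothetical topology group: two physical cores, two threads each
local core_topology = {
   {cpus = {{lgl_cpu_id = 1}, {lgl_cpu_id = 3}}}, -- physical core 1
   {cpus = {{lgl_cpu_id = 2}, {lgl_cpu_id = 4}}}, -- physical core 2
}

-- same shape as read_load in the diff above
local read_load = function(core_topology, phy_core_id, thread_id)
   local i = core_topology[phy_core_id].cpus[thread_id].lgl_cpu_id
   return mod_state[i].percent_active * 100
end

print(read_load(core_topology, 1, 2)) -- thread 2 of core 1 is logical CPU 3, so this prints 0.30 * 100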
|
|
src/sys.lua | 193
@@ -172,7 +172,7 @@ end
 --------------------------------------------------------------------------------
 -- cpu

--- ASSUME nproc and lscpu will always be available
+-- ASSUME lscpu will always be available

 M.get_core_number = function()
    return __tonumber(i_o.read_file('/proc/cpuinfo', 'cpu cores%s+:%s(%d+)'))
@@ -180,13 +180,10 @@ end

 M.get_cpu_number = function(topology)
    local n = 0
-   for g = 1, #topology do
-      for c = 1, #topology[g] do
-         n = n + #topology[g][c].cpus
-      end
+   for g, c in pairs(topology) do
+      n = n + g * #c
    end
    return n
-   -- return __tonumber(i_o.execute_cmd('nproc', nil, '*n'))
 end

 local get_coretemp_dir = function()
@@ -208,25 +205,6 @@ M.get_core_threads = function()
    return pure.fmap_maybe(make_indexer, i_o.execute_cmd(cmd))
 end

-
--- map cores to integer values starting at 1; this is necessary since some cpus
--- don't report their core id's as a sequence of integers starting at 0
--- local get_core_id_indexer = function()
--- local make_indexer = pure.compose(
--- pure.array_to_map,
--- pure.partial(pure.imap, function(i, c) return {__tonumber(c), i} end),
--- pure.partial(gmatch_to_table1, '(%d+)')
--- )
--- return pure.fmap_maybe(
--- make_indexer,
--- i_o.execute_cmd('lscpu -p=CORE | tail -n+5 | sort -k1,1n')
--- )
--- end
-
--- conky_core_idx: the ID of the dial to be drawn for this core
--- conky_thread_idx: the ID of the individual indicator within one dial
--- corresponding to one thread in a core (starting at 1 for each core)
-
 local get_coretemp_mapper = function()
    local d = get_coretemp_dir()
    i_o.assert_exe_exists('grep')
@@ -289,91 +267,54 @@ M.get_core_topology = function()
    return pure.fmap_maybe(f, out)
 end

--- for t, k in pairs(get_core_topology()) do
--- print(t)
--- for x, y in pairs(k) do
--- print(x, y.phy_core_id, y.coretemp_path, #y.cpus)
--- -- for _, z in pairs(y.cpus) do
--- -- print(x,z.cpu,z.conky_cpu)
--- -- end
--- end
--- end
-
-local get_core_mappings = function()
-   local core_threads = M.get_core_threads()
-   local assign_cpus = function(x)
-      return {
-         cpu_id = __tonumber(x[1]),
-         core_id = __tonumber(x[2])
-      }
-   end
-   local map_ids = function(indexer)
-      local f = function(acc, next)
-         local cpu_id = __tonumber(next[1]) + 1
-         local core_id = __tonumber(next[2])
-         local conky_core_idx = indexer[core_id]
-         acc.mappings[cpu_id] = {
-            conky_core_idx = conky_core_idx,
-            conky_thread_id = acc.thread_ids[conky_core_idx],
-         }
-         acc.thread_ids[conky_core_idx] = acc.thread_ids[conky_core_idx] + 1
-         return acc
-      end
-      local cpu_to_core_map = pure.maybe(
-         {},
-         pure.partial(gmatch_to_tableN, '(%d+),(%d+)'),
-         i_o.execute_cmd('lscpu -y -p=cpu,core | grep -v \'^#\' | sort -k1,1n')
-      )
-      local init = {mappings = {}, _conky_core_index = 0, _thread_ids = {}}
-      return pure.reduce(f, init, cpu_to_core_map).mappings
-   end
-   -- return pure.fmap_maybe(map_ids, )
-end
+M.topology_to_cpu_map = function(topology)
+   local r = {}
+   for group_id, group in pairs(topology) do
+      for _, core in pairs(group) do
+         for _, cpu in pairs(core.cpus) do
+            r[cpu.lgl_cpu_id] = group_id
+         end
+      end
+   end
+   return r
+end

-M.get_coretemp_paths = function()
-   local get_paths = function(indexer)
-      local d = get_coretemp_dir()
-      i_o.assert_exe_exists('grep')
-      local get_labels = pure.compose(
-         i_o.execute_cmd,
-         pure.partial(__string_format, 'grep Core %s/temp*_label', true)
-      )
-      local to_tuple = function(m)
-         return {
-            indexer[__tonumber(m[2])],
-            __string_format('%s/%s_input', d, m[1])
-         }
-      end
-      local f = pure.compose(
-         pure.array_to_map,
-         pure.partial(pure.map, to_tuple),
-         pure.partial(gmatch_to_tableN, '/([^/\n]+)_label:Core (%d+)\n')
-      )
-      return pure.maybe({}, f, pure.fmap_maybe(get_labels, d))
-   end
-   return pure.maybe({}, get_paths, get_core_id_indexer())
-end
-
-local match_freq = function(c)
-   local f = 0
-   local n = 0
-   for s in __string_gmatch(c, '(%d+%.%d+)') do
-      f = f + __tonumber(s)
-      n = n + 1
-   end
-   return __string_format('%.0f Mhz', f / n)
-end
-
-M.read_freq = function()
+M.read_ave_freqs = function(topology, cpu_group_map)
    -- NOTE: Using the builtin conky functions for getting cpu freq seems to make
    -- the entire loop jittery due to high variance latency. Querying
    -- scaling_cur_freq in sysfs seems to do the same thing. It appears lscpu
    -- (which queries /proc/cpuinfo) is much faster and doesn't have this jittery
    -- problem.
-   return pure.maybe('N/A', match_freq, i_o.execute_cmd('lscpu -p=MHZ'))
+   local out = i_o.execute_cmd('lscpu -p=MHZ')
+   local init_freqs = function(v)
+      local r = {}
+      for group_id, _ in pairs(topology) do
+         r[group_id] = v
+      end
+      return r
+   end
+   if out == nil then
+      return init_freqs('N/A')
+   else
+      local ave_freqs = init_freqs(0)
+      local cpu_id = 1
+      for s in __string_gmatch(out, '(%d+%.%d+)') do
+         local group_id = cpu_group_map[cpu_id]
+         ave_freqs[group_id] = ave_freqs[group_id] + __tonumber(s)
+         cpu_id = cpu_id + 1
+      end
+      for group_id, _ in pairs(ave_freqs) do
+         ave_freqs[group_id] =
+            __string_format(
+               '%.0f Mhz',
+               ave_freqs[group_id] / (group_id * #topology[group_id])
+            )
+      end
+      return ave_freqs
+   end
 end

-M.get_hwp_paths = function()
+M.get_hwp_paths = function(topology)
    -- ASSUME this will never fail
    return pure.map_n(
       function(i)
@@ -381,7 +322,7 @@ M.get_hwp_paths = function()
            .. (i - 1)
            .. '/cpufreq/energy_performance_preference'
       end,
-      M.get_cpu_number()
+      M.get_cpu_number(topology)
    )
 end

@@ -410,23 +351,14 @@ M.read_hwp = function(hwp_paths)
 end

 M.init_cpu_loads = function(topo)
-   -- -- local m = get_core_mappings()
-   -- local topo = get_core_topology()
+   local ncpus = M.get_cpu_number(topo)
    local cpu_loads = {}
-   for core_group_id, core_group in pairs(topo) do
-      cpu_loads[core_group_id] = {}
-      for lgl_core_id, core in pairs(core_group) do
-         cpu_loads[core_group_id][lgl_core_id] = {}
-         for thread_id = 1, #core.cpus do
-            cpu_loads[core_group_id][lgl_core_id][thread_id] = {
-               active_prev = 0,
-               total_prev = 0,
-               percent_active = 0,
-               -- core_id = lgl_core_id,
-               -- thread_id = thread_id,
-            }
-         end
-      end
+   for lgl_cpu_id = 1, ncpus do
+      cpu_loads[lgl_cpu_id] = {
+         active_prev = 0,
+         total_prev = 0,
+         percent_active = 0,
+      }
    end
    return cpu_loads
 end
@@ -434,24 +366,17 @@ end
 M.read_cpu_loads = function(cpu_loads)
    local iter = io.lines('/proc/stat')
    iter() -- ignore first line
-   for group_id = 1, #cpu_loads do
-      local group = cpu_loads[group_id]
-      for lgl_core_id = 1, #group do
-         local core = group[lgl_core_id]
-         for thread_id = 1, #core do
-            local ln = iter()
-            local user, system, idle =
-               __string_match(ln, '%d+ (%d+) %d+ (%d+) (%d+)', 4)
-            local active = user + system
-            local total = active + idle
-            local thread = core[thread_id]
-            if total > thread.total_prev then -- guard against 1/0 errors
-               thread.percent_active =
-                  (active - thread.active_prev) / (total - thread.total_prev)
-               thread.active_prev = active
-               thread.total_prev = total
-            end
-         end
-      end
+   for lgl_cpu_id = 1, #cpu_loads do
+      local ln = iter()
+      local user, system, idle =
+         __string_match(ln, '%d+ (%d+) %d+ (%d+) (%d+)', 4)
+      local active = user + system
+      local total = active + idle
+      local cpu = cpu_loads[lgl_cpu_id]
+      if total > cpu.total_prev then -- guard against 1/0 errors
+         cpu.percent_active = (active - cpu.active_prev) / (total - cpu.total_prev)
+         cpu.active_prev = active
+         cpu.total_prev = total
+      end
    end
    return cpu_loads
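
The sys.lua changes above replace the single global average frequency with one average per topology group. Each group id in the topology table is that group's threads-per-core count, so a group's logical CPU count is group_id * #topology[group_id]: get_cpu_number now sums that product, topology_to_cpu_map tags each logical CPU with its group, and read_ave_freqs divides each group's MHz sum by it. Below is a minimal standalone sketch of that arithmetic, not part of the commit; the one-group topology and the per-CPU MHz readings are made up (the real values come from lscpu):

-- hypothetical topology: one group of 2-thread cores, two cores, logical CPUs 1-4
local topology = {
   [2] = {
      {cpus = {{lgl_cpu_id = 1}, {lgl_cpu_id = 3}}},
      {cpus = {{lgl_cpu_id = 2}, {lgl_cpu_id = 4}}},
   },
}

-- logical CPU id -> group id, the same mapping topology_to_cpu_map builds
local cpu_group_map = {}
for group_id, group in pairs(topology) do
   for _, core in pairs(group) do
      for _, cpu in pairs(core.cpus) do
         cpu_group_map[cpu.lgl_cpu_id] = group_id
      end
   end
end

-- pretend 'lscpu -p=MHZ' reported one frequency per logical CPU, in order
local mhz = {1200.0, 1400.0, 3000.0, 3400.0}
local sums = {}
for cpu_id, f in ipairs(mhz) do
   local g = cpu_group_map[cpu_id]
   sums[g] = (sums[g] or 0) + f
end
for group_id, s in pairs(sums) do
   -- divisor = threads per core * cores in group = logical CPUs in the group
   print(string.format('%.0f Mhz', s / (group_id * #topology[group_id])))
end
-- prints '2250 Mhz' for the single hypothetical group: (1200+1400+3000+3400)/4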