Compare commits

..

2 Commits

6 changed files with 292 additions and 93 deletions

View File

@ -87,10 +87,11 @@ let Memory =
let Network = let Network =
{ Type = { geometry : PlotGeo_.Type }, default.geometry = PlotGeo_::{=} } { Type = { geometry : PlotGeo_.Type }, default.geometry = PlotGeo_::{=} }
let CoreGroup = { threads : Natural, rows : Natural, padding : Natural }
let Processor = let Processor =
{ Type = { Type =
{ core_rows : Natural { core_groups : List CoreGroup
, core_padding : Natural
, show_stats : Bool , show_stats : Bool
, show_plot : Bool , show_plot : Bool
, table_rows : Natural , table_rows : Natural

View File

@ -439,6 +439,14 @@ return function(config)
) )
end end
-- Construct a single empty (background-only) dial: an arc spanning the
-- standard dial angles, styled with the indicator background pattern and a
-- threshold indicator.
M.make_blank_dial = function(x, y, radius, thickness, threshold)
   local bounds = geom.make_arc(x, y, radius, DIAL_THETA0, DIAL_THETA1)
   local arc_style = arc.config(
      style.line(thickness, CAP_BUTT),
      patterns.indicator.bg
   )
   return dial.make(bounds, arc_style, threshold_indicator(threshold))
end
M.make_dial = function(x, y, radius, thickness, threshold, _format, pre_function) M.make_dial = function(x, y, radius, thickness, threshold, _format, pre_function)
return { return {
dial = dial.make( dial = dial.make(

View File

@ -52,8 +52,14 @@ return function(update_freq, config, common, width, point)
vid_utilization = 0 vid_utilization = 0
} }
local runtime_status_file = config.dev_power..'/runtime_status'
local want_nvidia_query = config.show_temp or config.show_clock
or config.gpu_util or config.mem_util or config.vid_util
local update_state = function() local update_state = function()
if i_o.read_file(config.dev_power, nil, '*l') == 'on' then local is_active = i_o.read_file(runtime_status_file, nil, '*l') == 'active'
if is_active and want_nvidia_query then
local nvidia_settings_glob = i_o.execute_cmd(NV_QUERY) local nvidia_settings_glob = i_o.execute_cmd(NV_QUERY)
if nvidia_settings_glob == nil then if nvidia_settings_glob == nil then
mod_state.error = 'Error' mod_state.error = 'Error'
@ -68,6 +74,8 @@ return function(update_freq, config, common, width, point)
= __string_match(nvidia_settings_glob, NV_REGEX) = __string_match(nvidia_settings_glob, NV_REGEX)
mod_state.error = false mod_state.error = false
end end
elseif is_active then
mod_state.error = false
else else
mod_state.error = 'Off' mod_state.error = 'Off'
end end

View File

@ -1,3 +1,4 @@
local dial = require 'dial'
local compound_dial = require 'compound_dial' local compound_dial = require 'compound_dial'
local text_table = require 'text_table' local text_table = require 'text_table'
local i_o = require 'i_o' local i_o = require 'i_o'
@ -21,7 +22,9 @@ return function(update_freq, main_state, config, common, width, point)
----------------------------------------------------------------------------- -----------------------------------------------------------------------------
-- processor state -- processor state
local mod_state = cpu.read_cpu_loads(cpu.init_cpu_loads()) local topology = cpu.get_core_topology()
local mod_state = cpu.read_cpu_loads(cpu.init_cpu_loads(topology))
local ncpus = cpu.get_cpu_number(topology)
local update_state = function() local update_state = function()
mod_state = cpu.read_cpu_loads(mod_state) mod_state = cpu.read_cpu_loads(mod_state)
@ -30,35 +33,39 @@ return function(update_freq, main_state, config, common, width, point)
----------------------------------------------------------------------------- -----------------------------------------------------------------------------
-- cores (loads and temps) -- cores (loads and temps)
local ncpus = cpu.get_cpu_number() -- TODO add this back
local ncores = cpu.get_core_number() -- local is_evenly_distributed = function(ncores, rows)
local nthreads = ncpus / ncores -- if rows == 0 then
-- return false
-- elseif math.fmod(ncores, rows) == 0 then
-- return true
-- else
-- i_o.warnf('could not evenly distribute %i cores over %i rows', ncores, rows)
-- return false
-- end
-- end
local show_cores = false local create_core = function(core_cols, y, nthreads, padding, c)
if config.core_rows > 0 then
if math.fmod(ncores, config.core_rows) == 0 then
show_cores = true
else
i_o.warnf(
'could not evenly distribute %i cores over %i rows; disabling',
ncores,
config.core_rows
)
end
end
local create_core = function(core_cols, y, c)
local dial_x = point.x + local dial_x = point.x +
(core_cols == 1 (core_cols == 1
and (width / 2) and (width / 2)
or (config.core_padding + dial_outer_radius + or (padding + dial_outer_radius +
(width - 2 * (dial_outer_radius + config.core_padding)) (width - 2 * (dial_outer_radius + padding))
* math.fmod(c - 1, core_cols) / (core_cols - 1))) * math.fmod(c - 1, core_cols) / (core_cols - 1)))
local dial_y = y + dial_outer_radius + local dial_y = y + dial_outer_radius +
(2 * dial_outer_radius + dial_y_spacing) (2 * dial_outer_radius + dial_y_spacing)
* math.floor((c - 1) / core_cols) * math.floor((c - 1) / core_cols)
return { local loads
if nthreads == 1 then
local single_thickness = dial_outer_radius - dial_inner_radius
loads = common.make_blank_dial(
dial_x,
dial_y,
dial_outer_radius - single_thickness / 2,
single_thickness,
80
)
else
loads = common.make_compound_dial( loads = common.make_compound_dial(
dial_x, dial_x,
dial_y, dial_y,
@ -67,7 +74,10 @@ return function(update_freq, main_state, config, common, width, point)
dial_thickness, dial_thickness,
80, 80,
nthreads nthreads
), )
end
return {
loads = loads,
coretemp = common.make_text_circle( coretemp = common.make_text_circle(
dial_x, dial_x,
dial_y, dial_y,
@ -79,44 +89,65 @@ return function(update_freq, main_state, config, common, width, point)
} }
end end
local mk_cores = function(y) local mk_core_group = function(group_config, y)
local core_cols = ncores / config.core_rows local nthreads = group_config.threads
local cores = pure.map_n(pure.partial(create_core, core_cols, y), ncores) local core_topology = topology[nthreads]
local coretemp_paths = cpu.get_coretemp_paths() local ncores = #core_topology
if #coretemp_paths ~= ncores then local core_cols = ncores / group_config.rows
i_o.warnf('could not find all coretemp paths') local _create_core = pure.partial(
end create_core, core_cols, y, nthreads, group_config.padding
local update_coretemps = function() )
for conky_core_idx, path in pairs(coretemp_paths) do local cores = pure.map_n(_create_core, ncores)
local temp = __math_floor(0.001 * i_o.read_file(path, nil, '*n')) local group_loads = mod_state[nthreads]
common.text_circle_set(cores[conky_core_idx].coretemp, temp) local update_loads
end local draw_static_loads
end local draw_dynamic_loads
local update = function() if nthreads == 1 then
for _, load_data in pairs(mod_state) do update_loads = function(c)
compound_dial.set( dial.set(
cores[load_data.conky_core_idx].loads, cores[c].loads,
load_data.conky_thread_id, group_loads[c][1].percent_active * 100
load_data.percent_active * 100
) )
end end
update_coretemps() draw_static_loads = dial.draw_static
draw_dynamic_loads = dial.draw_dynamic
else
update_loads = function(c)
for t = 1, nthreads do
compound_dial.set(
cores[c].loads,
t,
group_loads[c][t].percent_active * 100
)
end
end
draw_static_loads = compound_dial.draw_static
draw_dynamic_loads = compound_dial.draw_dynamic
end
local update = function()
for c = 1, ncores do
local temp = __math_floor(
0.001 * i_o.read_file(core_topology[c].coretemp_path, nil, '*n')
)
common.text_circle_set(cores[c].coretemp, temp)
update_loads(c)
end
end end
local static = function(cr) local static = function(cr)
for i = 1, #cores do for i = 1, ncores do
common.text_circle_draw_static(cores[i].coretemp, cr) common.text_circle_draw_static(cores[i].coretemp, cr)
compound_dial.draw_static(cores[i].loads, cr) draw_static_loads(cores[i].loads, cr)
end end
end end
local dynamic = function(cr) local dynamic = function(cr)
for i = 1, #cores do for i = 1, ncores do
common.text_circle_draw_dynamic(cores[i].coretemp, cr) common.text_circle_draw_dynamic(cores[i].coretemp, cr)
compound_dial.draw_dynamic(cores[i].loads, cr) draw_dynamic_loads(cores[i].loads, cr)
end end
end end
return common.mk_acc( return common.mk_acc(
width, width,
(dial_outer_radius * 2 + dial_y_spacing) * config.core_rows (dial_outer_radius * 2 + dial_y_spacing) * group_config.rows
- dial_y_spacing, - dial_y_spacing,
update, update,
static, static,
@ -172,8 +203,12 @@ return function(update_freq, main_state, config, common, width, point)
) )
local update = function() local update = function()
local s = 0 local s = 0
for i = 1, #mod_state do for g = 1, #mod_state do
s = s + mod_state[i].percent_active for c = 1, #mod_state[g] do
for t = 1, #mod_state[g][c] do
s = s + mod_state[g][c][t].percent_active
end
end
end end
common.tagged_percent_timeseries_set(total_load, s / ncpus * 100) common.tagged_percent_timeseries_set(total_load, s / ncpus * 100)
end end
@ -225,14 +260,18 @@ return function(update_freq, main_state, config, common, width, point)
----------------------------------------------------------------------------- -----------------------------------------------------------------------------
-- main functions -- main functions
local core_group_section = function (g)
return {pure.partial(mk_core_group, g), true, text_spacing}
end
return { return {
header = 'PROCESSOR', header = 'PROCESSOR',
point = point, point = point,
width = width, width = width,
set_state = update_state, set_state = update_state,
top = { top = {
{mk_cores, show_cores, text_spacing}, table.unpack(pure.map(core_group_section, config.core_groups)),
{mk_hwp_freq, config.show_stats, sep_spacing}, -- {mk_hwp_freq, config.show_stats, sep_spacing},
}, },
common.mk_section( common.mk_section(
sep_spacing, sep_spacing,

View File

@ -189,6 +189,32 @@ M.flatten = function(xs)
return r return r
end end
-- Group a sequence into a table of buckets. For each element, `keyfun`
-- yields the bucket key and `valfun` yields the value appended to that
-- bucket. Returns a table mapping each key to a list of values.
M.group_with = function(keyfun, valfun, seq)
   local step = function(acc, x)
      local key = keyfun(x)
      local value = valfun(x)
      local bucket = acc[key]
      if bucket == nil then
         acc[key] = {value}
      else
         bucket[#bucket + 1] = value
      end
      return acc
   end
   return M.reduce(step, {}, seq)
end
-- Collect all elements of `seq` into a single bucket stored under the fixed
-- key `k`, returning {[k] = {elem1, elem2, ...}}.
-- NOTE(review): `k` is used as a constant key, not applied per-element like
-- `keyfun` in M.group_with — confirm this is intended by callers.
M.group_by = function(k, seq)
   local f = function(acc, next)
      if acc[k] == nil then
         acc[k] = {next}
      else
         -- BUGFIX: was `acc[k][#acc[k]] = next`, which overwrote the last
         -- element on every step so the bucket never grew past one element;
         -- append at #acc[k] + 1 instead.
         acc[k][#acc[k] + 1] = next
      end
      return acc
   end
   return M.reduce(f, {}, seq)
end
M.concat = function(...) M.concat = function(...)
return M.flatten({...}) return M.flatten({...})
end end

View File

@ -178,8 +178,15 @@ M.get_core_number = function()
return __tonumber(i_o.read_file('/proc/cpuinfo', 'cpu cores%s+:%s(%d+)')) return __tonumber(i_o.read_file('/proc/cpuinfo', 'cpu cores%s+:%s(%d+)'))
end end
M.get_cpu_number = function() M.get_cpu_number = function(topology)
return __tonumber(i_o.execute_cmd('nproc', nil, '*n')) local n = 0
for g = 1, #topology do
for c = 1, #topology[g] do
n = n + #topology[g][c].cpus
end
end
return n
-- return __tonumber(i_o.execute_cmd('nproc', nil, '*n'))
end end
local get_coretemp_dir = function() local get_coretemp_dir = function()
@ -188,27 +195,123 @@ local get_coretemp_dir = function()
return pure.fmap_maybe(dirname, s) return pure.fmap_maybe(dirname, s)
end end
-- map cores to integer values starting at 1; this is necessary since some cpus -- return a table with keys corresponding to physical core id and values to
-- don't report their core id's as a sequence of integers starting at 0 -- the number of threads of each core (usually 1 or 2)
local get_core_id_indexer = function() M.get_core_threads = function()
local cmd = 'lscpu -y -p=core | grep -v \'^#\' | sort -k1,1n | uniq -c'
local flip = function(c) return {__tonumber(c[2]), __tonumber(c[1])} end
local make_indexer = pure.compose( local make_indexer = pure.compose(
pure.array_to_map, pure.array_to_map,
pure.partial(pure.imap, function(i, c) return {__tonumber(c), i} end), pure.partial(pure.map, flip),
pure.partial(gmatch_to_table1, '(%d+)') pure.partial(gmatch_to_tableN, '(%d+) (%d+)')
)
return pure.fmap_maybe(
make_indexer,
i_o.execute_cmd('lscpu -p=CORE | tail -n+5 | sort | uniq')
) )
return pure.fmap_maybe(make_indexer, i_o.execute_cmd(cmd))
end end
-- map cores to integer values starting at 1; this is necessary since some cpus
-- don't report their core id's as a sequence of integers starting at 0
-- local get_core_id_indexer = function()
-- local make_indexer = pure.compose(
-- pure.array_to_map,
-- pure.partial(pure.imap, function(i, c) return {__tonumber(c), i} end),
-- pure.partial(gmatch_to_table1, '(%d+)')
-- )
-- return pure.fmap_maybe(
-- make_indexer,
-- i_o.execute_cmd('lscpu -p=CORE | tail -n+5 | sort -k1,1n')
-- )
-- end
-- conky_core_idx: the ID of the dial to be drawn for this core
-- conky_thread_idx: the ID of the individual indicator within one dial
-- corresponding to one thread in a core (starting at 1 for each core)
-- Build a map from physical core id to the matching coretemp input-file path
-- (e.g. 2 -> '<hwmon dir>/temp4_input') by grepping the 'Core N' labels in
-- the hwmon coretemp directory. Returns {} when the coretemp directory
-- cannot be located.
local get_coretemp_mapper = function()
local d = get_coretemp_dir()
i_o.assert_exe_exists('grep')
-- run `grep Core <dir>/temp*_label` for the resolved hwmon directory
-- NOTE(review): the trailing `true` is passed through pure.partial along
-- with the format string — confirm pure.partial/__string_format handle this
-- extra argument as intended.
local get_labels = pure.compose(
i_o.execute_cmd,
pure.partial(__string_format, 'grep Core %s/temp*_label', true)
)
-- m[1] is the temp file stem (e.g. 'temp4'), m[2] the physical core id;
-- produce a {core_id, input_path} pair for array_to_map
local to_tuple = function(m)
return {__tonumber(m[2]), __string_format('%s/%s_input', d, m[1])}
end
-- parse every '<stem>_label:Core <id>' grep hit into the final map
local to_map = pure.compose(
pure.array_to_map,
pure.partial(pure.map, to_tuple),
pure.partial(gmatch_to_tableN, '/([^/\n]+)_label:Core (%d+)\n')
)
-- if the coretemp dir was not found, skip the grep and yield {}
return pure.maybe({}, to_map, pure.fmap_maybe(get_labels, d))
end
-- Discover the CPU topology from `lscpu -p=core,cpu` output (sorted by
-- physical core id) and return cores grouped by thread count: a table whose
-- keys are thread counts (via pure.group_with on #cpus) and whose values are
-- lists of core records {phy_core_id, lgl_core_id, coretemp_path, cpus}.
-- "lgl" ids are sequential 1-based indices assigned here; "phy" ids are the
-- raw lscpu values. Returns nil if the lscpu command fails (fmap_maybe).
M.get_core_topology = function()
local coretemp_paths = get_coretemp_mapper()
-- turn one parsed lscpu row (x = {core, cpu}) into a cpu record; `i` is the
-- 1-based position in the sorted output and becomes the logical cpu id
local assign_cpu = function(i, x)
return {
lgl_cpu_id = i,
phy_core_id = __tonumber(x[1]),
phy_cpu_id = __tonumber(x[2])
}
end
-- fold cpu records into cores; relies on the input being sorted by physical
-- core id so that cpus of the same core are adjacent (see sort -k1,1n below)
local assign_core = function(acc, next)
local g = acc.grouped
local max_lgl_core_id = #g
local new_phy_core_id = next.phy_core_id
local new_cpu = {phy_cpu_id = next.phy_cpu_id, lgl_cpu_id = next.lgl_cpu_id}
if acc.prev_phy_core_id == new_phy_core_id then
-- same physical core as the previous row: append as another thread
local max_thread = #acc.grouped[max_lgl_core_id].cpus
acc.grouped[max_lgl_core_id].cpus[max_thread + 1] = new_cpu
else
-- new physical core: open a fresh core record with the next logical id
local new_lgl_core_id = max_lgl_core_id + 1
acc.grouped[new_lgl_core_id] = {
phy_core_id = new_phy_core_id,
lgl_core_id = new_lgl_core_id,
coretemp_path = coretemp_paths[new_phy_core_id],
cpus = {new_cpu}
}
acc.prev_phy_core_id = new_phy_core_id
end
return acc
end
-- grouping key for the final step: number of threads (cpus) in a core
local get_threads = function(x)
return #x.cpus
end
-- pipeline (applied right-to-left by pure.compose):
-- parse rows -> assign cpu ids -> fold into cores -> extract .grouped
-- -> group cores by thread count
local f = pure.compose(
pure.partial(pure.group_with, get_threads, pure.id),
pure.partial(pure.get, 'grouped'),
pure.partial(pure.reduce, assign_core, {prev_phy_core_id = -1, grouped = {}}),
pure.partial(pure.imap, assign_cpu),
pure.partial(gmatch_to_tableN, '(%d+),(%d+)')
)
-- -y/-p give a machine-readable listing; strip comment lines and sort
-- numerically by core id so assign_core sees each core's cpus contiguously
local out =
i_o.execute_cmd('lscpu -y -p=core,cpu | grep -v \'^#\' | sort -k1,1n')
return pure.fmap_maybe(f, out)
end
-- for t, k in pairs(get_core_topology()) do
-- print(t)
-- for x, y in pairs(k) do
-- print(x, y.phy_core_id, y.coretemp_path, #y.cpus)
-- -- for _, z in pairs(y.cpus) do
-- -- print(x,z.cpu,z.conky_cpu)
-- -- end
-- end
-- end
local get_core_mappings = function() local get_core_mappings = function()
local ncores = M.get_core_number() local core_threads = M.get_core_threads()
local assign_cpus = function(x)
return {
cpu_id = __tonumber(x[1]),
core_id = __tonumber(x[2])
}
end
local map_ids = function(indexer) local map_ids = function(indexer)
local f = function(acc, next) local f = function(acc, next)
local cpu_id = __tonumber(next[1]) + 1 local cpu_id = __tonumber(next[1]) + 1
local core_id = next[2] local core_id = __tonumber(next[2])
local conky_core_idx = indexer[__tonumber(core_id)] local conky_core_idx = indexer[core_id]
acc.mappings[cpu_id] = { acc.mappings[cpu_id] = {
conky_core_idx = conky_core_idx, conky_core_idx = conky_core_idx,
conky_thread_id = acc.thread_ids[conky_core_idx], conky_thread_id = acc.thread_ids[conky_core_idx],
@ -219,12 +322,12 @@ local get_core_mappings = function()
local cpu_to_core_map = pure.maybe( local cpu_to_core_map = pure.maybe(
{}, {},
pure.partial(gmatch_to_tableN, '(%d+),(%d+)'), pure.partial(gmatch_to_tableN, '(%d+),(%d+)'),
i_o.execute_cmd('lscpu -p=cpu,CORE | tail -n+5') i_o.execute_cmd('lscpu -y -p=cpu,core | grep -v \'^#\' | sort -k1,1n')
) )
local init = {mappings = {}, thread_ids = pure.rep(ncores, 1)} local init = {mappings = {}, _conky_core_index = 0, _thread_ids = {}}
return pure.reduce(f, init, cpu_to_core_map).mappings return pure.reduce(f, init, cpu_to_core_map).mappings
end end
return pure.fmap_maybe(map_ids, get_core_id_indexer()) -- return pure.fmap_maybe(map_ids, )
end end
M.get_coretemp_paths = function() M.get_coretemp_paths = function()
@ -306,35 +409,49 @@ M.read_hwp = function(hwp_paths)
return mixed and 'Mixed' or (HWP_MAP[hwp_pref] or 'Unknown') return mixed and 'Mixed' or (HWP_MAP[hwp_pref] or 'Unknown')
end end
M.init_cpu_loads = function() M.init_cpu_loads = function(topo)
local m = get_core_mappings() -- -- local m = get_core_mappings()
-- local topo = get_core_topology()
local cpu_loads = {} local cpu_loads = {}
for cpu_id, core in pairs(m) do for core_group_id, core_group in pairs(topo) do
cpu_loads[cpu_id] = { cpu_loads[core_group_id] = {}
for lgl_core_id, core in pairs(core_group) do
cpu_loads[core_group_id][lgl_core_id] = {}
for thread_id = 1, #core.cpus do
cpu_loads[core_group_id][lgl_core_id][thread_id] = {
active_prev = 0, active_prev = 0,
total_prev = 0, total_prev = 0,
percent_active = 0, percent_active = 0,
conky_core_idx = core.conky_core_idx, -- core_id = lgl_core_id,
conky_thread_id = core.conky_thread_id, -- thread_id = thread_id,
} }
end end
end
end
return cpu_loads return cpu_loads
end end
M.read_cpu_loads = function(cpu_loads) M.read_cpu_loads = function(cpu_loads)
local ncpus = #cpu_loads
local iter = io.lines('/proc/stat') local iter = io.lines('/proc/stat')
iter() -- ignore first line iter() -- ignore first line
for i = 1, ncpus do for group_id = 1, #cpu_loads do
local group = cpu_loads[group_id]
for lgl_core_id = 1, #group do
local core = group[lgl_core_id]
for thread_id = 1, #core do
local ln = iter() local ln = iter()
local user, system, idle = __string_match(ln, '%d+ (%d+) %d+ (%d+) (%d+)', 4) local user, system, idle =
__string_match(ln, '%d+ (%d+) %d+ (%d+) (%d+)', 4)
local active = user + system local active = user + system
local total = active + idle local total = active + idle
local c = cpu_loads[i] local thread = core[thread_id]
if total > c.total_prev then -- guard against 1/0 errors if total > thread.total_prev then -- guard against 1/0 errors
c.percent_active = (active - c.active_prev) / (total - c.total_prev) thread.percent_active =
c.active_prev = active (active - thread.active_prev) / (total - thread.total_prev)
c.total_prev = total thread.active_prev = active
thread.total_prev = total
end
end
end end
end end
return cpu_loads return cpu_loads