Compare commits

..

No commits in common. "e7d5b63c388ec32af6cc74fa465f4e6c8a0f3bcd" and "ea6d5e60428b2f934aa3da3c946976535aca6f8e" have entirely different histories.

6 changed files with 88 additions and 287 deletions

View File

@ -87,11 +87,10 @@ let Memory =
let Network =
{ Type = { geometry : PlotGeo_.Type }, default.geometry = PlotGeo_::{=} }
let CoreGroup = { threads : Natural, rows : Natural, padding : Natural }
let Processor =
{ Type =
{ core_groups : List CoreGroup
{ core_rows : Natural
, core_padding : Natural
, show_stats : Bool
, show_plot : Bool
, table_rows : Natural

View File

@ -439,14 +439,6 @@ return function(config)
)
end
-- Build a dial consisting only of its background arc (patterns.indicator.bg)
-- and a threshold indicator — no foreground value arc is attached here.
M.make_blank_dial = function(x, y, radius, thickness, threshold)
   local outline = geom.make_arc(x, y, radius, DIAL_THETA0, DIAL_THETA1)
   local bg_config = arc.config(
      style.line(thickness, CAP_BUTT),
      patterns.indicator.bg
   )
   return dial.make(outline, bg_config, threshold_indicator(threshold))
end
M.make_dial = function(x, y, radius, thickness, threshold, _format, pre_function)
return {
dial = dial.make(

View File

@ -52,14 +52,8 @@ return function(update_freq, config, common, width, point)
vid_utilization = 0
}
local runtime_status_file = config.dev_power..'/runtime_status'
local want_nvidia_query = config.show_temp or config.show_clock
or config.gpu_util or config.mem_util or config.vid_util
local update_state = function()
local is_active = i_o.read_file(runtime_status_file, nil, '*l') == 'active'
if is_active and want_nvidia_query then
if i_o.read_file(config.dev_power, nil, '*l') == 'on' then
local nvidia_settings_glob = i_o.execute_cmd(NV_QUERY)
if nvidia_settings_glob == nil then
mod_state.error = 'Error'
@ -74,8 +68,6 @@ return function(update_freq, config, common, width, point)
= __string_match(nvidia_settings_glob, NV_REGEX)
mod_state.error = false
end
elseif is_active then
mod_state.error = false
else
mod_state.error = 'Off'
end

View File

@ -1,4 +1,3 @@
local dial = require 'dial'
local compound_dial = require 'compound_dial'
local text_table = require 'text_table'
local i_o = require 'i_o'
@ -22,9 +21,7 @@ return function(update_freq, main_state, config, common, width, point)
-----------------------------------------------------------------------------
-- processor state
local topology = cpu.get_core_topology()
local mod_state = cpu.read_cpu_loads(cpu.init_cpu_loads(topology))
local ncpus = cpu.get_cpu_number(topology)
local mod_state = cpu.read_cpu_loads(cpu.init_cpu_loads())
local update_state = function()
mod_state = cpu.read_cpu_loads(mod_state)
@ -33,39 +30,35 @@ return function(update_freq, main_state, config, common, width, point)
-----------------------------------------------------------------------------
-- cores (loads and temps)
-- TODO add this back
-- local is_evenly_distributed = function(ncores, rows)
-- if rows == 0 then
-- return false
-- elseif math.fmod(ncores, rows) == 0 then
-- return true
-- else
-- i_o.warnf('could not evenly distribute %i cores over %i rows', ncores, rows)
-- return false
-- end
-- end
local ncpus = cpu.get_cpu_number()
local ncores = cpu.get_core_number()
local nthreads = ncpus / ncores
local create_core = function(core_cols, y, nthreads, padding, c)
local show_cores = false
if config.core_rows > 0 then
if math.fmod(ncores, config.core_rows) == 0 then
show_cores = true
else
i_o.warnf(
'could not evenly distribute %i cores over %i rows; disabling',
ncores,
config.core_rows
)
end
end
local create_core = function(core_cols, y, c)
local dial_x = point.x +
(core_cols == 1
and (width / 2)
or (padding + dial_outer_radius +
(width - 2 * (dial_outer_radius + padding))
or (config.core_padding + dial_outer_radius +
(width - 2 * (dial_outer_radius + config.core_padding))
* math.fmod(c - 1, core_cols) / (core_cols - 1)))
local dial_y = y + dial_outer_radius +
(2 * dial_outer_radius + dial_y_spacing)
* math.floor((c - 1) / core_cols)
local loads
if nthreads == 1 then
local single_thickness = dial_outer_radius - dial_inner_radius
loads = common.make_blank_dial(
dial_x,
dial_y,
dial_outer_radius - single_thickness / 2,
single_thickness,
80
)
else
return {
loads = common.make_compound_dial(
dial_x,
dial_y,
@ -74,10 +67,7 @@ return function(update_freq, main_state, config, common, width, point)
dial_thickness,
80,
nthreads
)
end
return {
loads = loads,
),
coretemp = common.make_text_circle(
dial_x,
dial_y,
@ -89,65 +79,44 @@ return function(update_freq, main_state, config, common, width, point)
}
end
local mk_core_group = function(group_config, y)
local nthreads = group_config.threads
local core_topology = topology[nthreads]
local ncores = #core_topology
local core_cols = ncores / group_config.rows
local _create_core = pure.partial(
create_core, core_cols, y, nthreads, group_config.padding
)
local cores = pure.map_n(_create_core, ncores)
local group_loads = mod_state[nthreads]
local update_loads
local draw_static_loads
local draw_dynamic_loads
if nthreads == 1 then
update_loads = function(c)
dial.set(
cores[c].loads,
group_loads[c][1].percent_active * 100
)
local mk_cores = function(y)
local core_cols = ncores / config.core_rows
local cores = pure.map_n(pure.partial(create_core, core_cols, y), ncores)
local coretemp_paths = cpu.get_coretemp_paths()
if #coretemp_paths ~= ncores then
i_o.warnf('could not find all coretemp paths')
end
draw_static_loads = dial.draw_static
draw_dynamic_loads = dial.draw_dynamic
else
update_loads = function(c)
for t = 1, nthreads do
compound_dial.set(
cores[c].loads,
t,
group_loads[c][t].percent_active * 100
)
local update_coretemps = function()
for conky_core_idx, path in pairs(coretemp_paths) do
local temp = __math_floor(0.001 * i_o.read_file(path, nil, '*n'))
common.text_circle_set(cores[conky_core_idx].coretemp, temp)
end
end
draw_static_loads = compound_dial.draw_static
draw_dynamic_loads = compound_dial.draw_dynamic
end
local update = function()
for c = 1, ncores do
local temp = __math_floor(
0.001 * i_o.read_file(core_topology[c].coretemp_path, nil, '*n')
for _, load_data in pairs(mod_state) do
compound_dial.set(
cores[load_data.conky_core_idx].loads,
load_data.conky_thread_id,
load_data.percent_active * 100
)
common.text_circle_set(cores[c].coretemp, temp)
update_loads(c)
end
update_coretemps()
end
local static = function(cr)
for i = 1, ncores do
for i = 1, #cores do
common.text_circle_draw_static(cores[i].coretemp, cr)
draw_static_loads(cores[i].loads, cr)
compound_dial.draw_static(cores[i].loads, cr)
end
end
local dynamic = function(cr)
for i = 1, ncores do
for i = 1, #cores do
common.text_circle_draw_dynamic(cores[i].coretemp, cr)
draw_dynamic_loads(cores[i].loads, cr)
compound_dial.draw_dynamic(cores[i].loads, cr)
end
end
return common.mk_acc(
width,
(dial_outer_radius * 2 + dial_y_spacing) * group_config.rows
(dial_outer_radius * 2 + dial_y_spacing) * config.core_rows
- dial_y_spacing,
update,
static,
@ -203,12 +172,8 @@ return function(update_freq, main_state, config, common, width, point)
)
local update = function()
local s = 0
for g = 1, #mod_state do
for c = 1, #mod_state[g] do
for t = 1, #mod_state[g][c] do
s = s + mod_state[g][c][t].percent_active
end
end
for i = 1, #mod_state do
s = s + mod_state[i].percent_active
end
common.tagged_percent_timeseries_set(total_load, s / ncpus * 100)
end
@ -260,18 +225,14 @@ return function(update_freq, main_state, config, common, width, point)
-----------------------------------------------------------------------------
-- main functions
local core_group_section = function (g)
return {pure.partial(mk_core_group, g), true, text_spacing}
end
return {
header = 'PROCESSOR',
point = point,
width = width,
set_state = update_state,
top = {
table.unpack(pure.map(core_group_section, config.core_groups)),
-- {mk_hwp_freq, config.show_stats, sep_spacing},
{mk_cores, show_cores, text_spacing},
{mk_hwp_freq, config.show_stats, sep_spacing},
},
common.mk_section(
sep_spacing,

View File

@ -189,32 +189,6 @@ M.flatten = function(xs)
return r
end
-- Partition `seq` into a table of arrays: each element `e` is stored as
-- valfun(e) in the bucket keyed by keyfun(e), preserving sequence order
-- within each bucket.
M.group_with = function(keyfun, valfun, seq)
   local step = function(groups, elem)
      local key = keyfun(elem)
      local value = valfun(elem)
      local bucket = groups[key]
      if bucket == nil then
         groups[key] = {value}
      else
         bucket[#bucket + 1] = value
      end
      return groups
   end
   return M.reduce(step, {}, seq)
end
-- Group the elements of `seq` into arrays keyed by each element's value at
-- key `k` (ie elem[k]); elements keep their relative order within a bucket.
-- Returns a table mapping each distinct elem[k] to an array of elements.
--
-- NOTE(review): the previous version keyed every element by the constant `k`
-- itself and wrote to index `#acc[k]` (not `#acc[k] + 1`), so it produced a
-- single bucket containing only the last element; both defects are fixed,
-- making this consistent with the sibling `group_with` above. Confirm no
-- caller relied on the old (degenerate) behavior.
M.group_by = function(k, seq)
   local f = function(acc, elem)
      local key = elem[k]
      if acc[key] == nil then
         acc[key] = {elem}
      else
         acc[key][#acc[key] + 1] = elem
      end
      return acc
   end
   return M.reduce(f, {}, seq)
end
-- Concatenate any number of array-like tables into a single flat array.
M.concat = function(...)
   local lists = {...}
   return M.flatten(lists)
end

View File

@ -178,15 +178,8 @@ M.get_core_number = function()
return __tonumber(i_o.read_file('/proc/cpuinfo', 'cpu cores%s+:%s(%d+)'))
end
M.get_cpu_number = function(topology)
local n = 0
for g = 1, #topology do
for c = 1, #topology[g] do
n = n + #topology[g][c].cpus
end
end
return n
-- return __tonumber(i_o.execute_cmd('nproc', nil, '*n'))
-- Number of logical CPUs, as reported by nproc(1).
M.get_cpu_number = function()
   local out = i_o.execute_cmd('nproc', nil, '*n')
   return __tonumber(out)
end
local get_coretemp_dir = function()
@ -195,123 +188,27 @@ local get_coretemp_dir = function()
return pure.fmap_maybe(dirname, s)
end
-- return a table with keys corresponding to physical core id and values to
-- the number of threads of each core (usually 1 or 2); presumably returns
-- nil when lscpu cannot be run (failure propagated through fmap_maybe)
M.get_core_threads = function()
   -- `uniq -c` prefixes each distinct core id with its repetition count,
   -- yielding lines of the form '<nthreads> <core_id>'
   local cmd = 'lscpu -y -p=core | grep -v \'^#\' | sort -k1,1n | uniq -c'
   -- swap a parsed {count, core_id} pair into a {core_id, count} tuple
   local flip = function(c) return {__tonumber(c[2]), __tonumber(c[1])} end
   -- parse output into pairs, flip each, and collect into a map;
   -- NOTE(review): assumes gmatch_to_tableN is (pattern, string) -- confirm
   local make_indexer = pure.compose(
      pure.array_to_map,
      pure.partial(pure.map, flip),
      pure.partial(gmatch_to_tableN, '(%d+) (%d+)')
   )
   return pure.fmap_maybe(make_indexer, i_o.execute_cmd(cmd))
end
-- map cores to integer values starting at 1; this is necessary since some cpus
-- don't report their core id's as a sequence of integers starting at 0
-- local get_core_id_indexer = function()
-- local make_indexer = pure.compose(
-- pure.array_to_map,
-- pure.partial(pure.imap, function(i, c) return {__tonumber(c), i} end),
-- pure.partial(gmatch_to_table1, '(%d+)')
-- )
-- return pure.fmap_maybe(
-- make_indexer,
-- i_o.execute_cmd('lscpu -p=CORE | tail -n+5 | sort -k1,1n')
-- )
-- end
-- conky_core_idx: the ID of the dial to be drawn for this core
-- conky_thread_idx: the ID of the individual indicator within one dial
-- corresponding to one thread in a core (starting at 1 for each core)
local get_coretemp_mapper = function()
local d = get_coretemp_dir()
i_o.assert_exe_exists('grep')
local get_labels = pure.compose(
i_o.execute_cmd,
pure.partial(__string_format, 'grep Core %s/temp*_label', true)
)
local to_tuple = function(m)
return {__tonumber(m[2]), __string_format('%s/%s_input', d, m[1])}
end
local to_map = pure.compose(
local get_core_id_indexer = function()
local make_indexer = pure.compose(
pure.array_to_map,
pure.partial(pure.map, to_tuple),
pure.partial(gmatch_to_tableN, '/([^/\n]+)_label:Core (%d+)\n')
pure.partial(pure.imap, function(i, c) return {__tonumber(c), i} end),
pure.partial(gmatch_to_table1, '(%d+)')
)
return pure.maybe({}, to_map, pure.fmap_maybe(get_labels, d))
end
M.get_core_topology = function()
local coretemp_paths = get_coretemp_mapper()
local assign_cpu = function(i, x)
return {
lgl_cpu_id = i,
phy_core_id = __tonumber(x[1]),
phy_cpu_id = __tonumber(x[2])
}
end
local assign_core = function(acc, next)
local g = acc.grouped
local max_lgl_core_id = #g
local new_phy_core_id = next.phy_core_id
local new_cpu = {phy_cpu_id = next.phy_cpu_id, lgl_cpu_id = next.lgl_cpu_id}
if acc.prev_phy_core_id == new_phy_core_id then
local max_thread = #acc.grouped[max_lgl_core_id].cpus
acc.grouped[max_lgl_core_id].cpus[max_thread + 1] = new_cpu
else
local new_lgl_core_id = max_lgl_core_id + 1
acc.grouped[new_lgl_core_id] = {
phy_core_id = new_phy_core_id,
lgl_core_id = new_lgl_core_id,
coretemp_path = coretemp_paths[new_phy_core_id],
cpus = {new_cpu}
}
acc.prev_phy_core_id = new_phy_core_id
end
return acc
end
local get_threads = function(x)
return #x.cpus
end
local f = pure.compose(
pure.partial(pure.group_with, get_threads, pure.id),
pure.partial(pure.get, 'grouped'),
pure.partial(pure.reduce, assign_core, {prev_phy_core_id = -1, grouped = {}}),
pure.partial(pure.imap, assign_cpu),
pure.partial(gmatch_to_tableN, '(%d+),(%d+)')
return pure.fmap_maybe(
make_indexer,
i_o.execute_cmd('lscpu -p=CORE | tail -n+5 | sort | uniq')
)
local out =
i_o.execute_cmd('lscpu -y -p=core,cpu | grep -v \'^#\' | sort -k1,1n')
return pure.fmap_maybe(f, out)
end
-- for t, k in pairs(get_core_topology()) do
-- print(t)
-- for x, y in pairs(k) do
-- print(x, y.phy_core_id, y.coretemp_path, #y.cpus)
-- -- for _, z in pairs(y.cpus) do
-- -- print(x,z.cpu,z.conky_cpu)
-- -- end
-- end
-- end
local get_core_mappings = function()
local core_threads = M.get_core_threads()
local assign_cpus = function(x)
return {
cpu_id = __tonumber(x[1]),
core_id = __tonumber(x[2])
}
end
local ncores = M.get_core_number()
local map_ids = function(indexer)
local f = function(acc, next)
local cpu_id = __tonumber(next[1]) + 1
local core_id = __tonumber(next[2])
local conky_core_idx = indexer[core_id]
local core_id = next[2]
local conky_core_idx = indexer[__tonumber(core_id)]
acc.mappings[cpu_id] = {
conky_core_idx = conky_core_idx,
conky_thread_id = acc.thread_ids[conky_core_idx],
@ -322,12 +219,12 @@ local get_core_mappings = function()
local cpu_to_core_map = pure.maybe(
{},
pure.partial(gmatch_to_tableN, '(%d+),(%d+)'),
i_o.execute_cmd('lscpu -y -p=cpu,core | grep -v \'^#\' | sort -k1,1n')
i_o.execute_cmd('lscpu -p=cpu,CORE | tail -n+5')
)
local init = {mappings = {}, _conky_core_index = 0, _thread_ids = {}}
local init = {mappings = {}, thread_ids = pure.rep(ncores, 1)}
return pure.reduce(f, init, cpu_to_core_map).mappings
end
-- return pure.fmap_maybe(map_ids, )
return pure.fmap_maybe(map_ids, get_core_id_indexer())
end
M.get_coretemp_paths = function()
@ -409,49 +306,35 @@ M.read_hwp = function(hwp_paths)
return mixed and 'Mixed' or (HWP_MAP[hwp_pref] or 'Unknown')
end
M.init_cpu_loads = function(topo)
-- -- local m = get_core_mappings()
-- local topo = get_core_topology()
M.init_cpu_loads = function()
local m = get_core_mappings()
local cpu_loads = {}
for core_group_id, core_group in pairs(topo) do
cpu_loads[core_group_id] = {}
for lgl_core_id, core in pairs(core_group) do
cpu_loads[core_group_id][lgl_core_id] = {}
for thread_id = 1, #core.cpus do
cpu_loads[core_group_id][lgl_core_id][thread_id] = {
for cpu_id, core in pairs(m) do
cpu_loads[cpu_id] = {
active_prev = 0,
total_prev = 0,
percent_active = 0,
-- core_id = lgl_core_id,
-- thread_id = thread_id,
conky_core_idx = core.conky_core_idx,
conky_thread_id = core.conky_thread_id,
}
end
end
end
return cpu_loads
end
M.read_cpu_loads = function(cpu_loads)
local ncpus = #cpu_loads
local iter = io.lines('/proc/stat')
iter() -- ignore first line
for group_id = 1, #cpu_loads do
local group = cpu_loads[group_id]
for lgl_core_id = 1, #group do
local core = group[lgl_core_id]
for thread_id = 1, #core do
for i = 1, ncpus do
local ln = iter()
local user, system, idle =
__string_match(ln, '%d+ (%d+) %d+ (%d+) (%d+)', 4)
local user, system, idle = __string_match(ln, '%d+ (%d+) %d+ (%d+) (%d+)', 4)
local active = user + system
local total = active + idle
local thread = core[thread_id]
if total > thread.total_prev then -- guard against 1/0 errors
thread.percent_active =
(active - thread.active_prev) / (total - thread.total_prev)
thread.active_prev = active
thread.total_prev = total
end
end
local c = cpu_loads[i]
if total > c.total_prev then -- guard against 1/0 errors
c.percent_active = (active - c.active_prev) / (total - c.total_prev)
c.active_prev = active
c.total_prev = total
end
end
return cpu_loads