Compare commits
No commits in common. "e7d5b63c388ec32af6cc74fa465f4e6c8a0f3bcd" and "ea6d5e60428b2f934aa3da3c946976535aca6f8e" have entirely different histories.
e7d5b63c38 ... ea6d5e6042

@@ -87,11 +87,10 @@ let Memory =
 let Network =
 { Type = { geometry : PlotGeo_.Type }, default.geometry = PlotGeo_::{=} }

-let CoreGroup = { threads : Natural, rows : Natural, padding : Natural }
-
 let Processor =
 { Type =
-{ core_groups : List CoreGroup
+{ core_rows : Natural
+, core_padding : Natural
 , show_stats : Bool
 , show_plot : Bool
 , table_rows : Natural
@@ -439,14 +439,6 @@ return function(config)
 )
 end

-M.make_blank_dial = function(x, y, radius, thickness, threshold)
-return dial.make(
-geom.make_arc(x, y, radius, DIAL_THETA0, DIAL_THETA1),
-arc.config(style.line(thickness, CAP_BUTT), patterns.indicator.bg),
-threshold_indicator(threshold)
-)
-end
-
 M.make_dial = function(x, y, radius, thickness, threshold, _format, pre_function)
 return {
 dial = dial.make(
@@ -52,14 +52,8 @@ return function(update_freq, config, common, width, point)
 vid_utilization = 0
 }

-local runtime_status_file = config.dev_power..'/runtime_status'
-
-local want_nvidia_query = config.show_temp or config.show_clock
-or config.gpu_util or config.mem_util or config.vid_util
-
 local update_state = function()
-local is_active = i_o.read_file(runtime_status_file, nil, '*l') == 'active'
-if is_active and want_nvidia_query then
+if i_o.read_file(config.dev_power, nil, '*l') == 'on' then
 local nvidia_settings_glob = i_o.execute_cmd(NV_QUERY)
 if nvidia_settings_glob == nil then
 mod_state.error = 'Error'
@@ -74,8 +68,6 @@ return function(update_freq, config, common, width, point)
 = __string_match(nvidia_settings_glob, NV_REGEX)
 mod_state.error = false
 end
-elseif is_active then
-mod_state.error = false
 else
 mod_state.error = 'Off'
 end
@@ -1,4 +1,3 @@
-local dial = require 'dial'
 local compound_dial = require 'compound_dial'
 local text_table = require 'text_table'
 local i_o = require 'i_o'
@@ -22,9 +21,7 @@ return function(update_freq, main_state, config, common, width, point)
 -----------------------------------------------------------------------------
 -- processor state

-local topology = cpu.get_core_topology()
-local mod_state = cpu.read_cpu_loads(cpu.init_cpu_loads(topology))
-local ncpus = cpu.get_cpu_number(topology)
+local mod_state = cpu.read_cpu_loads(cpu.init_cpu_loads())

 local update_state = function()
 mod_state = cpu.read_cpu_loads(mod_state)
@@ -33,39 +30,35 @@ return function(update_freq, main_state, config, common, width, point)
 -----------------------------------------------------------------------------
 -- cores (loads and temps)

--- TODO add this back
--- local is_evenly_distributed = function(ncores, rows)
--- if rows == 0 then
--- return false
--- elseif math.fmod(ncores, rows) == 0 then
--- return true
--- else
--- i_o.warnf('could not evenly distribute %i cores over %i rows', ncores, rows)
--- return false
--- end
--- end
+local ncpus = cpu.get_cpu_number()
+local ncores = cpu.get_core_number()
+local nthreads = ncpus / ncores

-local create_core = function(core_cols, y, nthreads, padding, c)
+local show_cores = false
+if config.core_rows > 0 then
+if math.fmod(ncores, config.core_rows) == 0 then
+show_cores = true
+else
+i_o.warnf(
+'could not evenly distribute %i cores over %i rows; disabling',
+ncores,
+config.core_rows
+)
+end
+end
+
+local create_core = function(core_cols, y, c)
 local dial_x = point.x +
 (core_cols == 1
 and (width / 2)
-or (padding + dial_outer_radius +
-(width - 2 * (dial_outer_radius + padding))
+or (config.core_padding + dial_outer_radius +
+(width - 2 * (dial_outer_radius + config.core_padding))
 * math.fmod(c - 1, core_cols) / (core_cols - 1)))
 local dial_y = y + dial_outer_radius +
 (2 * dial_outer_radius + dial_y_spacing)
 * math.floor((c - 1) / core_cols)
-local loads
-if nthreads == 1 then
-local single_thickness = dial_outer_radius - dial_inner_radius
-loads = common.make_blank_dial(
-dial_x,
-dial_y,
-dial_outer_radius - single_thickness / 2,
-single_thickness,
-80
-)
-else
+return {
 loads = common.make_compound_dial(
 dial_x,
 dial_y,
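
Note: on the ea6d5e6042 side the per-core dials are only drawn when the core count divides evenly into config.core_rows. A minimal standalone sketch of that check, with hypothetical values not taken from the diff:

    -- hypothetical machine: 8 physical cores laid out over 2 rows of dials
    local ncores, core_rows = 8, 2
    local show_cores = false
    if core_rows > 0 and math.fmod(ncores, core_rows) == 0 then
      show_cores = true
      print(('core grid: %d rows x %d columns'):format(core_rows, ncores / core_rows))
    else
      print('could not evenly distribute cores over rows; disabling core dials')
    end
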
@@ -74,10 +67,7 @@ return function(update_freq, main_state, config, common, width, point)
 dial_thickness,
 80,
 nthreads
-)
-end
-return {
-loads = loads,
+),
 coretemp = common.make_text_circle(
 dial_x,
 dial_y,
@@ -89,65 +79,44 @@ return function(update_freq, main_state, config, common, width, point)
 }
 end

-local mk_core_group = function(group_config, y)
-local nthreads = group_config.threads
-local core_topology = topology[nthreads]
-local ncores = #core_topology
-local core_cols = ncores / group_config.rows
-local _create_core = pure.partial(
-create_core, core_cols, y, nthreads, group_config.padding
-)
-local cores = pure.map_n(_create_core, ncores)
-local group_loads = mod_state[nthreads]
-local update_loads
-local draw_static_loads
-local draw_dynamic_loads
-if nthreads == 1 then
-update_loads = function(c)
-dial.set(
-cores[c].loads,
-group_loads[c][1].percent_active * 100
-)
+local mk_cores = function(y)
+local core_cols = ncores / config.core_rows
+local cores = pure.map_n(pure.partial(create_core, core_cols, y), ncores)
+local coretemp_paths = cpu.get_coretemp_paths()
+if #coretemp_paths ~= ncores then
+i_o.warnf('could not find all coretemp paths')
+end
+local update_coretemps = function()
+for conky_core_idx, path in pairs(coretemp_paths) do
+local temp = __math_floor(0.001 * i_o.read_file(path, nil, '*n'))
+common.text_circle_set(cores[conky_core_idx].coretemp, temp)
 end
-draw_static_loads = dial.draw_static
-draw_dynamic_loads = dial.draw_dynamic
-else
-update_loads = function(c)
-for t = 1, nthreads do
-compound_dial.set(
-cores[c].loads,
-t,
-group_loads[c][t].percent_active * 100
-)
-end
-end
-draw_static_loads = compound_dial.draw_static
-draw_dynamic_loads = compound_dial.draw_dynamic
 end
 local update = function()
-for c = 1, ncores do
-local temp = __math_floor(
-0.001 * i_o.read_file(core_topology[c].coretemp_path, nil, '*n')
+for _, load_data in pairs(mod_state) do
+compound_dial.set(
+cores[load_data.conky_core_idx].loads,
+load_data.conky_thread_id,
+load_data.percent_active * 100
 )
-common.text_circle_set(cores[c].coretemp, temp)
-update_loads(c)
 end
+update_coretemps()
 end
 local static = function(cr)
-for i = 1, ncores do
+for i = 1, #cores do
 common.text_circle_draw_static(cores[i].coretemp, cr)
-draw_static_loads(cores[i].loads, cr)
+compound_dial.draw_static(cores[i].loads, cr)
 end
 end
 local dynamic = function(cr)
-for i = 1, ncores do
+for i = 1, #cores do
 common.text_circle_draw_dynamic(cores[i].coretemp, cr)
-draw_dynamic_loads(cores[i].loads, cr)
+compound_dial.draw_dynamic(cores[i].loads, cr)
 end
 end
 return common.mk_acc(
 width,
-(dial_outer_radius * 2 + dial_y_spacing) * group_config.rows
+(dial_outer_radius * 2 + dial_y_spacing) * config.core_rows
 - dial_y_spacing,
 update,
 static,
@@ -203,12 +172,8 @@ return function(update_freq, main_state, config, common, width, point)
 )
 local update = function()
 local s = 0
-for g = 1, #mod_state do
-for c = 1, #mod_state[g] do
-for t = 1, #mod_state[g][c] do
-s = s + mod_state[g][c][t].percent_active
-end
-end
+for i = 1, #mod_state do
+s = s + mod_state[i].percent_active
 end
 common.tagged_percent_timeseries_set(total_load, s / ncpus * 100)
 end
@@ -260,18 +225,14 @@ return function(update_freq, main_state, config, common, width, point)
 -----------------------------------------------------------------------------
 -- main functions

-local core_group_section = function (g)
-return {pure.partial(mk_core_group, g), true, text_spacing}
-end
-
 return {
 header = 'PROCESSOR',
 point = point,
 width = width,
 set_state = update_state,
 top = {
-table.unpack(pure.map(core_group_section, config.core_groups)),
--- {mk_hwp_freq, config.show_stats, sep_spacing},
+{mk_cores, show_cores, text_spacing},
+{mk_hwp_freq, config.show_stats, sep_spacing},
 },
 common.mk_section(
 sep_spacing,

src/pure.lua | 26
@@ -189,32 +189,6 @@ M.flatten = function(xs)
 return r
 end

-M.group_with = function(keyfun, valfun, seq)
-local f = function(acc, next)
-local k = keyfun(next)
-local v = valfun(next)
-if acc[k] == nil then
-acc[k] = {v}
-else
-acc[k][#acc[k] + 1] = v
-end
-return acc
-end
-return M.reduce(f, {}, seq)
-end
-
-M.group_by = function(k, seq)
-local f = function(acc, next)
-if acc[k] == nil then
-acc[k] = {next}
-else
-acc[k][#acc[k]] = next
-end
-return acc
-end
-return M.reduce(f, {}, seq)
-end
-
 M.concat = function(...)
 return M.flatten({...})
 end
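
Note: M.group_with (removed on the ea6d5e6042 side) folds a sequence into a table of lists keyed by keyfun(x), collecting valfun(x) under each key. A minimal standalone sketch of the same accumulator logic, written with a plain loop instead of M.reduce; the example values are hypothetical:

    local function group_with(keyfun, valfun, seq)
      local acc = {}
      for _, x in ipairs(seq) do
        local k, v = keyfun(x), valfun(x)
        if acc[k] == nil then
          acc[k] = {v}
        else
          acc[k][#acc[k] + 1] = v
        end
      end
      return acc
    end

    -- group 1..5 by parity: result[0] == {2, 4}, result[1] == {1, 3, 5}
    local result = group_with(
      function(x) return x % 2 end,
      function(x) return x end,
      {1, 2, 3, 4, 5}
    )
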

src/sys.lua | 189
@@ -178,15 +178,8 @@ M.get_core_number = function()
 return __tonumber(i_o.read_file('/proc/cpuinfo', 'cpu cores%s+:%s(%d+)'))
 end

-M.get_cpu_number = function(topology)
-local n = 0
-for g = 1, #topology do
-for c = 1, #topology[g] do
-n = n + #topology[g][c].cpus
-end
-end
-return n
--- return __tonumber(i_o.execute_cmd('nproc', nil, '*n'))
+M.get_cpu_number = function()
+return __tonumber(i_o.execute_cmd('nproc', nil, '*n'))
 end

 local get_coretemp_dir = function()
@@ -195,123 +188,27 @@ local get_coretemp_dir = function()
 return pure.fmap_maybe(dirname, s)
 end

--- return a table with keys corresponding to physcial core id and values to
--- the number of threads of each core (usually 1 or 2)
-M.get_core_threads = function()
-local cmd = 'lscpu -y -p=core | grep -v \'^#\' | sort -k1,1n | uniq -c'
-local flip = function(c) return {__tonumber(c[2]), __tonumber(c[1])} end
-local make_indexer = pure.compose(
-pure.array_to_map,
-pure.partial(pure.map, flip),
-pure.partial(gmatch_to_tableN, '(%d+) (%d+)')
-)
-return pure.fmap_maybe(make_indexer, i_o.execute_cmd(cmd))
-end
-
-
 -- map cores to integer values starting at 1; this is necessary since some cpus
 -- don't report their core id's as a sequence of integers starting at 0
--- local get_core_id_indexer = function()
--- local make_indexer = pure.compose(
--- pure.array_to_map,
--- pure.partial(pure.imap, function(i, c) return {__tonumber(c), i} end),
--- pure.partial(gmatch_to_table1, '(%d+)')
--- )
--- return pure.fmap_maybe(
--- make_indexer,
--- i_o.execute_cmd('lscpu -p=CORE | tail -n+5 | sort -k1,1n')
--- )
--- end
-
--- conky_core_idx: the ID of the dial to be drawn for this core
--- conky_thread_idx: the ID of the individual indicator within one dial
--- corresponding to one thread in a core (starting at 1 for each core)
-
-local get_coretemp_mapper = function()
-local d = get_coretemp_dir()
-i_o.assert_exe_exists('grep')
-local get_labels = pure.compose(
-i_o.execute_cmd,
-pure.partial(__string_format, 'grep Core %s/temp*_label', true)
-)
-local to_tuple = function(m)
-return {__tonumber(m[2]), __string_format('%s/%s_input', d, m[1])}
-end
-local to_map = pure.compose(
+local get_core_id_indexer = function()
+local make_indexer = pure.compose(
 pure.array_to_map,
-pure.partial(pure.map, to_tuple),
-pure.partial(gmatch_to_tableN, '/([^/\n]+)_label:Core (%d+)\n')
+pure.partial(pure.imap, function(i, c) return {__tonumber(c), i} end),
+pure.partial(gmatch_to_table1, '(%d+)')
 )
-return pure.maybe({}, to_map, pure.fmap_maybe(get_labels, d))
-end
-
-M.get_core_topology = function()
-local coretemp_paths = get_coretemp_mapper()
-local assign_cpu = function(i, x)
-return {
-lgl_cpu_id = i,
-phy_core_id = __tonumber(x[1]),
-phy_cpu_id = __tonumber(x[2])
-}
-end
-local assign_core = function(acc, next)
-local g = acc.grouped
-local max_lgl_core_id = #g
-local new_phy_core_id = next.phy_core_id
-local new_cpu = {phy_cpu_id = next.phy_cpu_id, lgl_cpu_id = next.lgl_cpu_id}
-if acc.prev_phy_core_id == new_phy_core_id then
-local max_thread = #acc.grouped[max_lgl_core_id].cpus
-acc.grouped[max_lgl_core_id].cpus[max_thread + 1] = new_cpu
-else
-local new_lgl_core_id = max_lgl_core_id + 1
-acc.grouped[new_lgl_core_id] = {
-phy_core_id = new_phy_core_id,
-lgl_core_id = new_lgl_core_id,
-coretemp_path = coretemp_paths[new_phy_core_id],
-cpus = {new_cpu}
-}
-acc.prev_phy_core_id = new_phy_core_id
-end
-return acc
-end
-local get_threads = function(x)
-return #x.cpus
-end
-local f = pure.compose(
-pure.partial(pure.group_with, get_threads, pure.id),
-pure.partial(pure.get, 'grouped'),
-pure.partial(pure.reduce, assign_core, {prev_phy_core_id = -1, grouped = {}}),
-pure.partial(pure.imap, assign_cpu),
-pure.partial(gmatch_to_tableN, '(%d+),(%d+)')
+return pure.fmap_maybe(
+make_indexer,
+i_o.execute_cmd('lscpu -p=CORE | tail -n+5 | sort | uniq')
 )
-local out =
-i_o.execute_cmd('lscpu -y -p=core,cpu | grep -v \'^#\' | sort -k1,1n')
-return pure.fmap_maybe(f, out)
 end

--- for t, k in pairs(get_core_topology()) do
--- print(t)
--- for x, y in pairs(k) do
--- print(x, y.phy_core_id, y.coretemp_path, #y.cpus)
--- -- for _, z in pairs(y.cpus) do
--- -- print(x,z.cpu,z.conky_cpu)
--- -- end
--- end
--- end
-
 local get_core_mappings = function()
-local core_threads = M.get_core_threads()
-local assign_cpus = function(x)
-return {
-cpu_id = __tonumber(x[1]),
-core_id = __tonumber(x[2])
-}
-end
+local ncores = M.get_core_number()
 local map_ids = function(indexer)
 local f = function(acc, next)
 local cpu_id = __tonumber(next[1]) + 1
-local core_id = __tonumber(next[2])
-local conky_core_idx = indexer[core_id]
+local core_id = next[2]
+local conky_core_idx = indexer[__tonumber(core_id)]
 acc.mappings[cpu_id] = {
 conky_core_idx = conky_core_idx,
 conky_thread_id = acc.thread_ids[conky_core_idx],
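
Note: both versions build their cpu-to-core maps from 'lscpu -p' style output, which prints one comma-separated id pair per line (comment lines start with '#'). A minimal sketch of the '(%d+),(%d+)' extraction the diff relies on, run over a hypothetical captured string:

    -- hypothetical "core,cpu" pairs as lscpu -y -p=core,cpu might print them
    local out = "0,0\n0,1\n1,2\n1,3\n"
    local pairs_found = {}
    for a, b in out:gmatch('(%d+),(%d+)') do
      pairs_found[#pairs_found + 1] = {tonumber(a), tonumber(b)}
    end
    -- pairs_found == {{0,0}, {0,1}, {1,2}, {1,3}}
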
@@ -322,12 +219,12 @@ local get_core_mappings = function()
 local cpu_to_core_map = pure.maybe(
 {},
 pure.partial(gmatch_to_tableN, '(%d+),(%d+)'),
-i_o.execute_cmd('lscpu -y -p=cpu,core | grep -v \'^#\' | sort -k1,1n')
+i_o.execute_cmd('lscpu -p=cpu,CORE | tail -n+5')
 )
-local init = {mappings = {}, _conky_core_index = 0, _thread_ids = {}}
+local init = {mappings = {}, thread_ids = pure.rep(ncores, 1)}
 return pure.reduce(f, init, cpu_to_core_map).mappings
 end
--- return pure.fmap_maybe(map_ids, )
+return pure.fmap_maybe(map_ids, get_core_id_indexer())
 end

 M.get_coretemp_paths = function()
@@ -409,49 +306,35 @@ M.read_hwp = function(hwp_paths)
 return mixed and 'Mixed' or (HWP_MAP[hwp_pref] or 'Unknown')
 end

-M.init_cpu_loads = function(topo)
--- -- local m = get_core_mappings()
--- local topo = get_core_topology()
+M.init_cpu_loads = function()
+local m = get_core_mappings()
 local cpu_loads = {}
-for core_group_id, core_group in pairs(topo) do
-cpu_loads[core_group_id] = {}
-for lgl_core_id, core in pairs(core_group) do
-cpu_loads[core_group_id][lgl_core_id] = {}
-for thread_id = 1, #core.cpus do
-cpu_loads[core_group_id][lgl_core_id][thread_id] = {
-active_prev = 0,
-total_prev = 0,
-percent_active = 0,
--- core_id = lgl_core_id,
--- thread_id = thread_id,
-}
-end
-end
+for cpu_id, core in pairs(m) do
+cpu_loads[cpu_id] = {
+active_prev = 0,
+total_prev = 0,
+percent_active = 0,
+conky_core_idx = core.conky_core_idx,
+conky_thread_id = core.conky_thread_id,
+}
 end
 return cpu_loads
 end

 M.read_cpu_loads = function(cpu_loads)
+local ncpus = #cpu_loads
 local iter = io.lines('/proc/stat')
 iter() -- ignore first line
-for group_id = 1, #cpu_loads do
-local group = cpu_loads[group_id]
-for lgl_core_id = 1, #group do
-local core = group[lgl_core_id]
-for thread_id = 1, #core do
-local ln = iter()
-local user, system, idle =
-__string_match(ln, '%d+ (%d+) %d+ (%d+) (%d+)', 4)
-local active = user + system
-local total = active + idle
-local thread = core[thread_id]
-if total > thread.total_prev then -- guard against 1/0 errors
-thread.percent_active =
-(active - thread.active_prev) / (total - thread.total_prev)
-thread.active_prev = active
-thread.total_prev = total
-end
-end
+for i = 1, ncpus do
+local ln = iter()
+local user, system, idle = __string_match(ln, '%d+ (%d+) %d+ (%d+) (%d+)', 4)
+local active = user + system
+local total = active + idle
+local c = cpu_loads[i]
+if total > c.total_prev then -- guard against 1/0 errors
+c.percent_active = (active - c.active_prev) / (total - c.total_prev)
+c.active_prev = active
+c.total_prev = total
 end
 end
 return cpu_loads
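
Note: on both sides read_cpu_loads turns consecutive /proc/stat samples into a per-cpu utilization fraction. A minimal standalone sketch of that delta computation; the capture pattern and the _prev field names follow the diff, while the function name and state table here are hypothetical:

    -- 'line' is one "cpuN user nice system idle ..." row from /proc/stat
    local function percent_active(line, state)
      -- init position 4 skips the "cpu" prefix; captures are user, system, idle jiffies
      local user, system, idle = string.match(line, '%d+ (%d+) %d+ (%d+) (%d+)', 4)
      local active = user + system
      local total = active + idle
      local pct = 0
      if total > state.total_prev then -- guard against 1/0 errors
        pct = (active - state.active_prev) / (total - state.total_prev)
      end
      state.active_prev, state.total_prev = active, total
      return pct
    end

    -- example: sample cpu0 once (the fraction is 0 on the first call)
    local state = {active_prev = 0, total_prev = 0}
    local iter = io.lines('/proc/stat')
    iter() -- skip the aggregate "cpu" line
    print(percent_active(iter(), state))
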