diff options
| author | Kalagmitan <121934419+Kalagmitan@users.noreply.github.com> | 2026-03-15 14:49:37 +0800 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2026-03-15 17:49:37 +1100 |
| commit | dd2d7dceabc25fc667db59dec8fe0dd253b15ad1 (patch) | |
| tree | 41a270d230081c4b7e7bcb250665064b12a120c1 | |
| parent | fix: missing serialization for hiddenIcons (#1263) (diff) | |
| download | caelestia-shell-dd2d7dceabc25fc667db59dec8fe0dd253b15ad1.tar.gz caelestia-shell-dd2d7dceabc25fc667db59dec8fe0dd253b15ad1.tar.bz2 caelestia-shell-dd2d7dceabc25fc667db59dec8fe0dd253b15ad1.zip | |
systemusage: optimized storage aggregation and improved device filtering (#1261)
* refactor: Optimized storage aggregation
+ The storage aggregation logic doesn't account for more complex storage
setups and relied too much on risky string parsing to guess where
partitions are. For example, in my case, I had a LUKS-encrypted drive
which lives inside a "crypt"; because the code couldn't match the type (it
only matched "disk" and "part"), it did not include my entire drive at
all. Also, Linux device names aren't always predictable (take mapper
devices or complex NVMe paths), so if the RegEx doesn't match the name
of those devices, the data just disappears.
I decided to go for a JSON approach, making the code shorter and safer.
Everything should work about the same.
* systemusage: More intuitive filtering for storage devices
+ Removes "useless" drives from being shown on the storage dashboard
+ Prioritizes the root disk to be shown first
* refactor: formatted code properly
| -rw-r--r-- | services/SystemUsage.qml | 129 |
1 files changed, 58 insertions, 71 deletions
diff --git a/services/SystemUsage.qml b/services/SystemUsage.qml index 1b07454..5085644 100644 --- a/services/SystemUsage.qml +++ b/services/SystemUsage.qml @@ -138,91 +138,78 @@ Singleton { Process { id: storage - // Get physical disks with aggregated usage from their partitions - // lsblk outputs: NAME SIZE TYPE FSUSED FSSIZE in bytes - command: ["lsblk", "-b", "-o", "NAME,SIZE,TYPE,FSUSED,FSSIZE", "-P"] + // -J triggers JSON output. -b triggers bytes. + command: ["lsblk", "-J", "-b", "-o", "NAME,SIZE,TYPE,FSUSED,FSSIZE,MOUNTPOINT"] + stdout: StdioCollector { onStreamFinished: { - const diskMap = {}; // Map disk name -> { name, totalSize, used, fsTotal } - const lines = text.trim().split("\n"); - - for (const line of lines) { - if (line.trim() === "") - continue; - - // Parse KEY="VALUE" format - const nameMatch = line.match(/NAME="([^"]+)"/); - const sizeMatch = line.match(/SIZE="([^"]+)"/); - const typeMatch = line.match(/TYPE="([^"]+)"/); - const fsusedMatch = line.match(/FSUSED="([^"]*)"/); - const fssizeMatch = line.match(/FSSIZE="([^"]*)"/); - - if (!nameMatch || !typeMatch) - continue; - - const name = nameMatch[1]; - const type = typeMatch[1]; - const size = parseInt(sizeMatch?.[1] || "0", 10); - const fsused = parseInt(fsusedMatch?.[1] || "0", 10); - const fssize = parseInt(fssizeMatch?.[1] || "0", 10); + const data = JSON.parse(text); + const diskList = []; + const seenDevices = new Set(); - if (type === "disk") { - // Skip zram (swap) devices - if (name.startsWith("zram")) - continue; + // Helper to recursively sum usage from children (partitions, crypt, lvm) + const aggregateUsage = dev => { + let used = 0; + let size = 0; + let isRoot = dev.mountpoint === "/" || (dev.mountpoints && dev.mountpoints.includes("/")); - // Initialize disk entry - if (!diskMap[name]) { - diskMap[name] = { - name: name, - totalSize: size, - used: 0, - fsTotal: 0 - }; - } - } else if (type === "part") { - // Find parent disk (remove trailing numbers/p+numbers) - let 
parentDisk = name.replace(/p?\d+$/, ""); - // For nvme devices like nvme0n1p1, parent is nvme0n1 - if (name.match(/nvme\d+n\d+p\d+/)) - parentDisk = name.replace(/p\d+$/, ""); + if (!seenDevices.has(dev.name)) { + // lsblk returns null for empty/unformatted partitions, which parses to 0 here + used = parseInt(dev.fsused) || 0; + size = parseInt(dev.fssize) || 0; + seenDevices.add(dev.name); + } - // Aggregate partition usage to parent disk - if (diskMap[parentDisk]) { - diskMap[parentDisk].used += fsused; - diskMap[parentDisk].fsTotal += fssize; + if (dev.children) { + for (const child of dev.children) { + const stats = aggregateUsage(child); + used += stats.used; + size += stats.size; + if (stats.isRoot) + isRoot = true; } } - } + return { + used, + size, + isRoot + }; + }; - // Convert map to sorted array - const diskList = []; - let totalUsed = 0; - let totalSize = 0; + for (const dev of data.blockdevices) { + // Only process physical disks at the top level + if (dev.type === "disk" && !dev.name.startsWith("zram")) { + const stats = aggregateUsage(dev); - for (const diskName of Object.keys(diskMap).sort()) { - const disk = diskMap[diskName]; - // Use filesystem total if available, otherwise use disk size - const total = disk.fsTotal > 0 ? disk.fsTotal : disk.totalSize; - const used = disk.used; - const perc = total > 0 ? used / total : 0; + if (stats.size === 0) { + continue; + } - // Convert bytes to KiB for consistency with formatKib - diskList.push({ - mount: disk.name // Using 'mount' property for compatibility - , - used: used / 1024, - total: total / 1024, - free: (total - used) / 1024, - perc: perc - }); + const total = stats.size; + const used = stats.used; - totalUsed += used; - totalSize += total; + diskList.push({ + mount: dev.name, + used: used / 1024 // KiB + , + total: total / 1024 // KiB + , + free: (total - used) / 1024, + perc: total > 0 ? 
used / total : 0, + hasRoot: stats.isRoot + }); + } } - root.disks = diskList; + // Sort by putting the disk with root first, then sort the rest alphabetically + root.disks = diskList.sort((a, b) => { + if (a.hasRoot && !b.hasRoot) + return -1; + if (!a.hasRoot && b.hasRoot) + return 1; + return a.mount.localeCompare(b.mount); + }); } } } |