Various improvements on recovering kernel type information (#3240)

* refactoring

* buddy improvements

* field check

* buddy improvements

* slab improvements

* kversion improvements

* config sysfs detection

* fix buddy

* fix kernel version handling

* cleaning up

* removed todo

* improved arg handling

* fixes based on comments
pull/3248/head
jxuanli 4 months ago committed by GitHub
parent a24f5b471f
commit e43542852a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -41,10 +41,9 @@ def BIT(shift: int):
return 1 << shift
@pwndbg.lib.cache.cache_until("objfile")
def has_debug_symbols(required=[], checkall=True) -> bool:
if len(required) == 0:
return pwndbg.aglib.symbol.lookup_symbol("commit_creds") is not None
def has_debug_symbols(*required: str, checkall: bool = True) -> bool:
if not required:
required = ("commit_creds",)
if checkall:
return all(pwndbg.aglib.symbol.lookup_symbol(sym) is not None for sym in required)
# check any
@ -61,12 +60,12 @@ def has_debug_info() -> bool:
def requires_debug_symbols(
required: List[str], checkall=False, default: D = None
*required: str, checkall=False, default: D = None
) -> Callable[[Callable[P, T]], Callable[P, T | D]]:
def decorator(f: Callable[P, T]) -> Callable[P, T | D]:
@functools.wraps(f)
def func(*args: P.args, **kwargs: P.kwargs) -> T | D:
if has_debug_symbols(required, checkall):
if has_debug_symbols(*required, checkall=checkall):
return f(*args, **kwargs)
# If the user doesn't want an exception thrown when debug symbols are
@ -75,7 +74,7 @@ def requires_debug_symbols(
return default
raise Exception(
f"Function {f.__name__} requires {'all' if checkall else 'any'} of the following debug symbols: {required}"
f"Function {f.__name__} requires {'all' if checkall else 'any'} of the following symbols: {required}"
)
return func
@ -102,7 +101,7 @@ def requires_debug_info(default: D = None) -> Callable[[Callable[P, T]], Callabl
return decorator
@requires_debug_symbols(["nr_cpu_ids"], default=1)
@requires_debug_symbols("nr_cpu_ids", default=1)
def nproc() -> int:
"""Returns the number of processing units available, similar to nproc(1)"""
val = pwndbg.aglib.kernel.symbol.try_usymbol("nr_cpu_ids", 32)
@ -110,7 +109,7 @@ def nproc() -> int:
@pwndbg.lib.cache.cache_until("stop")
def get_first_kernel_ro() -> pwndbg.lib.memory.Page | None:
def first_kernel_ro_page() -> pwndbg.lib.memory.Page | None:
"""Returns the first kernel mapping which contains the linux_banner"""
base = kbase()
if base is None:
@ -132,11 +131,11 @@ def get_first_kernel_ro() -> pwndbg.lib.memory.Page | None:
def kconfig() -> pwndbg.lib.kernel.kconfig.Kconfig | None:
global _kconfig
config_start, config_end = None, None
if has_debug_info():
if has_debug_symbols():
config_start = pwndbg.aglib.symbol.lookup_symbol_addr("kernel_config_data")
config_end = pwndbg.aglib.symbol.lookup_symbol_addr("kernel_config_data_end")
else:
mapping = get_first_kernel_ro()
mapping = first_kernel_ro_page()
result = next(pwndbg.search.search(b"IKCFG_ST", mappings=[mapping]), None)
if result is not None:
@ -153,7 +152,7 @@ def kconfig() -> pwndbg.lib.kernel.kconfig.Kconfig | None:
return _kconfig
@requires_debug_symbols(["saved_command_line"], default="")
@requires_debug_symbols("saved_command_line", default="")
@pwndbg.lib.cache.cache_until("start")
def kcmdline() -> str:
addr = pwndbg.aglib.symbol.lookup_symbol_addr("saved_command_line")
@ -164,14 +163,14 @@ def kcmdline() -> str:
@pwndbg.lib.cache.cache_until("start")
def kversion() -> str:
try:
if has_debug_symbols(["linux_banner"]):
if has_debug_symbols("linux_banner"):
version_addr = pwndbg.aglib.symbol.lookup_symbol_addr("linux_banner")
result = pwndbg.aglib.memory.string(version_addr).decode("ascii").strip()
assert len(result) > 0
return result
except Exception:
pass
mapping = get_first_kernel_ro()
mapping = first_kernel_ro_page()
version_addr = next(pwndbg.search.search(b"Linux version", mappings=[mapping]), None)
return pwndbg.aglib.memory.string(version_addr).decode("ascii").strip()
@ -214,7 +213,7 @@ class ArchOps(ABC):
# in the page_to_pfn() and pfn_to_page() methods in the future.
@abstractmethod
def per_cpu(self, addr: pwndbg.dbg_mod.Value, cpu=None) -> pwndbg.dbg_mod.Value:
def per_cpu(self, addr: int | pwndbg.dbg_mod.Value, cpu=None) -> pwndbg.dbg_mod.Value:
raise NotImplementedError()
@abstractmethod
@ -325,7 +324,9 @@ class i386Ops(x86Ops):
def virt_to_phys(self, virt: int) -> int:
return (virt - self.page_offset) % (1 << 32)
def per_cpu(self, addr: pwndbg.dbg_mod.Value, cpu: int | None = None) -> pwndbg.dbg_mod.Value:
def per_cpu(
self, addr: int | pwndbg.dbg_mod.Value, cpu: int | None = None
) -> pwndbg.dbg_mod.Value:
raise NotImplementedError()
def pfn_to_page(self, pfn: int) -> int:
@ -343,8 +344,10 @@ class x86_64Ops(x86Ops):
def ptr_size(self) -> int:
return 64
@requires_debug_symbols(["__per_cpu_offset", "nr_iowait_cpu"], checkall=False)
def per_cpu(self, addr: pwndbg.dbg_mod.Value, cpu: int | None = None) -> pwndbg.dbg_mod.Value:
@requires_debug_symbols("__per_cpu_offset", "nr_iowait_cpu", checkall=False)
def per_cpu(
self, addr: int | pwndbg.dbg_mod.Value, cpu: int | None = None
) -> pwndbg.dbg_mod.Value:
if cpu is None:
cpu = pwndbg.dbg.selected_thread().index() - 1
@ -352,7 +355,9 @@ class x86_64Ops(x86Ops):
offset = pwndbg.aglib.memory.u(per_cpu_offset + (cpu * 8))
per_cpu_addr = (int(addr) + offset) % 2**64
if isinstance(addr, pwndbg.dbg_mod.Value):
return pwndbg.dbg.selected_inferior().create_value(per_cpu_addr, addr.type)
return pwndbg.dbg.selected_inferior().create_value(per_cpu_addr)
def virt_to_phys(self, virt: int) -> int:
if virt < self.kbase:
@ -375,8 +380,10 @@ class Aarch64Ops(ArchOps):
def ptr_size(self):
return 64
@requires_debug_symbols(["__per_cpu_offset", "nr_iowait_cpu"], checkall=False)
def per_cpu(self, addr: pwndbg.dbg_mod.Value, cpu: int | None = None) -> pwndbg.dbg_mod.Value:
@requires_debug_symbols("__per_cpu_offset", "nr_iowait_cpu", checkall=False)
def per_cpu(
self, addr: int | pwndbg.dbg_mod.Value, cpu: int | None = None
) -> pwndbg.dbg_mod.Value:
if cpu is None:
cpu = pwndbg.dbg.selected_thread().index() - 1
@ -384,7 +391,9 @@ class Aarch64Ops(ArchOps):
offset = pwndbg.aglib.memory.u(per_cpu_offset + (cpu * 8))
per_cpu_addr = (int(addr) + offset) % 2**64
if isinstance(addr, pwndbg.dbg_mod.Value):
return pwndbg.dbg.selected_inferior().create_value(per_cpu_addr, addr.type)
return pwndbg.dbg.selected_inferior().create_value(per_cpu_addr)
def virt_to_phys(self, virt: int) -> int:
return virt - self.page_offset
@ -474,7 +483,7 @@ def page_size() -> int:
raise NotImplementedError()
def per_cpu(addr: pwndbg.dbg_mod.Value, cpu: int | None = None) -> pwndbg.dbg_mod.Value:
def per_cpu(addr: int | pwndbg.dbg_mod.Value, cpu: int | None = None) -> pwndbg.dbg_mod.Value:
ops = arch_ops()
if ops:
return ops.per_cpu(addr, cpu)
@ -620,7 +629,7 @@ def paging_enabled() -> bool:
raise NotImplementedError()
@requires_debug_symbols(["node_states"], default=1)
@requires_debug_symbols("node_states", default=1)
def num_numa_nodes() -> int:
"""Returns the number of NUMA nodes that are online on the system"""
kc = kconfig()

@ -15,76 +15,14 @@ import pwndbg.aglib.typeinfo
MAX_ORDER = 11
def get_pcp_struct(pcp_sz) -> str:
kconfig = pwndbg.aglib.kernel.kconfig()
defs = []
if not pwndbg.aglib.kernel.krelease() < (5, 14):
if pwndbg.aglib.kernel.krelease() < (6, 7):
defs.append("BETWEEN_V5_14_AND_V6_6")
else:
defs.append("BEFORE_V5_14")
if not pwndbg.aglib.kernel.krelease() < (6, 0):
defs.append("SINCE_V6_0")
if not pwndbg.aglib.kernel.krelease() < (6, 7):
defs.append("SINCE_V6_7")
for config in (
"CONFIG_NUMA",
"CONFIG_SMP",
):
if config in kconfig:
defs.append(config)
result = "\n".join(f"#define {s}" for s in defs)
result += f"""
struct per_cpu_pages {{
#ifdef SINCE_V6_0
spinlock_t lock;/* Protects lists field, MOST OF THE TIME IT IS 4 BYTES */
#endif
int count; /* number of pages in the list */
int high; /* high watermark, emptying needed */
#ifdef SINCE_V6_7
int high_min; /* min high watermark */
int high_max; /* max high watermark */
#endif
int batch; /* chunk size for buddy add/remove */
#ifdef SINCE_V6_7
u8 flags; /* protected by pcp->lock */
u8 alloc_factor; /* batch scaling factor during allocate */
#ifdef CONFIG_NUMA
u8 expire; /* When 0, remote pagesets are drained */
#endif
short free_count; /* consecutive free count */
#endif
#ifdef BETWEEN_V5_14_AND_V6_6
short free_factor; /* batch scaling factor during free */
#ifdef CONFIG_NUMA
short expire; /* When 0, remote pagesets are drained */
#endif
#else
#endif
/* Lists of pages, one per migrate type stored on the pcp-lists */
struct list_head lists[{pwndbg.aglib.kernel.symbol.npcplist()}]; // constant is sufficient for now
}};
#ifdef BEFORE_V5_14
struct per_cpu_pageset {{
union {{
struct per_cpu_pages pcp;
char _pad[{pcp_sz}];
}};
}};
#endif
"""
return result
def find_zone_offsets() -> Tuple[int, int, int, int, int]:
pcp_off, name_off, freelist_off, pcp_sz, zone_sz = None, None, None, None, None
start_idx = 10
pcp_off, name_off, freelist_off, pcp_pad, zone_sz = None, None, None, None, None
node_data0 = pwndbg.aglib.kernel.node_data()
if "CONFIG_NUMA" in pwndbg.aglib.kernel.kconfig():
node_data0 = node_data0.dereference()
ptr = int(node_data0) + start_idx * 8
for i in range(start_idx, 20): # the pcp offset should exist in those range
node_data0 = int(node_data0)
ptr = node_data0
for i in range(20): # the pcp offset should exist in those range
val = pwndbg.aglib.memory.u64(ptr)
ptr += 8
if pwndbg.aglib.memory.is_kernel(val):
@ -92,25 +30,13 @@ def find_zone_offsets() -> Tuple[int, int, int, int, int]:
pcp_off = (i + 1) * 8
break
assert pcp_off, "can't find pcp offset"
if pwndbg.aglib.kernel.krelease() < (5, 14):
pcp_ptr = pwndbg.aglib.kernel.per_cpu(
pwndbg.aglib.memory.get_typed_pointer("struct page", pwndbg.aglib.memory.u64(ptr))
)
first_pcp_ptr, second_pcp_ptr = None, None
prev = 0
for i in range(30):
addr = int(pcp_ptr) + i * 8
cur = pwndbg.aglib.memory.u64(addr)
if prev >> 56 == 0 and cur >> 56 == 0xFF:
if not first_pcp_ptr:
first_pcp_ptr = addr
else:
second_pcp_ptr = addr
pcp_ptr = int(pwndbg.aglib.kernel.per_cpu(pwndbg.aglib.memory.u64(node_data0 + pcp_off)))
for i in range(6):
val = pwndbg.aglib.memory.u64(pcp_ptr + i * 8)
if pwndbg.aglib.memory.is_kernel(val):
pcp_pad = i * 8
break
prev = cur
assert first_pcp_ptr and second_pcp_ptr, "can't determine pcp ptrs"
pcp_sz = second_pcp_ptr - first_pcp_ptr
assert 0 < pcp_sz < 0x100, "can't determine pcp_sz"
assert pcp_pad, "can't find pcp pad"
for i in range(20):
char_ptr = pwndbg.aglib.memory.u64(ptr)
ptr += 8
@ -126,8 +52,7 @@ def find_zone_offsets() -> Tuple[int, int, int, int, int]:
for i in range(1, 20):
cur = pwndbg.aglib.memory.u64(ptr)
ptr += 8
# prev is the write cache padding followed by the freelist
if prev == 0 and pwndbg.aglib.memory.is_kernel(cur):
if not pwndbg.aglib.memory.is_kernel(prev) and pwndbg.aglib.memory.is_kernel(cur):
freelist_off = (i + 1) * 8 + name_off
break
prev = cur
@ -141,12 +66,12 @@ def find_zone_offsets() -> Tuple[int, int, int, int, int]:
ptr += 8
if pwndbg.aglib.memory.is_kernel(val):
# we have found `zone_pgdat`
zone_sz = ptr - pcp_off - int(node_data0)
zone_sz = ptr - pcp_off - node_data0
break
assert (
zone_sz and zone_sz < 0x4000 and zone_sz & 0xF == 0
), f"can't determine sizeof(struct zone) = {zone_sz}" # just to make sure it is sane
return pcp_off, name_off, freelist_off, pcp_sz, zone_sz
return pcp_off, name_off, freelist_off, pcp_pad, zone_sz
def load_buddydump_typeinfo():
@ -154,53 +79,57 @@ def load_buddydump_typeinfo():
return
nmtypes = pwndbg.aglib.kernel.symbol.nmtypes()
nzones = pwndbg.aglib.kernel.symbol.nzones()
if not nmtypes or not nzones:
return
nnodes = pwndbg.aglib.kernel.num_numa_nodes()
npcplist = pwndbg.aglib.kernel.symbol.npcplist()
pwndbg.aglib.kernel.symbol.load_common_structs()
pglist_data = f"""
typedef struct pglist_data {{
struct zone node_zones[{nzones}];
// ... the rest of the fields are not important
// but make the struct dynamic
char _pad[];
}} pg_data_t;
"""
pcp_off, name_off, freearea_off, pcp_sz, zone_sz = find_zone_offsets()
per_cpu_pages = get_pcp_struct(pcp_sz)
zone = ""
if pwndbg.aglib.kernel.krelease() < (5, 14):
zone = "#define BEFORE_V5_14\n"
result = f"#define KVERSION {pwndbg.aglib.kernel.symbol.kversion_cint()}\n"
result += pwndbg.aglib.kernel.symbol.COMMON_TYPES
pcp_off, name_off, freearea_off, pcp_pad, zone_sz = find_zone_offsets()
if "CONFIG_NUMA" in pwndbg.aglib.kernel.kconfig():
zone += "#define CONFIG_NUMA\n"
zone += f"""
result += "#define CONFIG_NUMA\n"
result += f"""
struct free_area {{
struct list_head free_list[{nmtypes}];
unsigned long nr_free;
}};
"""
result += f"""
struct per_cpu_pages {{
char _pad[{pcp_pad}];
/* Lists of pages, one per migrate type stored on the pcp-lists */
struct list_head lists[{npcplist}]; // constant is sufficient for now
}};
#if KVERSION < KERNEL_VERSION(5, 14, 0)
struct per_cpu_pageset {{
struct per_cpu_pages pcp;
}};
#endif
/* custom type for page list data */
#ifdef CONFIG_NUMA
typedef struct pglist_data *node_data_t[1]; // just support 1 node for now, the most common case
typedef struct pglist_data *node_data_t[{nnodes}];
#else
typedef struct pglist_data node_data_t;
#endif
struct zone {{
char _pad1[{hex(pcp_off)}];
#ifdef BEFORE_V5_14
char _pad1[{pcp_off}];
#if KVERSION < KERNEL_VERSION(5, 14, 0)
struct per_cpu_pageset *pageset;
#else
struct per_cpu_pages *per_cpu_pageset;
#endif
char _pad2[{hex(name_off - pcp_off - 8)}];
char _pad2[{name_off - pcp_off - 8}];
char* name;
char _pad3[{hex(freearea_off - name_off - 8)}];
char _pad3[{freearea_off - name_off - 8}];
struct free_area free_area[{MAX_ORDER}]; // just defaults to 11 is sufficient here
char _pad[{hex(zone_sz - freearea_off - (MAX_ORDER * (nmtypes * 0x10 + 8)))}];
char _pad[{zone_sz - freearea_off - (MAX_ORDER * (nmtypes * 0x10 + 8))}];
}};
"""
free_area = f"""
struct free_area {{
struct list_head free_list[{nmtypes}];
unsigned long nr_free;
}};
result += f"""
typedef struct pglist_data {{
struct zone node_zones[{nzones}];
// ... the rest of the fields are not important
}} pg_data_t;
"""
result = (
pwndbg.aglib.kernel.symbol.COMMON_TYPES + free_area + zone + per_cpu_pages + pglist_data
)
header_file_path = pwndbg.commands.cymbol.create_temp_header_file(result)
pwndbg.commands.cymbol.add_structure_from_header(header_file_path, "buddydump_structs", True)

@ -44,7 +44,7 @@ class Kallsyms:
self.kallsyms: Dict[str, Tuple[int, str]] = {}
self.kbase = pwndbg.aglib.kernel.kbase()
mapping = pwndbg.aglib.kernel.get_first_kernel_ro()
mapping = pwndbg.aglib.kernel.first_kernel_ro_page()
assert mapping is not None, "kernel memory mappings are missing"
self.r_base = mapping.vaddr

@ -22,9 +22,8 @@ def get_memory_map_raw() -> Tuple[pwndbg.lib.memory.Page, ...]:
return pwndbg.aglib.kernel.vmmap.kernel_vmmap(False)
def guess_physmap():
# this is mostly true
# https://www.kernel.org/doc/Documentation/x86/x86_64/mm.txt
@pwndbg.lib.cache.cache_until("stop")
def first_kernel_page_start():
for page in get_memory_map_raw():
if page.start and pwndbg.aglib.memory.is_kernel(page.start):
return page.start
@ -52,7 +51,7 @@ class ArchPagingInfo:
pagetableptr_cache: Dict[int, pwndbg.dbg_mod.Value] = {}
@property
@pwndbg.lib.cache.cache_until("start")
@pwndbg.lib.cache.cache_until("objfile")
def STRUCT_PAGE_SIZE(self):
a = pwndbg.aglib.typeinfo.load("struct page")
if a is None:
@ -61,7 +60,7 @@ class ArchPagingInfo:
return a.sizeof
@property
@pwndbg.lib.cache.cache_until("start")
@pwndbg.lib.cache.cache_until("objfile")
def STRUCT_PAGE_SHIFT(self):
# needs to be rounded up (consider the layout of vmemmap)
return math.ceil(math.log2(self.STRUCT_PAGE_SIZE))
@ -156,6 +155,7 @@ class ArchPagingInfo:
class x86_64PagingInfo(ArchPagingInfo):
# constants are taken from https://www.kernel.org/doc/Documentation/x86/x86_64/mm.txt
def __init__(self):
self.va_bits = 48 if self.paging_level == 4 else 51
# https://blog.zolutal.io/understanding-paging/
@ -183,7 +183,7 @@ class x86_64PagingInfo(ArchPagingInfo):
result = None
try:
target = self.physmap.to_bytes(8, byteorder="little")
mapping = pwndbg.aglib.kernel.get_first_kernel_ro()
mapping = pwndbg.aglib.kernel.first_kernel_ro_page()
result = next(pwndbg.search.search(target, mappings=[mapping]), None)
except Exception as e:
print(e)
@ -197,13 +197,14 @@ class x86_64PagingInfo(ArchPagingInfo):
@property
@pwndbg.lib.cache.cache_until("stop")
def physmap(self):
pob = pwndbg.aglib.symbol.lookup_symbol_addr("page_offset_base")
result = None
if pob is not None:
if pwndbg.aglib.memory.peek(pob):
result = pwndbg.aglib.memory.u64(pob)
result = pwndbg.aglib.kernel.symbol.try_usymbol("page_offset_base")
if result is None:
return guess_physmap()
result = INVALID_ADDR
min = 0xFFFF888000000000 if self.paging_level == 4 else 0xFF11000000000000
for page in get_memory_map_raw():
if page.start and page.start >= min:
result = page.start
break
return result
@property
@ -240,27 +241,11 @@ class x86_64PagingInfo(ArchPagingInfo):
return 0xFFD4000000000000 if self.paging_level == 5 else 0xFFFFEA0000000000
@property
@pwndbg.lib.cache.cache_until("stop")
def paging_level(self) -> int:
if pwndbg.aglib.kernel.has_debug_info():
# https://elixir.bootlin.com/linux/v6.2/source/arch/x86/include/asm/cpufeatures.h#L381
X86_FEATURE_LA57 = 16 * 32 + 16
feature = X86_FEATURE_LA57
# Separate to avoid using kconfig if possible
boot_cpu_data = pwndbg.aglib.symbol.lookup_symbol("boot_cpu_data")
assert boot_cpu_data is not None, "Symbol boot_cpu_data not exists"
boot_cpu_data = boot_cpu_data.dereference()
capabilities = boot_cpu_data["x86_capability"]
cpu_feature_capability = (int(capabilities[feature // 32]) >> (feature % 32)) & 1 == 1
if not cpu_feature_capability or "no5lvl" in pwndbg.aglib.kernel.kcmdline():
return 4
return 5
# CONFIG_X86_5LEVEL is only a hint -- whether 5lvl paging is used depends on the hardware
# see also: https://www.kernel.org/doc/html/next/x86/x86_64/mm.html
pages = get_memory_map_raw()
for page in pages:
if pwndbg.aglib.memory.is_kernel(page.start):
if page.start < (0xFFF << (4 * 13)):
if first_kernel_page_start() < (0xFFF << (4 * 13)):
return 5
return 4
@ -382,9 +367,9 @@ class Aarch64PagingInfo(ArchPagingInfo):
def physmap(self):
# addr = pwndbg.aglib.symbol.lookup_symbol_addr("memstart_addr")
# if addr is None:
# return guess_physmap()
# return first_kernel_page_start()
# return pwndbg.aglib.memory.u(addr)
return guess_physmap()
return first_kernel_page_start()
@property
@pwndbg.lib.cache.cache_until("stop")
@ -501,7 +486,7 @@ class Aarch64PagingInfo(ArchPagingInfo):
raise NotImplementedError()
@property
@pwndbg.lib.cache.cache_until("stop")
@pwndbg.lib.cache.cache_until("forever")
def paging_level(self):
# https://www.kernel.org/doc/html/v5.3/arm64/memory.html
if self.page_shift == 16:

@ -3,6 +3,7 @@ from __future__ import annotations
from typing import Generator
from typing import List
from typing import Set
from typing import Tuple
import pwndbg
import pwndbg.aglib.kernel.symbol
@ -453,7 +454,22 @@ def find_containing_slab_cache(addr: int) -> SlabCache | None:
#########################################
def kmem_cache_pad_sz(kconfig) -> int:
def kmem_cache_node_pad_sz(val):
    """Heuristically find the padding size before the partial-list fields of a
    struct kmem_cache_node located at address ``val``.

    Scans forward in 8-byte steps looking for the layout
    ``nr_partial`` (small counter) immediately followed by a ``struct list_head``
    (two kernel pointers). Returns the byte offset of that layout relative to
    ``val``, or ``None`` if it is not found within the first 8 slots.

    NOTE(review): assumes a 64-bit target (8-byte pointers) — confirm for
    32-bit kernels.
    """
    for j in range(8):
        nr_partial = pwndbg.aglib.memory.u32(val)
        # Candidate list_head: two consecutive pointers after nr_partial.
        # (renamed from `next`/`prev` to avoid shadowing the builtin `next`)
        next_ptr = pwndbg.aglib.memory.u64(val + 0x8)
        prev_ptr = pwndbg.aglib.memory.u64(val + 0x10)
        val += 0x8
        if (
            nr_partial < 0x20
            and pwndbg.aglib.memory.is_kernel(next_ptr)
            and pwndbg.aglib.memory.is_kernel(prev_ptr)
        ):
            return j * 8
    return None
def kmem_cache_pad_sz(kconfig) -> Tuple[int, int]:
# find the distance between the first kmem_cache's name and its first node cache
# the name for the first kmem_cache (most likely) has the name "kmem_cache"
# and the global var is also named "kmem_cache"
@ -461,25 +477,54 @@ def kmem_cache_pad_sz(kconfig) -> int:
name_off = None
slab_caches = pwndbg.aglib.kernel.slab_caches()
assert slab_caches, "can't find slab_caches"
kmem_cache = int(slab_caches["prev"]) & ~0xFFF
kmem_cache = int(slab_caches["prev"]) & ~0xFF
for i in range(0x20):
val = pwndbg.aglib.memory.u64(kmem_cache + i * 8)
if pwndbg.aglib.memory.string(val) == name.encode():
name_off = i * 8
break
assert name_off, "can't determine kmem_cache name offset"
distance = None
for i in range(3, 0x20):
val = pwndbg.aglib.memory.u64(kmem_cache + i * 8 + name_off)
if pwndbg.aglib.memory.peek(val):
nr_partial = pwndbg.aglib.memory.u64(val + 0x8)
next = pwndbg.aglib.memory.u64(val + 0x10)
prev = pwndbg.aglib.memory.u64(val + 0x18)
if (
nr_partial < 0x20
and pwndbg.aglib.memory.is_kernel(next)
and pwndbg.aglib.memory.is_kernel(prev)
if pwndbg.aglib.kernel.krelease() >= (6, 2) and all(
config not in kconfig
for config in (
"CONFIG_HARDENED_USERCOPY",
"CONFIG_KASAN",
)
):
if all(
config not in kconfig
for config in (
"CONFIG_SYSFS",
"CONFIG_SLAB_FREELIST_HARDENED",
"CONFIG_NUMA",
)
):
node_cache_pad = kmem_cache_node_pad_sz(
kmem_cache + name_off + 0x8 * 3
) # name ptr + 2 list ptrs
assert node_cache_pad, "can't determine kmem cache node padding size"
distance = 8 if "CONFIG_SLAB_FREELIST_RANDOM" in kconfig else 0
return distance, node_cache_pad
elif "CONFIG_SLAB_FREELIST_RANDOM" in kconfig:
for i in range(3, 0x20):
ptr = kmem_cache + name_off + i * 8
val = pwndbg.aglib.memory.u64(ptr)
if pwndbg.aglib.memory.is_kernel(val):
distance = (i + 1) * 8
node_cache_pad = kmem_cache_node_pad_sz(kmem_cache + name_off + distance)
assert node_cache_pad, "can't determine kmem cache node padding size"
return distance, node_cache_pad
distance, node_cache_pad = None, None
for i in range(3, 0x20):
ptr = kmem_cache + name_off + i * 8
val = pwndbg.aglib.memory.u64(ptr - 8)
if pwndbg.aglib.memory.peek(val) is not None:
continue
val = pwndbg.aglib.memory.u64(ptr)
if pwndbg.aglib.memory.peek(val) is None:
continue
node_cache_pad = kmem_cache_node_pad_sz(val)
if node_cache_pad is not None:
distance = i * 8
break
assert distance, "can't find kmem_cache node"
@ -499,25 +544,32 @@ def kmem_cache_pad_sz(kconfig) -> int:
if "CONFIG_HARDENED_USERCOPY" in kconfig or pwndbg.aglib.kernel.krelease() < (6, 2):
distance -= 8
assert distance < 0x1000, "cannot find kmem_cache padding size"
return distance
return distance, node_cache_pad
def kmem_cache_structs():
to_define = None
if pwndbg.aglib.kernel.krelease() < (5, 17):
to_define = "BEFORE_V5_17"
elif pwndbg.aglib.kernel.krelease() < (6, 2):
to_define = "BETWEEN_V5_17_AND_V6_1"
else:
to_define = "SINCE_V6_2"
result = f"#define {to_define}\n"
def kmem_cache_structs(node_cache_pad):
result = f"#define KVERSION {pwndbg.aglib.kernel.symbol.kversion_cint()}\n"
if "CONFIG_SLUB_CPU_PARTIAL" in pwndbg.aglib.kernel.kconfig():
result += "#define CONFIG_SLUB_CPU_PARTIAL\n"
result += """
struct kmem_cache_node {
spinlock_t list_lock;
result += f"""
struct kmem_cache_node {{
char _pad[{node_cache_pad}];
unsigned long nr_partial;
struct list_head partial;
}};
"""
result += """
struct kasan_cache {
#if !((KERNEL_VERSION(6, 1, 0) <= KVERSION && KVERSION < KERNEL_VERSION(6, 3, 0)))
int alloc_meta_offset;
int free_meta_offset;
#elif defined(CONFIG_KASAN_GENERIC)
int alloc_meta_offset;
int free_meta_offset;
#endif
#if KERNEL_VERSION(5, 12, 0) <= KVERSION && KVERSION < KERNEL_VERSION(6, 3, 0)
bool is_kmalloc;
#endif
};
struct kmem_cache_order_objects {
unsigned int x;
@ -528,10 +580,10 @@ def kmem_cache_structs():
};
typedef unsigned int gfp_t;
typedef unsigned int slab_flags_t;
#ifndef BEFORE_V5_17
#if KVERSION >= KERNEL_VERSION(5, 17, 0)
struct slab {
unsigned long __page_flags;
#ifdef SINCE_V6_2
#if KVERSION >= KERNEL_VERSION(6, 2, 0)
struct kmem_cache *slab_cache;
#endif
union {
@ -543,7 +595,7 @@ def kmem_cache_structs():
};
#endif
};
#ifdef BETWEEN_V5_17_AND_V6_1
#if KVERSION < KERNEL_VERSION(6, 2, 0)
struct kmem_cache *slab_cache;
#endif
void *freelist; /* first free object */
@ -582,16 +634,6 @@ def load_slab_typeinfo():
pwndbg.aglib.kernel.symbol.load_common_structs()
kconfig = pwndbg.aglib.kernel.kconfig()
defs = []
if pwndbg.aglib.kernel.krelease() < (6, 2):
defs.append("BEFORE_V6_2")
if pwndbg.aglib.kernel.krelease() >= (6, 3):
defs.append("SINCE_V6_3")
if pwndbg.aglib.kernel.krelease() < (5, 19):
defs.append("BEFORE_V5_19")
if pwndbg.aglib.kernel.krelease() >= (5, 16):
defs.append("SINCE_V5_16")
if pwndbg.aglib.kernel.krelease() >= (5, 9):
defs.append("SINCE_V5_9")
configs = (
"CONFIG_SLUB_TINY",
"CONFIG_SLUB_CPU_PARTIAL",
@ -605,14 +647,15 @@ def load_slab_typeinfo():
for config in configs:
if config in kconfig:
defs.append(config)
sz = kmem_cache_pad_sz(kconfig)
result = "\n".join(f"#define {s}" for s in defs)
sz, node_cache_pad = kmem_cache_pad_sz(kconfig)
result = f"#define KVERSION {pwndbg.aglib.kernel.symbol.kversion_cint()}\n"
result += "\n".join(f"#define {s}" for s in defs)
result += pwndbg.aglib.kernel.symbol.COMMON_TYPES
result += kmem_cache_structs()
result += kmem_cache_structs(node_cache_pad)
# this is the kmem_cache SLUB representation for all 5.x and 6.x
result += f"""
struct kmem_cache {{
#if !defined(CONFIG_SLUB_TINY) || defined(BEFORE_V6_2)
#if !defined(CONFIG_SLUB_TINY) || KVERSION < KERNEL_VERSION(6, 2, 0)
struct kmem_cache_cpu *cpu_slab;
#endif
/* Used for retrieving partial slabs, etc. */
@ -620,14 +663,14 @@ def load_slab_typeinfo():
unsigned long min_partial;
unsigned int size; /* Object size including metadata */
unsigned int object_size; /* Object size without metadata */
#if defined(SINCE_V5_9)
#if KVERSION >= KERNEL_VERSION(5, 9, 0)
struct reciprocal_value reciprocal_size;
#endif
unsigned int offset; /* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
/* Number of per cpu partial objects to keep around */
unsigned int cpu_partial;
#ifdef SINCE_V5_16
#if KVERSION >= KERNEL_VERSION(5, 16, 0)
/* Number of per cpu partial slabs to keep around */
unsigned int cpu_partial_{slab_struct_type()}s;
#endif
@ -635,7 +678,7 @@ def load_slab_typeinfo():
struct kmem_cache_order_objects oo;
/* Allocation and freeing of slabs */
struct kmem_cache_order_objects min;
#ifdef BEFORE_V5_19
#if KVERSION < KERNEL_VERSION(5, 19, 0)
struct kmem_cache_order_objects max;
#endif
gfp_t allocflags; /* gfp flags to use on each alloc */
@ -657,10 +700,10 @@ def load_slab_typeinfo():
#ifdef CONFIG_SLAB_FREELIST_RANDOM
unsigned int *random_seq;
#endif
#if (defined(SINCE_V6_3) && defined(CONFIG_KASAN_GENERIC) || (!defined(SINCE_V6_3) && defined(CONFIG_KASAN)))
char _pad2[8]; // the kasan_cache struct includes only 2 int's
#if (KVERSION >= KERNEL_VERSION(6, 3, 0) && defined(CONFIG_KASAN_GENERIC) || (KVERSION < KERNEL_VERSION(6, 3, 0) && defined(CONFIG_KASAN)))
struct kasan_cache kasan_info;
#endif
#if defined(BEFORE_V6_2) || defined(CONFIG_HARDENED_USERCOPY)
#if KVERSION < KERNEL_VERSION(6, 2, 0) || defined(CONFIG_HARDENED_USERCOPY)
unsigned int useroffset; /* Usercopy region offset */
unsigned int usersize; /* Usercopy region size */
#endif

@ -61,7 +61,7 @@ def try_usymbol(name: str, size=pwndbg.aglib.kernel.ptr_size) -> int:
return None
@pwndbg.aglib.kernel.requires_debug_symbols(["zone_names"], default=4)
@pwndbg.aglib.kernel.requires_debug_symbols("zone_names", default=4)
def nzones() -> int:
_zone_names = pwndbg.aglib.symbol.lookup_symbol_addr("zone_names")
for i in range(len(POSSIBLE_ZONE_NAMES) + 1):
@ -78,7 +78,7 @@ def nmtypes() -> int:
def npcplist() -> int:
"""returns NR_PCP_LISTS (https://elixir.bootlin.com/linux/v6.13/source/include/linux/mmzone.h#L671)"""
if (
not pwndbg.aglib.kernel.has_debug_symbols(["node_zones"])
not pwndbg.aglib.kernel.has_debug_symbols("node_zones")
or not pwndbg.aglib.kernel.has_debug_info()
):
if pwndbg.aglib.kernel.krelease() < (5, 14):
@ -99,29 +99,38 @@ def npcplist() -> int:
return 0
def kversion_cint(kversion=None):
    """Encode a ``(major, minor, patch)`` kernel version as an integer, the
    same way the kernel's ``KERNEL_VERSION(x, y, z)`` macro does.

    When *kversion* is ``None``, the running kernel's release tuple is used.
    """
    if kversion is None:
        kversion = pwndbg.aglib.kernel.krelease()
    major, minor, patch = kversion
    # (major << 16) == major * 65536 and (minor << 8) == minor * 256;
    # plain addition keeps the original overflow/carry behavior intact.
    return (major << 16) + (minor << 8) + patch
#########################################
# common structures
#
#########################################
COMMON_TYPES = """
#include <stdint.h>
#include <linux/version.h>
typedef unsigned char u8;
typedef char s8;
typedef unsigned short u16;
typedef unsigned int u32;
typedef unsigned int spinlock_t;
typedef long long s64;
#define bool int
#if UINTPTR_MAX == 0xffffffff
typedef int16_t arch_word_t;
#else
typedef int32_t arch_word_t;
#endif
typedef struct {
int counter;
} atomic_t;
struct list_head {
struct list_head *next, *prev;
};
typedef struct {
int counter;
} atomic_t;
struct kmem_cache;
enum pageflags {
PG_locked, /* Page is locked. Don't touch. */
@ -145,6 +154,7 @@ enum pageflags {
PG_unevictable, /* Page is "unevictable" */
PG_dropbehind, /* drop pages on IO completion */
};
"""
@ -154,21 +164,14 @@ def load_common_structs():
if pwndbg.aglib.typeinfo.lookup_types("struct page") is not None:
return
defs = []
if pwndbg.aglib.kernel.krelease() < (5, 17):
defs.append("BEFORE_V5_17")
if pwndbg.aglib.kernel.krelease() < (5, 16):
defs.append("BEFORE_V5_16")
if pwndbg.aglib.kernel.krelease() < (6, 7):
defs.append("BEFORE_V6_7")
if pwndbg.aglib.kernel.krelease() >= (6, 1):
defs.append("SINCE_V6_1")
for config in (
"CONFIG_MEMCG",
"CONFIG_KASAN",
):
if config in pwndbg.aglib.kernel.kconfig():
defs.append(config)
result = "\n".join(f"#define {s}" for s in defs)
result = f"#define KVERSION {kversion_cint()}\n"
result += "\n".join(f"#define {s}" for s in defs)
result += COMMON_TYPES
result += """
struct page { // just a simplied page struct with relavent fields
@ -188,14 +191,14 @@ def load_common_structs():
};
};
};
#ifdef BEFORE_V5_17
#if KVERSION < KERNEL_VERSION(5, 17, 0)
struct { /* slab, slob and slub */
union {
struct list_head slab_list;
struct { /* Partial pages */
struct page *next;
arch_word_t pages; /* Nr of pages left */
#ifdef BEFORE_V5_16
#if KVERSION < KERNEL_VERSION(5, 16, 0)
arch_word_t pobjects; /* Approximate count */
#endif
};
@ -227,17 +230,17 @@ def load_common_structs():
#if defined(WANT_PAGE_VIRTUAL) /* never set for x86 and arm */
void *virtual;
#endif /* WANT_PAGE_VIRTUAL */
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
#ifndef BEFORE_V6_7 /* TODO: seems never got set for all the kernel builds I have worked with */
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS /* TODO: seems never got set for all the kernel builds I have worked with */
#if KVERSION >= KERNEL_VERSION(6, 7, 0)
int _last_cpupid;
#endif
#endif
#if defined(CONFIG_KASAN) && defined(SINCE_V6_1)
#if defined(CONFIG_KASAN) && KVERSION >= KERNEL_VERSION(6, 1, 0)
struct page *kmsan_shadow;
struct page *kmsan_origin;
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
#ifdef BEFORE_V6_7
#if KVERSION < KERNEL_VERSION(6, 7, 0)
int _last_cpupid;
#endif
#endif
@ -329,6 +332,15 @@ class x86_64Symbols(ArchSymbols):
return int(result.group(1), 16)
return None
def qword_mov_reg_ripoff(self, disass):
    """Extract an address from a RIP-relative qword ``mov`` in *disass*.

    Looks for a ``mov ... [rip + 0x...]`` operand annotated with its resolved
    16-hex-digit absolute address and returns displacement + address, or
    ``None`` when no such instruction is present.
    """
    flat = "".join(disass.splitlines())
    match = self.regex(
        flat,
        r".*?\bmov.*\[rip\s\+\s(0x[0-9a-f]+)\].*?(0x[0-9a-f]{16})\s\<",
    )
    if match is None:
        return None
    return int(match.group(1), 16) + int(match.group(2), 16)
def _node_data(self):
disass = self.disass("first_online_pgdat")
result = self.dword_mov_reg_memoff(disass)
@ -345,7 +357,10 @@ class x86_64Symbols(ArchSymbols):
result = self.dword_add_reg_memoff(disass)
if result is not None:
return result
return self.qword_mov_reg_const(disass)
result = self.qword_mov_reg_const(disass)
if result is not None:
return result
return self.qword_mov_reg_ripoff(disass)
class Aarch64Symbols(ArchSymbols):

@ -225,10 +225,6 @@ def print_pcp_set(pba: ParsedBuddyArgs, cbp: CurrentBuddyParams):
if cbp.zone.type.has_field("per_cpu_pageset"):
pcp = per_cpu(cbp.zone["per_cpu_pageset"], pba.cpu)
pcp_lists = pcp["lists"]
cbp.sections[1] = (
"per_cpu_pageset",
f"number of pages {cbp.indent.aux_hex(int(pcp['count']))}",
)
elif cbp.zone.type.has_field("pageset"):
pcp = per_cpu(cbp.zone["pageset"], pba.cpu)
pcp_lists = pcp["pcp"]["lists"]

@ -60,6 +60,8 @@ class Kconfig(UserDict): # type: ignore[type-arg]
self.data["CONFIG_MEMORY_ISOLATION"] = "y"
if self.CONFIG_KASAN:
self.data["CONFIG_KASAN"] = "y"
if self.CONFIG_SYSFS:
self.data["CONFIG_SYSFS"] = "y"
def get_key(self, name: str) -> str | None:
# First attempt to lookup the value assuming the user passed in a name
@ -161,6 +163,10 @@ class Kconfig(UserDict): # type: ignore[type-arg]
def CONFIG_MEMORY_ISOLATION(self) -> bool:
return pwndbg.aglib.symbol.lookup_symbol("start_isolate_page_range") is not None
@property
def CONFIG_SYSFS(self) -> bool:
    """Heuristic CONFIG_SYSFS detection: the option is considered enabled
    when the ``sysfs_kf_seq_show`` kernel symbol can be resolved."""
    symbol = pwndbg.aglib.symbol.lookup_symbol("sysfs_kf_seq_show")
    return symbol is not None
def update_with_file(self, file_path):
for line in open(file_path, "r").read().splitlines():
split = line.split("=")

Loading…
Cancel
Save