#!/usr/bin/env python
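#
# Generate the per-command markdown documentation for pwndbg.
#
# This script takes no command-line arguments. It consumes the temporary
# per-debugger JSON files produced earlier in the docs pipeline (presumably by
# scripts/generate-docs.sh), renders one markdown page per command plus an
# index.md grouped by category, and then either updates the files on disk or,
# if the PWNDBG_DOCGEN_VERIFY environment variable is set, only verifies that
# the existing files match what would be generated.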
from __future__ import annotations

import json
import os
import sys
import textwrap
from typing import Dict
from typing import Tuple

from mdutils.mdutils import MdUtils

from scripts._docs.command_docs_common import BASE_PATH
from scripts._docs.command_docs_common import ExtractedCommand
from scripts._docs.command_docs_common import category_to_folder_name
from scripts._docs.command_docs_common import extracted_filename
from scripts._docs.gen_docs_generic import ALL_DEBUGGERS
from scripts._docs.gen_docs_generic import strip_ansi_color
from scripts._docs.gen_docs_generic import verify_existence

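# Every generated .md file ends with these two marker lines. Anything below
# them is treated as a hand-written extension: update_files() preserves it and
# file_has_handwritten() checks for it.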
AUTOGEN_END_MARKER1 = "<!-- END OF AUTOGENERATED PART. Do not modify this line or the line below, they mark the end of the auto-generated part of the file. If you want to extend the documentation in a way which cannot easily be done by adding to the command help description, write below the following line. -->\n"
AUTOGEN_END_MARKER2 = "<!-- ------------\\>8---- ----\\>8---- ----\\>8------------ -->\n"


def get_markdown_body(cmd: ExtractedCommand) -> str:
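    """
    Render the markdown body for a single command: usage, description,
    aliases, positional/optional argument tables, examples, notes and the
    epilog, in that order. ANSI color codes are stripped from the result.
    """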
    mdFile = MdUtils(cmd.filename)

    # usage
    mdFile.insert_code(cmd.usage, language="text")
    # description
    mdFile.new_paragraph(cmd.description + "\n")
    # aliases
    if cmd.aliases:
        alias_txt = "Alias" + ("es" if len(cmd.aliases) > 1 else "") + ":"
        mdFile.write(f"\n**{alias_txt}** " + ", ".join(cmd.aliases) + "\n")

    # positional arguments
    if cmd.positionals:
        positionals = [("Positional Argument", "Help")] + cmd.positionals
        # flatten the list
        positionals = [item for tup in positionals for item in tup]
        positionals = [x.replace("\n", " ") for x in positionals]

        mdFile.write("### Positional arguments\n")
        mdFile.new_table(
            columns=2,
            rows=len(positionals) // 2,
            text=positionals,
            text_align="left",
        )
        mdFile.write("\n")

    # optional arguments
    if cmd.optionals:
        optionals = [("Short", "Long", "Help")] + cmd.optionals
        # flatten the list
        optionals = [item for tup in optionals for item in tup]
        optionals = [x.replace("\n", " ") for x in optionals]

        mdFile.write("### Optional arguments\n")
        mdFile.new_table(
            columns=3,
            rows=len(optionals) // 3,
            text=optionals,
            text_align="left",
        )
        mdFile.write("\n")

    if cmd.examples:
        # Put the examples into a code block so they are formatted sensibly.
        mdFile.write("### Examples\n```text\n" + cmd.examples + "\n```\n")

    if cmd.notes:
        # The author of the note should make sure it makes sense in markdown.
        mdFile.write("### Notes\n" + cmd.notes + "\n")

    if cmd.pure_epilog:
        mdFile.write("### Extra\n" + cmd.pure_epilog + "\n")

    return "\n" + strip_ansi_color(mdFile.get_md_text().strip()) + "\n"


def convert_all_to_markdown(
    extracted: list[Tuple[str, Dict[str, ExtractedCommand]]],
) -> Dict[str, str]:
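    """
    Build the full autogenerated markdown for every command documentation file.

    Returns:
        A dict mapping each output .md filename to its markdown content. If
        the debuggers disagree about a command, the page gets one content tab
        per debugger instead of a single body.
    """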
    result = {}

    # Enumerate all the files we need to create.
    all_filenames: set[str] = set()
    for _, data in extracted:
        for filename in data.keys():
            all_filenames.add(filename)

    # Generate markdown for those files.
    for filename in all_filenames:
        # Make a (debugger name, command) list in case some
        # debuggers disagree on what some command should
        # display. We won't add debuggers that don't have the
        # command.
        cmd_variants: list[Tuple[str, ExtractedCommand]] = []

        for debugger, data in extracted:
            if filename in data:
                cmd_variants.append((debugger, data[filename]))

        assert cmd_variants

        # command title
        markdown = f"# {cmd_variants[0][1].name}\n"

        # Note about supported debuggers if the command isn't
        # available everywhere.
        if len(cmd_variants) != len(ALL_DEBUGGERS):
            supported_list = ", ".join([x[0].upper() for x in cmd_variants])
            markdown += '<small style="color: lightgray;">'
            markdown += f"(only in {supported_list})"
            markdown += "</small>\n"

        debuggers_agree = all(x[1] == cmd_variants[0][1] for x in cmd_variants)

        if debuggers_agree:
            markdown += get_markdown_body(cmd_variants[0][1])
        else:
            for debugger, dcmd in sorted(cmd_variants):
                # Content tabs
                # https://squidfunk.github.io/mkdocs-material/reference/content-tabs/
                markdown += f'\n=== "{debugger.upper()}"'

                # Content tab bodies must be indented by four spaces.
                body = get_markdown_body(dcmd)
                body = textwrap.indent(body, "    ")
                markdown += "\n\n    " + body

        autogen_warning = "<!-- THIS PART OF THIS FILE IS AUTOGENERATED. DO NOT MODIFY IT. See scripts/generate-docs.sh -->"
        result[filename] = autogen_warning + "\n" + markdown

    return result


def generate_index(
    extracted: list[Tuple[str, Dict[str, ExtractedCommand]]],
) -> str:
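    """
    Build the markdown for the top-level index.md: all commands grouped by
    category, each linking to its generated page.
    """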
    # Make a dict of all commands.
    all_cmds: Dict[str, ExtractedCommand] = {}
    for _, data in extracted:
        for filename in data.keys():
            if filename not in all_cmds:
                all_cmds[filename] = data[filename]

    # Make a map from categories to those commands.
    category_to_filename: Dict[str, list[str]] = {}
    for filename, cmd in all_cmds.items():
        if cmd.category not in category_to_filename:
            category_to_filename[cmd.category] = []
        category_to_filename[cmd.category].append(filename)

    mdFile = MdUtils(os.path.join(BASE_PATH, "index.md"))
    mdFile.new_header(level=1, title="Commands")

    # Make sure we sort everything so the order is consistent.
    for cat in sorted(category_to_filename.keys()):
        mdFile.new_header(level=2, title=f"{cat}")

        items = []
        # Ditto.
        for filename in sorted(category_to_filename[cat]):
            cmd = all_cmds[filename]
            name = cmd.name
            short_desc = cmd.description.splitlines()[0]
            folder = category_to_folder_name(cat)
            items.append(f" [{name}]({folder}/{name}.md) - {short_desc}")

        mdFile.new_list(items=items)

    index_autogen_warning = (
        "<!-- THIS FILE IS AUTOGENERATED. DO NOT EDIT IT. See scripts/generate-docs.sh -->\n"
    )
    return index_autogen_warning + mdFile.get_md_text()


def verify_files(filename_to_markdown: Dict[str, str]) -> str | None:
    """
    Verify all the markdown files are up to date with the sources.

    Returns:
        None if everything is up-to-date.
        A string containing the error message if something is not.
    """

    for filename, markdown in filename_to_markdown.items():
        print(f"Checking {filename} ..")

        if not os.path.exists(filename):
            return f"File {filename} does not exist."

        with open(filename, "r") as file:
            file_data = file.readlines()

        markdown = [x + "\n" for x in markdown.splitlines()]
        mkdlen = len(markdown)

        if len(file_data) < (mkdlen + 3):
            return (
                f"File {filename} is too short. Expected {mkdlen + 3} lines, got {len(file_data)}."
            )

        if not (
            file_data[mkdlen + 1] == AUTOGEN_END_MARKER1
            and file_data[mkdlen + 2] == AUTOGEN_END_MARKER2
        ):
            return f'Expected autogenerated end markers in {filename} @ lines {mkdlen + 1} and {mkdlen + 2}. Instead found "{file_data[mkdlen + 1]}" and "{file_data[mkdlen + 2]}".'

        for i in range(mkdlen):
            if file_data[i] != markdown[i]:
                return f'File {filename} differs from autogenerated on line {i}.\nFile: "{file_data[i]}".\nAutogenerated: "{markdown[i]}".'

    return None


def update_files(filename_to_markdown: Dict[str, str]):
    """
    Fix files so they are up to date with the sources. This also
    creates new files/directories if needed.
    """
    for filename, markdown in filename_to_markdown.items():
        print(f"Updating {filename} ..")

        if not os.path.exists(filename):
            # Simple case, just create the file and write it.
            os.makedirs(os.path.dirname(filename), exist_ok=True)
            with open(filename, "w") as file:
                file.write(markdown + "\n" + AUTOGEN_END_MARKER1 + AUTOGEN_END_MARKER2)
            continue

        # Need to find the marker in the file, and edit only above that part.
        with open(filename, "r+") as file:
            file_data = file.readlines()
            marker_idx = -1
            for i in reversed(range(len(file_data))):
                if file_data[i] == AUTOGEN_END_MARKER2:
                    if i == 0 or file_data[i - 1] != AUTOGEN_END_MARKER1:
                        print(
                            f"ERROR: In file {filename} found the second autogen marker, but couldn't find the first ({AUTOGEN_END_MARKER1})."
                        )
                        sys.exit(6)
                    marker_idx = i - 1
                    break

            if marker_idx == -1:
                print(
                    f"ERROR: In file {filename} couldn't find autogen marker ({AUTOGEN_END_MARKER2})."
                )
                sys.exit(7)

            handwritten_doc = "".join(file_data[marker_idx:])  # Includes the autogen markers

            final = markdown + "\n" + handwritten_doc
            file.seek(0)
            file.write(final)
            file.truncate()


def file_has_handwritten(filename: str) -> bool:
    """
    Return True if the file has a hand-written part.

    Also returns True if the autogen markers are malformed or
    don't exist.
    """
    with open(filename, "r+") as file:
        file_data = file.readlines()
        marker_idx = -1
        for i in reversed(range(len(file_data))):
            if file_data[i] == AUTOGEN_END_MARKER2:
                if i == 0 or file_data[i - 1] != AUTOGEN_END_MARKER1:
                    return True

                marker_idx = i - 1
                break

        if marker_idx == -1:
            return True

        if len(file_data) == marker_idx + 2:
            # there is nothing after the markers
            return False

        handwritten_doc = "".join(file_data[marker_idx + 2 :])
        if handwritten_doc.strip():
            # There is some non-whitespace after the markers
            return True
        # There is only whitespace after the markers, we won't
        # complain about this.
        return False


def read_extracted() -> list[Tuple[str, Dict[str, ExtractedCommand]]]:
    """
    Read json files from disk.

    Returns:
        A list of tuples of the form: (debugger name, filename-mapped
        extracted commands for that debugger).
    """
    result: list[Tuple[str, Dict[str, ExtractedCommand]]] = []

    for debugger in ALL_DEBUGGERS:
        filepath = extracted_filename(debugger)
        print(f"Consuming {filepath}..")

        with open(filepath, "r") as file:
            raw_data = json.loads(file.read())

        # Convert the dict objs to ExtractedCommands
        data: Dict[str, ExtractedCommand] = {}
        for filename, cmd_dict in raw_data.items():
            data[filename] = ExtractedCommand(**cmd_dict)

        result.append((debugger, data))

        # We consumed the temporary file, we can delete it now.
        os.remove(filepath)

    return result


def main():
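    """
    Entry point. With PWNDBG_DOCGEN_VERIFY set, only verify that the existing
    docs match what would be generated; otherwise regenerate them in place and
    remove extra generated files (unless they contain a hand-written part).
    """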
    if len(sys.argv) > 1:
        print("This script doesn't accept any arguments.")
        print("See top of the file for usage.")
        sys.exit(1)

    just_verify = False
    if os.getenv("PWNDBG_DOCGEN_VERIFY"):
        just_verify = True

    print("\n==== Command Documentation ====")

    extracted = read_extracted()
    markdowned = convert_all_to_markdown(extracted)
    markdowned[os.path.join(BASE_PATH, "index.md")] = generate_index(extracted)

    if just_verify:
        print("Checking if all files are in place..")
        missing, extra = verify_existence(list(markdowned.keys()), BASE_PATH)
        if missing or extra:
            print("To fix this please run ./scripts/generate-docs.sh.")
            sys.exit(2)
        print("Every file is where it should be!")

        print("Verifying contents...")
        err = verify_files(markdowned)
        if err:
            print("VERIFICATION FAILED. The files differ from what would be auto-generated.")
            print("Error:", err)
            print("Please run ./scripts/generate-docs.sh from project root and commit the changes.")
            sys.exit(3)

        print("Verification successful!")
    else:
        print("Updating files...")
        update_files(markdowned)
        print("Update successful.")

        missing, extra = verify_existence(list(markdowned.keys()), BASE_PATH)
        assert not missing, "Some files are missing, which should be impossible."
        if extra:
            print("Take care! Deleting these extra files:")
            not_deleted = []
            for e in extra:
                if file_has_handwritten(e):
                    not_deleted.append(e)
                else:
                    print(e)
                    os.remove(e)

            if not_deleted:
                print("\nSome files were not auto-deleted as they contain a hand-written part")
                print("(or the markers for the hand-written part are malformed). Please delete")
                print("them manually, probably after transferring the hand-written part to a")
                print("new file.")
                print(f"Files ({len(not_deleted)}):")
                print("\n".join(not_deleted))
                sys.exit(18)
            else:
                print("Deleted successfully.")


if __name__ == "__main__":
    main()