nixos-render-docs: add support for figures

pennae 2023-06-21 18:55:13 +02:00
parent e5e738b72a
commit 8fb4cf8b7c
4 changed files with 73 additions and 20 deletions
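
For orientation: the change teaches nixos-render-docs a `::: {.figure}` block kind parallel to the existing `::: {.example}` blocks, renders it with docbook-style figure markup, numbers figures book-wide, and lists them in the table of contents. A sketch of the authoring syntax this is meant to enable, assuming it mirrors the example-block conventions (the id, title and image below are invented):

# Hypothetical manual source for the new block kind; not taken from the real manual.
figure_block = """\
::: {.figure #fig-nixos-logo}
# The NixOS logo
![NixOS logo](./nixos-logo.png "The NixOS logo")
:::
"""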

View File

@@ -237,6 +237,26 @@ class HTMLRenderer(Renderer):
f'<img src="{escape(src, True)}" {alt} {title} />'
'</div>'
)
def figure_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
if anchor := cast(str, token.attrs.get('id', '')):
anchor = f'<a id="{escape(anchor, True)}"></a>'
return f'<div class="figure">{anchor}'
def figure_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return (
' </div>'
'</div><br class="figure-break" />'
)
def figure_title_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return (
'<p class="title">'
' <strong>'
)
def figure_title_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return (
' </strong>'
'</p>'
'<div class="figure-contents">'
)
def _make_hN(self, level: int) -> tuple[str, str]:
return f"h{min(6, max(1, level + self._hlevel_offset))}", ""

View File

@@ -426,6 +426,19 @@ class ManualHTMLRenderer(RendererMixin, HTMLRenderer):
if next_level:
result.append(f'<dd><dl>{"".join(next_level)}</dl></dd>')
return result
def build_list(kind: str, id: str, lst: Sequence[TocEntry]) -> str:
if not lst:
return ""
entries = [
f'<dt>{i}. <a href="{e.target.href()}">{e.target.toc_html}</a></dt>'
for i, e in enumerate(lst, start=1)
]
return (
f'<div class="{id}">'
f'<p><strong>List of {kind}</strong></p>'
f'<dl>{"".join(entries)}</dl>'
'</div>'
)
# we don't want to generate the "Table of Contents" header for sections,
# docbook doesn't and it's only distracting clutter unless it's the main table.
# we also want to generate tocs only for a top-level section (ie, one that is
@@ -442,18 +455,8 @@ class ManualHTMLRenderer(RendererMixin, HTMLRenderer):
toc_depth = self._html_params.toc_depth
if not (items := walk_and_emit(toc, toc_depth)):
return ""
- examples = ""
- if toc.examples:
- examples_entries = [
- f'<dt>{i + 1}. <a href="{ex.target.href()}">{ex.target.toc_html}</a></dt>'
- for i, ex in enumerate(toc.examples)
- ]
- examples = (
- '<div class="list-of-examples">'
- '<p><strong>List of Examples</strong></p>'
- f'<dl>{"".join(examples_entries)}</dl>'
- '</div>'
- )
figures = build_list("Figures", "list-of-figures", toc.figures)
examples = build_list("Examples", "list-of-examples", toc.examples)
return "".join([
f'<div class="toc">',
' <p><strong>Table of Contents</strong></p>' if print_title else "",
@@ -461,6 +464,7 @@ class ManualHTMLRenderer(RendererMixin, HTMLRenderer):
f' {"".join(items)}'
f' </dl>'
f'</div>'
f'{figures}'
f'{examples}'
])
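
The new build_list helper folds the old list-of-examples markup and the new figures list into one code path. Roughly the markup it produces for a two-entry figure list; the hrefs and titles are placeholders, and each entry's link text is the figure's toc_html, i.e. its title without the "Figure N. " prefix handled further down:

# Sketch of build_list("Figures", "list-of-figures", toc.figures) output.
list_of_figures = (
    '<div class="list-of-figures">'
    '<p><strong>List of Figures</strong></p>'
    '<dl>'
    '<dt>1. <a href="installation.html#fig-partitioning">Partitioning the disk</a></dt>'
    '<dt>2. <a href="options.html#fig-module-eval">Module evaluation</a></dt>'
    '</dl>'
    '</div>'
)
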
@@ -612,6 +616,8 @@ class HTMLConverter(BaseConverter[ManualHTMLRenderer]):
result += self._collect_ids(sub, sub_file, subtyp, si == 0 and sub_file != target_file)
elif bt.type == 'example_open' and (id := cast(str, bt.attrs.get('id', ''))):
result.append((id, 'example', tokens[i + 2], target_file, False))
elif bt.type == 'figure_open' and (id := cast(str, bt.attrs.get('id', ''))):
result.append((id, 'figure', tokens[i + 2], target_file, False))
elif bt.type == 'inline':
assert bt.children
result += self._collect_ids(bt.children, target_file, typ, False)
@@ -636,8 +642,8 @@ class HTMLConverter(BaseConverter[ManualHTMLRenderer]):
title = prefix + title_html
toc_html = f"{n}. {title_html}"
title_html = f"Appendix&nbsp;{n}"
- elif typ == 'example':
- # skip the prepended `Example N. ` from numbering
elif typ in ['example', 'figure']:
# skip the prepended `{Example,Figure} N. ` from numbering
toc_html, title = self._renderer.renderInline(inlines.children[2:]), title_html
# xref title wants only the prepended text, sans the trailing colon and space
title_html = self._renderer.renderInline(inlines.children[0:1])
@@ -653,6 +659,7 @@ class HTMLConverter(BaseConverter[ManualHTMLRenderer]):
def _postprocess(self, infile: Path, outfile: Path, tokens: Sequence[Token]) -> None:
self._number_block('example', "Example", tokens)
self._number_block('figure', "Figure", tokens)
xref_queue = self._collect_ids(tokens, outfile.name, 'book', True)
failed = False
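
Postprocessing only needs one extra pass: _number_block, already used for examples, prepends a book-wide running prefix to each figure title. A trivial illustration of the effect on titles (titles invented; the real code rewrites title token streams, not plain strings):

# Illustration only.
titles = ["Partitioning the disk", "Module evaluation"]
numbered = [f"Figure {n}. {t}" for n, t in enumerate(titles, start=1)]
# ['Figure 1. Partitioning the disk', 'Figure 2. Module evaluation']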

View File

@@ -14,7 +14,7 @@ from .utils import Freezeable
FragmentType = Literal['preface', 'part', 'chapter', 'section', 'appendix']
# in the TOC all fragments are allowed, plus the all-encompassing book.
- TocEntryType = Literal['book', 'preface', 'part', 'chapter', 'section', 'appendix', 'example']
TocEntryType = Literal['book', 'preface', 'part', 'chapter', 'section', 'appendix', 'example', 'figure']
def is_include(token: Token) -> bool:
return token.type == "fence" and token.info.startswith("{=include=} ")
@@ -128,6 +128,7 @@ class TocEntry(Freezeable):
children: list[TocEntry] = dc.field(default_factory=list)
starts_new_chunk: bool = False
examples: list[TocEntry] = dc.field(default_factory=list)
figures: list[TocEntry] = dc.field(default_factory=list)
@property
def root(self) -> TocEntry:
@@ -142,7 +143,7 @@ class TocEntry(Freezeable):
@classmethod
def collect_and_link(cls, xrefs: dict[str, XrefTarget], tokens: Sequence[Token]) -> TocEntry:
- entries, examples = cls._collect_entries(xrefs, tokens, 'book')
entries, examples, figures = cls._collect_entries(xrefs, tokens, 'book')
def flatten_with_parent(this: TocEntry, parent: TocEntry | None) -> Iterable[TocEntry]:
this.parent = parent
@@ -160,6 +161,7 @@ class TocEntry(Freezeable):
paths_seen.add(c.target.path)
flat[0].examples = examples
flat[0].figures = figures
for c in flat:
c.freeze()
@@ -168,21 +170,23 @@ class TocEntry(Freezeable):
@classmethod
def _collect_entries(cls, xrefs: dict[str, XrefTarget], tokens: Sequence[Token],
- kind: TocEntryType) -> tuple[TocEntry, list[TocEntry]]:
kind: TocEntryType) -> tuple[TocEntry, list[TocEntry], list[TocEntry]]:
# we assume that check_structure has been run recursively over the entire input.
# list contains (tag, entry) pairs that will collapse to a single entry for
# the full sequence.
entries: list[tuple[str, TocEntry]] = []
examples: list[TocEntry] = []
figures: list[TocEntry] = []
for token in tokens:
if token.type.startswith('included_') and (included := token.meta.get('included')):
fragment_type_str = token.type[9:].removesuffix('s')
assert fragment_type_str in get_args(TocEntryType)
fragment_type = cast(TocEntryType, fragment_type_str)
for fragment, _path in included:
- subentries, subexamples = cls._collect_entries(xrefs, fragment, fragment_type)
subentries, subexamples, subfigures = cls._collect_entries(xrefs, fragment, fragment_type)
entries[-1][1].children.append(subentries)
examples += subexamples
figures += subfigures
elif token.type == 'heading_open' and (id := cast(str, token.attrs.get('id', ''))):
while len(entries) > 1 and entries[-1][0] >= token.tag:
entries[-2][1].children.append(entries.pop()[1])
@@ -191,7 +195,9 @@ class TocEntry(Freezeable):
token.meta['TocEntry'] = entries[-1][1]
elif token.type == 'example_open' and (id := cast(str, token.attrs.get('id', ''))):
examples.append(TocEntry('example', xrefs[id]))
elif token.type == 'figure_open' and (id := cast(str, token.attrs.get('id', ''))):
figures.append(TocEntry('figure', xrefs[id]))
while len(entries) > 1:
entries[-2][1].children.append(entries.pop()[1])
- return (entries[0][1], examples)
return (entries[0][1], examples, figures)
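
The TOC structure changes mirror the existing example handling: _collect_entries returns a third, flat list of figure entries, included fragments merge their sub-lists upward, and collect_and_link attaches the result to the root (book) entry so the TOC renderer can reach every figure in the book. A stripped-down, self-contained sketch of that collection pattern, with stand-in types instead of the real TocEntry/XrefTarget:

# Stand-in sketch, not nixos-render-docs code: recurse over nested fragments,
# gathering figures/examples into flat lists returned alongside the entry tree.
from dataclasses import dataclass, field

@dataclass
class Entry:
    kind: str
    id: str
    children: list["Entry"] = field(default_factory=list)
    examples: list["Entry"] = field(default_factory=list)
    figures: list["Entry"] = field(default_factory=list)

def collect(fragment: list, kind: str) -> tuple[Entry, list[Entry], list[Entry]]:
    entry = Entry(kind, f"<{kind}>")
    examples: list[Entry] = []
    figures: list[Entry] = []
    for typ, payload in fragment:
        if typ == "included":                     # an included sub-file
            sub, subex, subfig = collect(payload, "chapter")
            entry.children.append(sub)
            examples += subex
            figures += subfig
        elif typ == "example_open":
            examples.append(Entry("example", payload))
        elif typ == "figure_open":
            figures.append(Entry("figure", payload))
    return entry, examples, figures

book, examples, figures = collect(
    [("included", [("figure_open", "fig-a")]), ("example_open", "ex-b")], "book")
book.examples, book.figures = examples, figures   # flat lists live on the root
print([f.id for f in figures])                    # ['fig-a']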

View File

@@ -40,7 +40,7 @@ def md_make_code(code: str, info: str = "", multiline: Optional[bool] = None) ->
ticks, sep = ('`' * (longest + (3 if multiline else 1)), '\n' if multiline else ' ')
return f"{ticks}{info}{sep}{code}{sep}{ticks}"
- AttrBlockKind = Literal['admonition', 'example']
AttrBlockKind = Literal['admonition', 'example', 'figure']
AdmonitionKind = Literal["note", "caution", "tip", "important", "warning"]
@@ -91,6 +91,10 @@ class Renderer:
"example_title_open": self.example_title_open,
"example_title_close": self.example_title_close,
"image": self.image,
"figure_open": self.figure_open,
"figure_close": self.figure_close,
"figure_title_open": self.figure_title_open,
"figure_title_close": self.figure_title_close,
}
self._admonitions = {
@@ -228,6 +232,14 @@ class Renderer:
raise RuntimeError("md token not supported", token)
def image(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def figure_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def figure_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def figure_title_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def figure_title_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def _is_escaped(src: str, pos: int) -> bool:
found = 0
@@ -270,6 +282,8 @@ def _parse_blockattrs(info: str) -> Optional[tuple[AttrBlockKind, Optional[str],
return ('admonition', id, classes)
if classes == ['example']:
return ('example', id, classes)
elif classes == ['figure']:
return ('figure', id, classes)
return None
def _attr_span_plugin(md: markdown_it.MarkdownIt) -> None:
@@ -419,6 +433,11 @@ def _block_attr(md: markdown_it.MarkdownIt) -> None:
if id is not None:
token.attrs['id'] = id
stack.append('example_close')
elif kind == 'figure':
token.type = 'figure_open'
if id is not None:
token.attrs['id'] = id
stack.append('figure_close')
else:
assert_never(kind)
elif token.type == 'container_blockattr_close':
@@ -501,6 +520,7 @@ class Converter(ABC, Generic[TR]):
self._md.use(_compact_list_attr)
self._md.use(_block_attr)
self._md.use(_block_titles("example"))
self._md.use(_block_titles("figure"))
self._md.enable(["smartquotes", "replacements"])
def _parse(self, src: str) -> list[Token]:
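
Finally, the markdown plumbing wires the new kind into the generic ::: container handling: _parse_blockattrs accepts {.figure} (plus an optional #id), _block_attr rewrites the container tokens to figure_open/figure_close, and _block_titles("figure") presumably turns a leading heading inside the block into the figure title, as it already does for examples. Below is a self-contained sketch of the same token-renaming idea using the stock mdit_py_plugins container plugin instead of the project's own attribute-parsing container; it demonstrates the technique, not the project's code:

# Demo only: rename container tokens so a renderer can dispatch on
# figure_open/figure_close, using the off-the-shelf container plugin rather
# than the custom {.figure #id} parsing shown above.
import markdown_it
from mdit_py_plugins.container import container_plugin

md = markdown_it.MarkdownIt().use(container_plugin, name="figure")

src = "::: figure\n![a graph](./graph.svg)\n:::\n"

tokens = md.parse(src)
for tok in tokens:
    if tok.type == "container_figure_open":
        tok.type = "figure_open"
    elif tok.type == "container_figure_close":
        tok.type = "figure_close"

print([t.type for t in tokens])
# roughly: ['figure_open', 'paragraph_open', 'inline', 'paragraph_close', 'figure_close']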