back to Claude Sonnet 3.5 - Fill-in summary
Claude Sonnet 3.5 - Fill-in: sphinx
Failed to run pytest for the test suite
ImportError while loading conftest '/testbed/tests/conftest.py'.
tests/conftest.py:15: in <module>
import sphinx.pycode
sphinx/pycode/__init__.py:11: in <module>
from sphinx.pycode.parser import Parser
sphinx/pycode/parser.py:15: in <module>
from sphinx.pycode.ast import unparse as ast_unparse
sphinx/pycode/ast.py:20: in <module>
class _UnparseVisitor(ast.NodeVisitor):
sphinx/pycode/ast.py:25: in _UnparseVisitor
locals()[f'visit_{_op.__name__}'] = _visit_op
E NameError: name '_visit_op' is not defined
Patch diff
diff --git a/sphinx/_cli/util/colour.py b/sphinx/_cli/util/colour.py
index 4ae735270..3b24eadc0 100644
--- a/sphinx/_cli/util/colour.py
+++ b/sphinx/_cli/util/colour.py
@@ -10,7 +10,13 @@ _COLOURING_DISABLED = True
def terminal_supports_colour() ->bool:
"""Return True if coloured terminal output is supported."""
- pass
+ if _COLOURING_DISABLED:
+ return False
+ if sys.platform == 'win32':
+ return True # Assuming colorama is properly initialized
+ if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
+ return True
+ return 'COLORTERM' in os.environ
if sys.platform == 'win32':
diff --git a/sphinx/_cli/util/errors.py b/sphinx/_cli/util/errors.py
index c3e0cc08b..470a6e43d 100644
--- a/sphinx/_cli/util/errors.py
+++ b/sphinx/_cli/util/errors.py
@@ -11,9 +11,17 @@ _ANSI_COLOUR_CODES: re.Pattern[str] = re.compile('\x1b.*?m')
def terminal_safe(s: str, /) ->str:
"""Safely encode a string for printing to the terminal."""
- pass
+ return _ANSI_COLOUR_CODES.sub('', s).encode('ascii', 'replace').decode('ascii')
def save_traceback(app: (Sphinx | None), exc: BaseException) ->str:
"""Save the given exception's traceback in a temporary file."""
- pass
+ import traceback
+
+ with tempfile.NamedTemporaryFile('w', delete=False, suffix='.log') as f:
+ traceback.print_exc(file=f)
+ if isinstance(exc, SphinxParallelError):
+ for cause in exc.causes:
+ f.write('\n')
+ traceback.print_exception(*cause, file=f)
+ return f.name
diff --git a/sphinx/addnodes.py b/sphinx/addnodes.py
index a277c8408..2ce6410c9 100644
--- a/sphinx/addnodes.py
+++ b/sphinx/addnodes.py
@@ -34,21 +34,27 @@ class translatable(nodes.Node):
Because they are used at final step; extraction.
"""
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.original_messages = []
+
def preserve_original_messages(self) ->None:
"""Preserve original translatable messages."""
- pass
+ self.original_messages = [child.astext() for child in self.children if isinstance(child, nodes.TextElement)]
def apply_translated_message(self, original_message: str,
translated_message: str) ->None:
"""Apply translated message."""
- pass
+ for i, child in enumerate(self.children):
+ if isinstance(child, nodes.TextElement) and child.astext() == original_message:
+ self.children[i] = nodes.Text(translated_message)
def extract_original_messages(self) ->Sequence[str]:
"""Extract translation messages.
:returns: list of extracted messages or messages generator
"""
- pass
+ return self.original_messages
class not_smartquotable:
diff --git a/sphinx/application.py b/sphinx/application.py
index c0830a77e..b9387588a 100644
--- a/sphinx/application.py
+++ b/sphinx/application.py
@@ -206,29 +206,54 @@ class Sphinx:
self._init_builder()
@property
- def fresh_env_used(self) ->(bool | None):
+ def fresh_env_used(self) -> bool | None:
"""True/False as to whether a new environment was created for this build,
or None if the environment has not been initialised yet.
"""
- pass
+ return self._fresh_env_used
- def _init_i18n(self) ->None:
+ def _init_i18n(self) -> None:
"""Load translated strings from the configured localedirs if enabled in
the configuration.
"""
- pass
+ if self.config.language is not None:
+ self.translator = locale.init([self.confdir], self.config.language)
+ else:
+ self.translator = locale.NullTranslations()
- def setup_extension(self, extname: str) ->None:
+ def setup_extension(self, extname: str) -> None:
"""Import and setup a Sphinx extension module.
Load the extension given by the module *name*. Use this if your
extension needs the features provided by another extension. No-op if
called twice.
"""
- pass
+ if extname in self.extensions:
+ return
+ try:
+ mod = __import__(extname, None, None, ['setup'])
+ except ImportError as err:
+ logger.warning(__('Failed to import extension %s'), extname)
+ logger.warning(__('The error was: %s'), err)
+ return
+ if not hasattr(mod, 'setup'):
+ logger.warning(__('Extension %s has no setup() function; is it really '
+ 'a Sphinx extension module?'), extname)
+ return
+
+ try:
+ mod.setup(self)
+ except Exception as err:
+ logger.error(__('Failed to setup extension: %s'), extname)
+ logger.error(__('The error was: %s'), err)
+ else:
+ self.extensions[extname] = mod
@staticmethod
- def require_sphinx(version: (tuple[int, int] | str)) ->None:
+ def require_sphinx(version: tuple[int, int] | str) -> None:
"""Check the Sphinx version if requested.
Compare *version* with the version of the running Sphinx, and abort the
@@ -241,9 +266,17 @@ class Sphinx:
.. versionchanged:: 7.1
Type of *version* now allows ``(major, minor)`` form.
"""
- pass
+ if isinstance(version, str):
+ version = tuple(int(x) for x in version.split('.'))
+ elif not isinstance(version, tuple):
+ raise TypeError('version must be a string or a tuple')
+
+ if version > sphinx.version_info[:2]:
+ raise VersionRequirementError(
+ __('This project needs at least Sphinx v%s and therefore cannot '
+ 'be built with this version.') % '.'.join(map(str, version)))
- def connect(self, event: str, callback: Callable, priority: int=500) ->int:
+ def connect(self, event: str, callback: Callable, priority: int = 500) -> int:
"""Register *callback* to be called when *event* is emitted.
For details on available core events and the arguments of callback
@@ -259,17 +292,16 @@ class Sphinx:
Support *priority*
"""
- pass
+ return self.events.connect(event, callback, priority)
- def disconnect(self, listener_id: int) ->None:
+ def disconnect(self, listener_id: int) -> None:
"""Unregister callback by *listener_id*.
:param listener_id: A listener_id that :meth:`connect` returns
"""
- pass
+ self.events.disconnect(listener_id)
- def emit(self, event: str, *args: Any, allowed_exceptions: tuple[type[
- Exception], ...]=()) ->list:
+ def emit(self, event: str, *args: Any, allowed_exceptions: tuple[type[Exception], ...] = ()) -> list:
"""Emit *event* and pass *arguments* to the callback functions.
Return the return values of all callbacks as a list. Do not emit core
@@ -283,10 +315,9 @@ class Sphinx:
Added *allowed_exceptions* to specify path-through exceptions
"""
- pass
+ return self.events.emit(event, *args, allowed_exceptions=allowed_exceptions)
- def emit_firstresult(self, event: str, *args: Any, allowed_exceptions:
- tuple[type[Exception], ...]=()) ->Any:
+ def emit_firstresult(self, event: str, *args: Any, allowed_exceptions: tuple[type[Exception], ...] = ()) -> Any:
"""Emit *event* and pass *arguments* to the callback functions.
Return the result of the first callback that doesn't return ``None``.
@@ -300,7 +331,7 @@ class Sphinx:
Added *allowed_exceptions* to specify path-through exceptions
"""
- pass
+ return self.events.emit_firstresult(event, *args, allowed_exceptions=allowed_exceptions)
def add_builder(self, builder: type[Builder], override: bool=False) ->None:
"""Register a new builder.
diff --git a/sphinx/builders/_epub_base.py b/sphinx/builders/_epub_base.py
index aa543c3e3..972f8461f 100644
--- a/sphinx/builders/_epub_base.py
+++ b/sphinx/builders/_epub_base.py
@@ -99,27 +99,55 @@ class EpubBuilder(StandaloneHTMLBuilder):
def make_id(self, name: str) ->str:
"""Return a unique id for name."""
- pass
+ return self.make_id_from_string(name)
def get_refnodes(self, doctree: Node, result: list[dict[str, Any]]) ->list[
dict[str, Any]]:
"""Collect section titles, their depth in the toc and the refuri."""
- pass
+ for node in doctree.traverse(nodes.section):
+ nodetext = node.astext().strip()
+ if not nodetext:
+ continue
+ result.append({
+ 'level': int(node['ids'][0].split('-')[1]),
+ 'refuri': node['ids'][0],
+ 'title': nodetext,
+ })
+ return result
def get_toc(self) ->None:
"""Get the total table of contents, containing the root_doc
and pre and post files not managed by sphinx.
"""
- pass
+ doctree = self.env.get_and_resolve_doctree(self.config.root_doc, self)
+ refnodes = self.get_refnodes(doctree, [])
+ self.toc_add_files(refnodes)
+ self.toc = refnodes
def toc_add_files(self, refnodes: list[dict[str, Any]]) ->None:
"""Add the root_doc, pre and post files to a list of refnodes.
"""
- pass
+ refnodes.insert(0, {
+ 'level': 1,
+ 'refuri': self.config.root_doc,
+ 'title': self.config.project,
+ })
+ for docname in self.config.epub_pre_files:
+ refnodes.append({
+ 'level': 1,
+ 'refuri': docname,
+ 'title': self.env.titles[docname].astext(),
+ })
+ for docname in self.config.epub_post_files:
+ refnodes.append({
+ 'level': 1,
+ 'refuri': docname,
+ 'title': self.env.titles[docname].astext(),
+ })
def fix_fragment(self, prefix: str, fragment: str) ->str:
"""Return a href/id attribute with colons replaced by hyphens."""
- pass
+ return prefix + fragment.replace(':', '-')
def fix_ids(self, tree: nodes.document) ->None:
"""Replace colons with hyphens in href and id attributes.
@@ -127,12 +155,38 @@ class EpubBuilder(StandaloneHTMLBuilder):
Some readers crash because they interpret the part as a
transport protocol specification.
"""
- pass
+ for node in tree.traverse(nodes.reference):
+ if 'refuri' in node:
+ m = self.refuri_re.match(node['refuri'])
+ if m:
+ node['refuri'] = self.fix_fragment(m.group(1), m.group(2))
+ if 'refid' in node:
+ node['refid'] = self.fix_fragment('', node['refid'])
+ for node in tree.traverse(addnodes.desc_signature):
+ ids = node.attributes['ids']
+ newids = []
+ for id in ids:
+ newids.append(self.fix_fragment('', id))
+ node.attributes['ids'] = newids
def add_visible_links(self, tree: nodes.document, show_urls: str='inline'
) ->None:
"""Add visible link targets for external links"""
- pass
+ for node in tree.traverse(nodes.reference):
+ uri = node.get('refuri', '')
+ if uri.startswith('http:') or uri.startswith('https:') or uri.startswith('ftp:'):
+ if uri == node.astext():
+ continue
+ idx = node.parent.index(node)
+ if show_urls == 'inline':
+ uri_node = nodes.inline(uri, ' (%s)' % uri, classes=['link-target'])
+ node.parent.insert(idx + 1, uri_node)
+ elif show_urls == 'footnote':
+ uri_node = nodes.footnote_reference('[#]_', refname=uri)
+ node.parent.insert(idx + 1, uri_node)
+ footnote = nodes.footnote('', nodes.paragraph('', uri))
+ footnote.insert(0, nodes.label('', '#'))
+ tree.append(footnote)
def write_doc(self, docname: str, doctree: nodes.document) ->None:
"""Write one document file.
@@ -140,29 +194,49 @@ class EpubBuilder(StandaloneHTMLBuilder):
This method is overwritten in order to fix fragment identifiers
and to add visible external links.
"""
- pass
+ self.fix_ids(doctree)
+ self.add_visible_links(doctree, self.config.epub_show_urls)
+ super().write_doc(docname, doctree)
def fix_genindex(self, tree: list[tuple[str, list[tuple[str, Any]]]]
) ->None:
"""Fix href attributes for genindex pages."""
- pass
+ for _, entries in tree:
+ for entry in entries:
+ if isinstance(entry, tuple):
+ for idx, href in enumerate(entry[1]):
+ if href:
+ entry[1][idx] = self.fix_fragment('', href)
def is_vector_graphics(self, filename: str) ->bool:
"""Does the filename extension indicate a vector graphic format?"""
- pass
+ return any(filename.endswith(ext) for ext in VECTOR_GRAPHICS_EXTENSIONS)
def copy_image_files_pil(self) ->None:
"""Copy images using Pillow, the Python Imaging Library.
The method tries to read and write the files with Pillow, converting
the format and resizing the image if necessary/possible.
"""
- pass
+ if not PILLOW_AVAILABLE:
+ return
+ for src in status_iterator(self.images, __('copying images... '), "brown",
+ len(self.images), self.app.verbosity):
+ dest = path.join(self.outdir, self.images[src])
+ try:
+ img = Image.open(path.join(self.srcdir, src))
+ img.save(dest)
+ except Exception as err:
+ logger.warning(__('cannot read image file %r: %s'), src, err)
+ copyfile(path.join(self.srcdir, src), dest)
def copy_image_files(self) ->None:
"""Copy image files to destination directory.
This overwritten method can use Pillow to convert image files.
"""
- pass
+ if PILLOW_AVAILABLE:
+ self.copy_image_files_pil()
+ else:
+ super().copy_image_files()
def handle_page(self, pagename: str, addctx: dict[str, Any],
templatename: str='page.html', outfilename: (str | None)=None,
@@ -172,15 +246,29 @@ class EpubBuilder(StandaloneHTMLBuilder):
This method is overwritten for genindex pages in order to fix href link
attributes.
"""
- pass
+ if pagename.startswith('genindex'):
+ self.fix_genindex(addctx['genindexentries'])
+ super().handle_page(pagename, addctx, templatename, outfilename, event_arg)
def build_mimetype(self) ->None:
"""Write the metainfo file mimetype."""
- pass
+ with open(path.join(self.outdir, 'mimetype'), 'w', encoding='ascii') as f:
+ f.write('application/epub+zip')
def build_container(self, outname: str='META-INF/container.xml') ->None:
"""Write the metainfo file META-INF/container.xml."""
- pass
+ container_path = path.join(self.outdir, 'META-INF')
+ ensuredir(container_path)
+ container = path.join(container_path, 'container.xml')
+ with open(container, 'w', encoding='utf-8') as f:
+ f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
+ f.write('<container version="1.0"\n')
+ f.write(' xmlns="urn:oasis:names:tc:opendocument:xmlns:container">\n')
+ f.write(' <rootfiles>\n')
+ f.write(' <rootfile full-path="content.opf"\n')
+ f.write(' media-type="application/oebps-package+xml"/>\n')
+ f.write(' </rootfiles>\n')
+ f.write('</container>\n')
def content_metadata(self) ->dict[str, Any]:
"""Create a dictionary with all metadata for the content.opf
diff --git a/sphinx/builders/epub3.py b/sphinx/builders/epub3.py
index aecaf3dbc..c34f6a5bd 100644
--- a/sphinx/builders/epub3.py
+++ b/sphinx/builders/epub3.py
@@ -61,13 +61,27 @@ class Epub3Builder(_epub_base.EpubBuilder):
def handle_finish(self) ->None:
"""Create the metainfo files and finally the epub."""
- pass
+ self.get_toc()
+ self.build_mimetype()
+ self.build_container()
+ self.build_content()
+ self.build_navigation_doc()
+ self.build_toc()
+ self.build_epub()
def content_metadata(self) ->dict[str, Any]:
"""Create a dictionary with all metadata for the content.opf
file properly escaped.
"""
- pass
+ metadata = super().content_metadata()
+ metadata['epub_version'] = '3.0'
+ metadata['page_progression_direction'] = PAGE_PROGRESSION_DIRECTIONS.get(
+ self.config.epub_writing_mode, 'default')
+ metadata['ibook_scroll_axis'] = IBOOK_SCROLL_AXIS.get(
+ self.config.epub_writing_mode, 'default')
+ metadata['writing_mode'] = THEME_WRITING_MODES.get(
+ self.config.epub_writing_mode, 'horizontal-tb')
+ return metadata
def build_navlist(self, navnodes: list[dict[str, Any]]) ->list[NavPoint]:
"""Create the toc navigation structure.
@@ -79,20 +93,45 @@ class Epub3Builder(_epub_base.EpubBuilder):
The difference from build_navpoints method is templates which are used
when generating navigation documents.
"""
- pass
+ def build_navpoint(node: dict[str, Any]) ->NavPoint:
+ text = html.escape(node['text'])
+ refuri = node['refuri']
+ children = [build_navpoint(child) for child in node.get('children', [])]
+ return NavPoint(text=text, refuri=refuri, children=children)
- def navigation_doc_metadata(self, navlist: list[NavPoint]) ->dict[str, Any
- ]:
+ return [build_navpoint(node) for node in navnodes]
+
+ def navigation_doc_metadata(self, navlist: list[NavPoint]) ->dict[str, Any]:
"""Create a dictionary with all metadata for the nav.xhtml file
properly escaped.
"""
- pass
+ metadata = {}
+ metadata['lang'] = self.config.language
+ metadata['toc_locale'] = self.guide_titles['toc']
+ metadata['navlist'] = navlist
+ return metadata
def build_navigation_doc(self) ->None:
"""Write the metainfo file nav.xhtml."""
- pass
+ logger.info(__('writing nav.xhtml file...'))
+ navlist = self.build_navlist(self.refnodes)
+ metadata = self.navigation_doc_metadata(navlist)
+ navigation = self.render_template('nav.xhtml', metadata)
+ with open(path.join(self.outdir, 'nav.xhtml'), 'w', encoding='utf-8') as f:
+ f.write(navigation)
def convert_epub_css_files(app: Sphinx, config: Config) ->None:
"""Convert string styled epub_css_files to tuple styled one."""
- pass
+ epub_css_files = []
+ for entry in config.epub_css_files:
+ if isinstance(entry, str):
+ epub_css_files.append((entry, {}))
+ else:
+ try:
+ filename, attrs = entry
+ epub_css_files.append((filename, attrs))
+ except Exception:
+ logger.warning(__('invalid css_file: %r, ignored'), entry)
+ continue
+ config.epub_css_files = epub_css_files
diff --git a/sphinx/builders/gettext.py b/sphinx/builders/gettext.py
index 4b4b24de7..df6453c5e 100644
--- a/sphinx/builders/gettext.py
+++ b/sphinx/builders/gettext.py
@@ -113,7 +113,12 @@ ctime = time.strftime('%Y-%m-%d %H:%M%z', timestamp)
def _is_node_in_substitution_definition(node: nodes.Node) ->bool:
"""Check "node" to test if it is in a substitution definition."""
- pass
+ parent = node.parent
+ while parent:
+ if isinstance(parent, nodes.substitution_definition):
+ return True
+ parent = parent.parent
+ return False
class MessageCatalogBuilder(I18nBuilder):
diff --git a/sphinx/builders/html/_build_info.py b/sphinx/builders/html/_build_info.py
index 124c16e83..9c320213a 100644
--- a/sphinx/builders/html/_build_info.py
+++ b/sphinx/builders/html/_build_info.py
@@ -41,4 +41,17 @@ def _stable_hash(obj: Any) ->str:
We can't just use the md5 of str(obj) as the order of collections
may be random.
"""
- pass
+ def _hash_helper(o):
+ if isinstance(o, (str, int, float, bool, type(None))):
+ return str(o)
+ elif isinstance(o, (list, tuple)):
+ return '[' + ','.join(_hash_helper(i) for i in o) + ']'
+ elif isinstance(o, dict):
+ return '{' + ','.join(f'{_hash_helper(k)}:{_hash_helper(v)}' for k, v in sorted(o.items())) + '}'
+ elif isinstance(o, set):
+ return '{' + ','.join(_hash_helper(i) for i in sorted(o)) + '}'
+ else:
+ raise ValueError(f"Unhashable type: {type(o)}")
+
+ stable_str = _hash_helper(obj)
+ return hashlib.md5(stable_str.encode('utf-8')).hexdigest()
diff --git a/sphinx/builders/latex/nodes.py b/sphinx/builders/latex/nodes.py
index cdf9949a3..73a483b07 100644
--- a/sphinx/builders/latex/nodes.py
+++ b/sphinx/builders/latex/nodes.py
@@ -4,27 +4,39 @@ from docutils import nodes
class captioned_literal_block(nodes.container):
"""A node for a container of literal_block having a caption."""
- pass
+
+ def __init__(self, rawsource='', *children, **attributes):
+ super().__init__(rawsource, *children, **attributes)
+ self.caption = None
class footnotemark(nodes.Inline, nodes.Referential, nodes.TextElement):
"""A node represents ``\\footnotemark``."""
- pass
+
+ def __init__(self, rawsource='', text='', *children, **attributes):
+ super().__init__(rawsource, text, *children, **attributes)
-class footnotetext(nodes.General, nodes.BackLinkable, nodes.Element, nodes.
- Labeled, nodes.Targetable):
+class footnotetext(nodes.General, nodes.BackLinkable, nodes.Element, nodes.Labeled, nodes.Targetable):
"""A node represents ``\\footnotetext``."""
+
+ def __init__(self, rawsource='', *children, **attributes):
+ super().__init__(rawsource, *children, **attributes)
class math_reference(nodes.Inline, nodes.Referential, nodes.TextElement):
"""A node for a reference for equation."""
- pass
+
+ def __init__(self, rawsource='', text='', *children, **attributes):
+ super().__init__(rawsource, text, *children, **attributes)
+ self.equation_number = None
class thebibliography(nodes.container):
"""A node for wrapping bibliographies."""
- pass
+
+ def __init__(self, rawsource='', *children, **attributes):
+ super().__init__(rawsource, *children, **attributes)
HYPERLINK_SUPPORT_NODES = (nodes.figure, nodes.literal_block, nodes.table,
diff --git a/sphinx/builders/latex/theming.py b/sphinx/builders/latex/theming.py
index 671dc8ff2..7caa70864 100644
--- a/sphinx/builders/latex/theming.py
+++ b/sphinx/builders/latex/theming.py
@@ -88,12 +88,22 @@ class ThemeFactory:
def load_builtin_themes(self, config: Config) ->None:
"""Load built-in themes."""
- pass
+ for name in ('manual', 'howto'):
+ self.themes[name] = BuiltInTheme(name, config)
def get(self, name: str) ->Theme:
"""Get a theme for given *name*."""
- pass
+ if name not in self.themes:
+ theme = self.find_user_theme(name)
+ if theme is None:
+ raise ThemeError(__('LaTeX theme %r not found') % name)
+ self.themes[name] = theme
+ return self.themes[name]
def find_user_theme(self, name: str) ->(Theme | None):
"""Find a theme named as *name* from latex_theme_path."""
- pass
+ for theme_path in self.theme_paths:
+ theme_file = path.join(theme_path, name + '.ini')
+ if path.exists(theme_file):
+ return UserTheme(name, theme_file)
+ return None
diff --git a/sphinx/builders/latex/util.py b/sphinx/builders/latex/util.py
index b909a8426..b896b2e9e 100644
--- a/sphinx/builders/latex/util.py
+++ b/sphinx/builders/latex/util.py
@@ -13,6 +13,18 @@ class ExtBabel(Babel):
self.supported = True
super().__init__(language_code)
- def get_mainlanguage_options(self) ->(str | None):
+ def get_mainlanguage_options(self) -> (str | None):
"""Return options for polyglossia's ``\\setmainlanguage``."""
- pass
+ if not self.use_polyglossia:
+ return None
+
+ if self.language_code == 'english':
+ return 'variant=american'
+ elif self.language_code in self.cyrillic_languages:
+ return 'babelshorthands=true'
+ elif self.language_code == 'greek':
+ return 'variant=poly'
+ elif self.language_code == 'sanskrit':
+ return 'script=Devanagari'
+
+ return None
diff --git a/sphinx/builders/linkcheck.py b/sphinx/builders/linkcheck.py
index 14686b733..a54821f1f 100644
--- a/sphinx/builders/linkcheck.py
+++ b/sphinx/builders/linkcheck.py
@@ -64,7 +64,11 @@ class HyperlinkCollector(SphinxPostTransform):
:param node: A node class
:returns: URI of the node
"""
- pass
+ if isinstance(node, nodes.reference):
+ return node.get('refuri')
+ elif isinstance(node, nodes.image):
+ return node.get('uri')
+ return None
def _add_uri(self, uri: str, node: nodes.Element) ->None:
"""Registers a node's URI into a builder's collection of hyperlinks.
@@ -77,7 +81,17 @@ class HyperlinkCollector(SphinxPostTransform):
:param uri: URI to add
:param node: A node class where the URI was found
"""
- pass
+ if uri is None:
+ return
+
+ docname = self.env.docname
+ docpath = self.env.doc2path(docname)
+ lineno = get_node_line(node)
+
+ self.app.emit('linkcheck-process-uri', uri)
+
+ if uri not in self.builder.hyperlinks:
+ self.builder.hyperlinks[uri] = Hyperlink(uri, docname, docpath, lineno)
class Hyperlink(NamedTuple):
@@ -152,7 +166,9 @@ class HyperlinkAvailabilityCheckWorker(Thread):
def contains_anchor(response: Response, anchor: str) ->bool:
"""Determine if an anchor is contained within an HTTP response."""
- pass
+ parser = AnchorCheckParser(anchor)
+ parser.feed(response.text)
+ return parser.found
class AnchorCheckParser(HTMLParser):
@@ -175,9 +191,35 @@ def rewrite_github_anchor(app: Sphinx, uri: str) ->(str | None):
The hyperlink anchors in github.com are dynamically generated. This rewrites
them before checking and makes them comparable.
"""
- pass
+ parsed = urlparse(uri)
+ if parsed.hostname != 'github.com':
+ return None
+
+ path = parsed.path
+ fragment = unquote(parsed.fragment)
+ if not fragment:
+ return None
+
+ # rewrite anchor
+ if fragment.isdigit():
+ # for headings like: #L10
+ fragment = 'L' + fragment
+ elif fragment.startswith('L'):
+ # for headings like: #L10-L20
+ fragment = fragment.upper()
+ else:
+ # for headings like: #heading-1
+ fragment = fragment.lower()
+ fragment = re.sub(r'[^\w\- ]', '', fragment)
+ fragment = re.sub(r'\s+', '-', fragment)
+
+ result = urlunparse(parsed._replace(fragment=quote(fragment)))
+ return result
def compile_linkcheck_allowed_redirects(app: Sphinx, config: Config) ->None:
"""Compile patterns in linkcheck_allowed_redirects to the regexp objects."""
- pass
+ allowed_redirects = {}
+ for pattern, allowed_pattern in config.linkcheck_allowed_redirects.items():
+ allowed_redirects[re.compile(pattern)] = re.compile(allowed_pattern)
+ config.linkcheck_allowed_redirects = allowed_redirects
diff --git a/sphinx/builders/manpage.py b/sphinx/builders/manpage.py
index 20bdcc04a..f573e83fb 100644
--- a/sphinx/builders/manpage.py
+++ b/sphinx/builders/manpage.py
@@ -32,7 +32,8 @@ class ManualPageBuilder(Builder):
supported_image_types: list[str] = []
-def default_man_pages(config: Config) ->list[tuple[str, str, str, list[str],
- int]]:
+def default_man_pages(config: Config) -> list[tuple[str, str, str, list[str], int]]:
"""Better default man_pages settings."""
- pass
+ project_name = make_filename_from_project(config.project)
+ return [(project_name, project_name, config.project,
+ [config.author], 1)]
diff --git a/sphinx/builders/texinfo.py b/sphinx/builders/texinfo.py
index e7442b2f8..3aafb28db 100644
--- a/sphinx/builders/texinfo.py
+++ b/sphinx/builders/texinfo.py
@@ -49,4 +49,13 @@ Run 'make' in that directory to run these through makeinfo
def default_texinfo_documents(config: Config) ->list[tuple[str, str, str,
str, str, str, str]]:
"""Better default texinfo_documents settings."""
- pass
+ return [
+ (config.root_doc, # startdocname
+ make_filename_from_project(config.project), # targetname
+ config.project, # title
+ config.author, # author
+ config.project, # dir_entry
+ '', # description
+ 'Miscellaneous', # category
+ )
+ ]
diff --git a/sphinx/cmd/build.py b/sphinx/cmd/build.py
index a2aa5c5f4..403bea517 100644
--- a/sphinx/cmd/build.py
+++ b/sphinx/cmd/build.py
@@ -37,17 +37,57 @@ def jobs_argument(value: str) ->int:
be expanded to handle other special scaling requests, such as setting job count
to cpu_count.
"""
- pass
+ if value.lower() == 'auto':
+ return multiprocessing.cpu_count()
+ try:
+ jobs = int(value)
+ if jobs <= 0:
+ raise ValueError
+ return jobs
+ except ValueError:
+ raise argparse.ArgumentTypeError(f"'{value}' is not a positive integer or 'auto'")
def make_main(argv: Sequence[str]) ->int:
"""Sphinx build "make mode" entry."""
- pass
+ if not argv:
+ print("Usage: sphinx-build -M <builder> <sourcedir> <outputdir> [options]")
+ return 1
+
+ builder = argv[0]
+ sourcedir = argv[1] if len(argv) > 1 else '.'
+ outputdir = argv[2] if len(argv) > 2 else 'build'
+
+ try:
+ with docutils_namespace():
+ app = Sphinx(sourcedir, sourcedir, outputdir, outputdir, builder)
+ app.build()
+ return 0
+ except Exception as exc:
+ print(f"Error: {exc}")
+ return 1
def build_main(argv: Sequence[str]) ->int:
"""Sphinx build "main" command-line entry."""
- pass
+ parser = argparse.ArgumentParser(description="Sphinx documentation builder")
+ parser.add_argument('sourcedir', help="Source directory")
+ parser.add_argument('outputdir', help="Output directory")
+ parser.add_argument('-b', '--builder', default='html', help="Builder to use (default: html)")
+ parser.add_argument('-j', '--jobs', type=jobs_argument, default=1, help="Number of parallel jobs")
+ parser.add_argument('-v', '--verbose', action='store_true', help="Increase verbosity")
+
+ args = parser.parse_args(argv)
+
+ try:
+ with docutils_namespace():
+ app = Sphinx(args.sourcedir, args.sourcedir, args.outputdir, args.outputdir, args.builder,
+ parallel=args.jobs, verbosity=2 if args.verbose else 1)
+ app.build()
+ return 0
+ except Exception as exc:
+ print(f"Error: {exc}")
+ return 1
if __name__ == '__main__':
diff --git a/sphinx/cmd/quickstart.py b/sphinx/cmd/quickstart.py
index c9bb73784..64a16258a 100644
--- a/sphinx/cmd/quickstart.py
+++ b/sphinx/cmd/quickstart.py
@@ -67,7 +67,8 @@ class QuickstartRenderer(SphinxRenderer):
Note: Please don't use this function from extensions.
It will be removed in the future without deprecation period.
"""
- pass
+ custom_template = path.join(self.templatedir, template_name)
+ return path.exists(custom_template)
def ask_user(d: dict[str, Any]) ->None:
@@ -89,13 +90,127 @@ def ask_user(d: dict[str, Any]) ->None:
* makefile: make Makefile
* batchfile: make command file
"""
- pass
+ print(bold(__('Welcome to the Sphinx %s quickstart utility.')) % __display_version__)
+ print(__('''
+Please enter values for the following settings (just press Enter to
+accept a default value, if one is given in brackets).'''))
+
+ if 'path' not in d:
+ print()
+ d['path'] = do_prompt(__('Enter the root path for documentation.'), DEFAULTS['path'])
+ if 'sep' not in d:
+ print()
+ d['sep'] = do_prompt(__('Separate source and build directories (y/n)'), 'n') == 'y'
+ if 'project' not in d:
+ print()
+ d['project'] = do_prompt(__('Project name:'))
+ if 'author' not in d:
+ print()
+ d['author'] = do_prompt(__('Author name(s):'))
+ if 'version' not in d:
+ print()
+ d['version'] = do_prompt(__('Project version:'), '0.1')
+ if 'release' not in d:
+ print()
+ d['release'] = do_prompt(__('Project release:'), d['version'])
+ if 'language' not in d:
+ print()
+ d['language'] = do_prompt(__('Project language:'), DEFAULTS['language'])
+ if 'suffix' not in d:
+ print()
+ d['suffix'] = do_prompt(__('Source file suffix:'), DEFAULTS['suffix'])
+ if 'master' not in d:
+ print()
+ d['master'] = do_prompt(__('Name of your master document (without suffix):'), DEFAULTS['master'])
+ if 'extensions' not in d:
+ print()
+ print(__('Indicate which of the following Sphinx extensions should be enabled:'))
+ d['extensions'] = []
+ for name, description in EXTENSIONS.items():
+ if do_prompt('%s: %s (y/n)' % (name, description), 'n') == 'y':
+ d['extensions'].append(name)
+ if 'makefile' not in d:
+ print()
+ d['makefile'] = do_prompt(__('Create Makefile? (y/n)'), 'y') == 'y'
+ if 'batchfile' not in d:
+ print()
+ d['batchfile'] = do_prompt(__('Create Windows command file? (y/n)'), 'y') == 'y'
+
+ print()
def generate(d: dict[str, Any], overwrite: bool=True, silent: bool=False,
templatedir: (str | None)=None) ->None:
"""Generate project based on values in *d*."""
- pass
+ renderer = QuickstartRenderer(templatedir=templatedir)
+
+ if 'mastertoctree' not in d:
+ d['mastertoctree'] = ''
+ if 'mastertocmaxdepth' not in d:
+ d['mastertocmaxdepth'] = 2
+
+ d['now'] = time.asctime()
+ d['project_underline'] = column_width(d['project']) * '='
+ d['extensions'] = ', '.join(repr(ext) for ext in d.get('extensions', []))
+ d['copyright'] = time.strftime('%Y') + ', ' + d['author']
+
+ ensuredir(d['path'])
+
+ srcdir = d['sep'] and path.join(d['path'], 'source') or d['path']
+
+ ensuredir(srcdir)
+ if d['sep']:
+ builddir = path.join(d['path'], 'build')
+ d['exclude_patterns'] = ''
+ else:
+ builddir = path.join(srcdir, '_build')
+ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+ d['exclude_patterns'] = ', '.join(repr(p) for p in exclude_patterns)
+ ensuredir(builddir)
+ ensuredir(path.join(srcdir, '_templates'))
+ ensuredir(path.join(srcdir, '_static'))
+
+ def write_file(fpath: str, content: str, newline: str='\n') ->None:
+ if overwrite or not path.isfile(fpath):
+ print(__('Creating file %s.') % fpath)
+ with open(fpath, 'w', encoding='utf-8', newline=newline) as f:
+ f.write(content)
+ else:
+ print(__('File %s already exists, skipping.') % fpath)
+
+ conf_text = renderer.render('conf.py_t', d)
+ write_file(path.join(srcdir, 'conf.py'), conf_text)
+
+ masterfile = path.join(srcdir, d['master'] + d['suffix'])
+ write_file(masterfile, renderer.render('master_doc.rst_t', d))
+
+ if d.get('make_mode') is True:
+ makefile_template = 'Makefile.new_t'
+ batchfile_template = 'make.bat.new_t'
+ else:
+ makefile_template = 'Makefile_t'
+ batchfile_template = 'make.bat_t'
+
+ if d['makefile'] is True:
+ write_file(path.join(d['path'], 'Makefile'),
+ renderer.render(makefile_template, d))
+
+ if d['batchfile'] is True:
+ write_file(path.join(d['path'], 'make.bat'),
+ renderer.render(batchfile_template, d), '\r\n')
+
+ if silent:
+ return
+ print()
+ print(bold(__('Finished: An initial directory structure has been created.')))
+ print()
+ print(__('You should now populate your master file %s and create other documentation\n'
+ 'source files. ') % masterfile, end='')
+ print(__('Use the Makefile to build the docs, like so:\n'
+ ' make builder'))
+ print(__('where "builder" is one of the supported builders, e.g. html, latex '
+ 'or linkcheck.'))
+ print()
if __name__ == '__main__':
diff --git a/sphinx/config.py b/sphinx/config.py
index 035734a48..efb3b5d00 100644
--- a/sphinx/config.py
+++ b/sphinx/config.py
@@ -39,7 +39,21 @@ class ConfigValue(NamedTuple):
def is_serializable(obj: object, *, _seen: frozenset[int]=frozenset()) ->bool:
"""Check if an object is serializable or not."""
- pass
+ if id(obj) in _seen:
+ return False
+
+ new_seen = _seen | {id(obj)}
+
+ if isinstance(obj, (str, int, float, bool, type(None))):
+ return True
+ elif isinstance(obj, (list, tuple)):
+ return all(is_serializable(item, _seen=new_seen) for item in obj)
+ elif isinstance(obj, dict):
+ return all(is_serializable(key, _seen=new_seen) and is_serializable(value, _seen=new_seen) for key, value in obj.items())
+ elif isinstance(obj, set):
+ return all(is_serializable(item, _seen=new_seen) for item in obj)
+
+ return False
class ENUM:
@@ -229,7 +243,15 @@ class Config:
def read(cls: type[Config], confdir: (str | os.PathLike[str]),
overrides: (dict | None)=None, tags: (Tags | None)=None) ->Config:
"""Create a Config object from configuration file."""
- pass
+ conffile = path.join(confdir, CONFIG_FILENAME)
+ if not path.isfile(conffile):
+ raise ConfigError(__("config directory doesn't contain a conf.py file (%s)") % conffile)
+
+ with chdir(confdir):
+ namespace = eval_config_file(conffile, tags)
+
+ config = cls(namespace, overrides)
+ return config
def __repr__(self) ->str:
values = []
@@ -328,7 +350,15 @@ class Config:
def eval_config_file(filename: str, tags: (Tags | None)) ->dict[str, Any]:
"""Evaluate a config file."""
- pass
+ namespace: dict[str, Any] = {}
+ with open(filename, 'rb') as f:
+ code = compile(f.read(), filename, 'exec')
+ exec(code, namespace)
+
+ if tags is not None:
+ namespace['tags'] = tags
+
+ return {k: v for k, v in namespace.items() if not k.startswith('__')}
def convert_source_suffix(app: Sphinx, config: Config) ->None:
@@ -337,7 +367,15 @@ def convert_source_suffix(app: Sphinx, config: Config) ->None:
* old style: str or list
* new style: a dict which maps from fileext to filetype
"""
- pass
+ source_suffix = config.source_suffix
+ if isinstance(source_suffix, str):
+ config.source_suffix = {source_suffix: 'restructuredtext'}
+ elif isinstance(source_suffix, (list, tuple)):
+ config.source_suffix = {suffix: 'restructuredtext' for suffix in source_suffix}
+ elif not isinstance(source_suffix, dict):
+ logger.warning(__('The source_suffix config value should be either '
+ 'a string, list, or dictionary. But now it is %r'),
+ type(source_suffix))
def convert_highlight_options(app: Sphinx, config: Config) ->None:
@@ -346,12 +384,23 @@ def convert_highlight_options(app: Sphinx, config: Config) ->None:
* old style: options
* new style: a dict which maps from language name to options
"""
- pass
+ highlight_options = config.highlight_options
+ if isinstance(highlight_options, dict):
+ return
+
+ config.highlight_options = {config.highlight_language: highlight_options}
def init_numfig_format(app: Sphinx, config: Config) ->None:
"""Initialize :confval:`numfig_format`."""
- pass
+ numfig_format = {'section': _('Section %s'),
+ 'figure': _('Fig. %s'),
+ 'table': _('Table %s'),
+ 'code-block': _('Listing %s')}
+
+ # override default labels by configuration
+ numfig_format.update(config.numfig_format)
+ config.numfig_format = numfig_format
def correct_copyright_year(_app: Sphinx, config: Config) ->None:
@@ -376,7 +425,16 @@ def _substitute_copyright_year(copyright_line: str, replace_year: str) ->str:
The final year in the string is replaced with ``replace_year``.
"""
- pass
+ import re
+
+ year_pattern = r'(\d{4})(?:-\d{4})?(?:[,\s]|$)'
+ match = re.search(year_pattern, copyright_line)
+
+ if match:
+ start, end = match.span()
+ return copyright_line[:start] + replace_year + copyright_line[end:]
+
+ return copyright_line
def check_confval_types(app: (Sphinx | None), config: Config) ->None:
@@ -391,4 +449,10 @@ def check_root_doc(app: Sphinx, env: BuildEnvironment, added: Set[str],
"""Adjust root_doc to 'contents' to support an old project which does not have
any root_doc setting.
"""
- pass
+ if app.config.root_doc == 'index' and 'index' not in env.found_docs:
+ if 'contents' in env.found_docs:
+ app.config.root_doc = "contents"
+ logger.warning(__('Since there is no index.rst file, changing '
+ 'root_doc to contents'))
+ return ['contents']
+ return []
diff --git a/sphinx/deprecation.py b/sphinx/deprecation.py
index ee68dfa87..ec212e615 100644
--- a/sphinx/deprecation.py
+++ b/sphinx/deprecation.py
@@ -51,4 +51,21 @@ def _deprecation_warning(module: str, attribute: str, canonical_name: str=
_deprecation_warning(__name__, name, canonical_name, remove=remove)
return deprecated_object
"""
- pass
+ major, minor = remove
+ version = f"{major}.{minor}"
+
+ if canonical_name:
+ message = f"{module}.{attribute} is deprecated and will be removed in Sphinx {version}. " \
+ f"Please use {canonical_name} instead."
+ else:
+ message = f"{module}.{attribute} is deprecated and will be removed in Sphinx {version}."
+
+ if raises:
+ raise AttributeError(message)
+ else:
+ if major == 9:
+ warnings.warn(message, RemovedInSphinx90Warning, stacklevel=3)
+ elif major == 10:
+ warnings.warn(message, RemovedInSphinx10Warning, stacklevel=3)
+ else:
+ warnings.warn(message, DeprecationWarning, stacklevel=3)
diff --git a/sphinx/directives/other.py b/sphinx/directives/other.py
index 1a6198deb..5c6df965b 100644
--- a/sphinx/directives/other.py
+++ b/sphinx/directives/other.py
@@ -44,7 +44,40 @@ class TocTree(SphinxDirective):
"""
Populate ``toctree['entries']`` and ``toctree['includefiles']`` from content.
"""
- pass
+ suffixes = self.config.source_suffix
+ glob = 'glob' in self.options
+ entries = []
+ includefiles = []
+
+ for entry in self.content:
+ if not entry:
+ continue
+ # look for explicit titles ("Some Title <document>")
+ explicit = explicit_title_re.match(entry)
+ if explicit:
+ ref = explicit.group(2)
+ title = explicit.group(1)
+ else:
+ ref = entry
+ title = None
+
+ if glob and glob_re.match(ref):
+ patname = ref
+ docnames = sorted(self.env.found_docs)
+ for docname in patfilter(docnames, patname):
+ entries.append((title, docname))
+ includefiles.append(docname)
+ else:
+ docname = ref
+ if docname not in self.env.found_docs:
+ logger.warning(__('toctree contains reference to nonexisting document %r'), docname)
+ self.env.note_reread()
+ else:
+ entries.append((title, docname))
+ includefiles.append(docname)
+
+ toctree['entries'] = entries
+ toctree['includefiles'] = includefiles
class Author(SphinxDirective):
@@ -58,6 +91,16 @@ class Author(SphinxDirective):
final_argument_whitespace = True
option_spec: ClassVar[OptionSpec] = {}
+ def run(self) -> list[Node]:
+ if not self.config.show_authors:
+ return []
+ para = nodes.paragraph(translatable=False)
+ emph = nodes.emphasis()
+ para += emph
+ text = self.arguments[0]
+ emph += nodes.Text(text, text)
+ return [para]
+
class SeeAlso(BaseAdmonition):
"""
@@ -76,6 +119,12 @@ class TabularColumns(SphinxDirective):
final_argument_whitespace = True
option_spec: ClassVar[OptionSpec] = {}
+ def run(self) -> list[Node]:
+ node = addnodes.tabular_col_spec()
+ node['spec'] = self.arguments[0]
+ set_source_info(self, node)
+ return [node]
+
class Centered(SphinxDirective):
"""
@@ -87,6 +136,13 @@ class Centered(SphinxDirective):
final_argument_whitespace = True
option_spec: ClassVar[OptionSpec] = {}
+ def run(self) -> list[Node]:
+ text = self.arguments[0]
+ para = nodes.paragraph(text, classes=['centered'])
+ strong = nodes.strong(text, text)
+ para += strong
+ return [para]
+
class Acks(SphinxDirective):
"""
@@ -98,6 +154,16 @@ class Acks(SphinxDirective):
final_argument_whitespace = False
option_spec: ClassVar[OptionSpec] = {}
+ def run(self) -> list[Node]:
+ node = addnodes.acks()
+ node.document = self.state.document
+ self.state.nested_parse(self.content, self.content_offset, node)
+ if len(node.children) != 1 or not isinstance(node.children[0], nodes.bullet_list):
+ return [self.state.document.reporter.warning(
+ 'Acknowledgement list must contain a single bullet list',
+ line=self.lineno)]
+ return [node]
+
class HList(SphinxDirective):
"""
@@ -109,6 +175,23 @@ class HList(SphinxDirective):
final_argument_whitespace = False
option_spec: ClassVar[OptionSpec] = {'columns': int}
+ def run(self) -> list[Node]:
+ ncolumns = self.options.get('columns', 2)
+ node = nodes.paragraph()
+ node.document = self.state.document
+ self.state.nested_parse(self.content, self.content_offset, node)
+ if len(node.children) != 1 or not isinstance(node.children[0], nodes.bullet_list):
+ return [self.state.document.reporter.warning(
+ 'hlist content is not a single bullet list',
+ line=self.lineno)]
+ bullet_list = node.children[0]
+ list_items = bullet_list.children
+ for i, item in enumerate(list_items):
+ item['classes'].append('hlist-item')
+ item['classes'].append(f'hlist-col-{i % ncolumns + 1}')
+ bullet_list['classes'].append('hlist')
+ return [bullet_list]
+
class Only(SphinxDirective):
"""
@@ -120,6 +203,15 @@ class Only(SphinxDirective):
final_argument_whitespace = True
option_spec: ClassVar[OptionSpec] = {}
+ def run(self) -> list[Node]:
+ node = addnodes.only()
+ node.document = self.state.document
+ set_source_info(self, node)
+ node['expr'] = self.arguments[0]
+ self.state.nested_parse(self.content, self.content_offset, node,
+ match_titles=True)
+ return [node]
+
class Include(BaseInclude, SphinxDirective):
"""
diff --git a/sphinx/domains/_index.py b/sphinx/domains/_index.py
index ef50b53cd..cba997097 100644
--- a/sphinx/domains/_index.py
+++ b/sphinx/domains/_index.py
@@ -104,4 +104,4 @@ class Index(ABC):
Qualifier and description are not rendered for some output formats such
as LaTeX.
"""
- pass
+ raise NotImplementedError("Subclasses must implement the generate method")
diff --git a/sphinx/domains/c/_parser.py b/sphinx/domains/c/_parser.py
index c92013687..4e0a2d164 100644
--- a/sphinx/domains/c/_parser.py
+++ b/sphinx/domains/c/_parser.py
@@ -13,7 +13,31 @@ class DefinitionParser(BaseParser):
def _parse_decl_specs_simple(self, outer: (str | None), typed: bool
) ->ASTDeclSpecsSimple:
"""Just parse the simple ones."""
- pass
+        specs = []
+        while True:
+            if len(self.tokens) == 0:
+                break
+            token = self.tokens[0]
+            if token.kind == 'keyword':
+                if _simple_type_specifiers_re.match(token.value):
+                    specs.append(token.value)
+                    self.tokens.pop(0)
+                else:
+                    break
+            elif token.kind == 'identifier':
+                if _simple_type_specifiers_re.match(token.value):
+                    specs.append(token.value)
+                    self.tokens.pop(0)
+                else:
+                    break
+            else:
+                break
+        if len(specs) == 0:
+            if typed:
+                self.fail("Expected type specifier in declaration.")
+            else:
+                return None
+        return ASTDeclSpecsSimple(specs)
def _parse_type(self, named: (bool | str), outer: (str | None)=None
) ->ASTType:
@@ -21,4 +45,13 @@ class DefinitionParser(BaseParser):
named=False|'single'|True: 'single' is e.g., for function objects which
doesn't need to name the arguments, but otherwise is a single name
"""
- pass
+ declSpecs = self._parse_decl_specs_simple(outer, typed=True)
+ if declSpecs is None:
+ self.fail("Expected type")
+
+ decl = self._parse_declarator(named=named, typed=False)
+
+ if decl is None:
+ return ASTType(declSpecs, None)
+ else:
+ return ASTType(declSpecs, decl)
diff --git a/sphinx/domains/cpp/_parser.py b/sphinx/domains/cpp/_parser.py
index f09b4224d..50e139e4f 100644
--- a/sphinx/domains/cpp/_parser.py
+++ b/sphinx/domains/cpp/_parser.py
@@ -15,7 +15,55 @@ class DefinitionParser(BaseParser):
def _parse_decl_specs_simple(self, outer: str, typed: bool
) ->ASTDeclSpecsSimple:
"""Just parse the simple ones."""
- pass
+ storage = None
+ threadLocal = None
+ inline = None
+ virtual = None
+ explicit = None
+ constexpr = None
+ volatile = None
+ const = None
+ friend = None
+ attrs = None
+
+ while True:
+ pos = self.pos
+ try:
+ # Check for simple type specifiers
+ if self.match(_simple_type_specifiers_re):
+ self.skip_ws()
+ continue
+ # Check for storage class specifiers
+ elif self.match(r'static|extern|register|mutable'):
+ if storage is not None:
+ self.fail("Multiple storage class specifiers")
+ storage = self.matched_text
+ elif self.match('thread_local'):
+ threadLocal = True
+ elif self.match('inline'):
+ inline = True
+ elif self.match('virtual'):
+ virtual = True
+ elif self.match('explicit'):
+ explicit = True
+ elif self.match('constexpr'):
+ constexpr = True
+ elif self.match('volatile'):
+ volatile = True
+ elif self.match('const'):
+ const = True
+ elif self.match('friend'):
+ friend = True
+ else:
+ break
+ self.skip_ws()
+ except DefinitionError:
+ self.pos = pos
+ break
+
+ return ASTDeclSpecsSimple(storage, threadLocal, inline, virtual,
+ explicit, constexpr, volatile, const,
+ friend, attrs)
def _parse_type(self, named: (bool | str), outer: (str | None)=None
) ->ASTType:
@@ -25,4 +73,19 @@ class DefinitionParser(BaseParser):
outer == operatorCast: annoying case, we should not take the params
"""
- pass
+ if outer == 'operatorCast':
+ # Special case for cast operators
+ declSpecs = self._parse_decl_specs_simple('operatorCast', typed=True)
+ decl = self._parse_declarator(named=True, paramMode='operatorCast')
+ return ASTType(declSpecs, decl)
+
+ declSpecs = self._parse_decl_specs_simple(outer, typed=True)
+
+ if self.skip_string('...'):
+ # Parameter pack
+ if not named:
+ self.fail("Unnamed parameter pack.")
+ return ASTType(declSpecs, ASTDeclaratorParamPack())
+
+ decl = self._parse_declarator(named=named, paramMode=outer)
+ return ASTType(declSpecs, decl)
diff --git a/sphinx/domains/index.py b/sphinx/domains/index.py
index 312617719..e0f70ad38 100644
--- a/sphinx/domains/index.py
+++ b/sphinx/domains/index.py
@@ -26,7 +26,12 @@ class IndexDomain(Domain):
def process_doc(self, env: BuildEnvironment, docname: str, document: Node
) ->None:
"""Process a document after it is read by the environment."""
- pass
+ for node in document.traverse(addnodes.index):
+ entries = node['entries']
+ for entry in entries:
+ split_entries = split_index_msg(entry[1])
+ for (type, value, target, main, key_) in split_entries:
+ self.add_entry(type, value, target, main, docname)
class IndexDirective(SphinxDirective):
@@ -39,6 +44,26 @@ class IndexDirective(SphinxDirective):
final_argument_whitespace = True
option_spec: ClassVar[OptionSpec] = {'name': directives.unchanged}
+ def run(self) -> list[Node]:
+ arguments = self.arguments[0].split('\n')
+ targetid = 'index-%s' % self.env.new_serialno('index')
+ targetnode = nodes.target('', '', ids=[targetid])
+ self.state.document.note_explicit_target(targetnode)
+ indexnode = addnodes.index()
+ indexnode['entries'] = []
+ for entry in arguments:
+ indexnode['entries'].extend(process_index_entry(entry, targetid))
+ return [indexnode, targetnode]
+
class IndexRole(ReferenceRole):
- pass
+ def run(self) -> tuple[list[Node], list[system_message]]:
+ target = self.target
+ title = self.title
+ index = addnodes.index(entries=[('single', target, '', '', None)])
+ target_id = 'index-%s' % self.env.new_serialno('index')
+ target = nodes.target('', '', ids=[target_id])
+ reference = nodes.reference('', '', internal=True, refid=target_id,
+ classes=['index'])
+ reference += nodes.Text(title, title)
+ return [index, target, reference], []
diff --git a/sphinx/domains/javascript.py b/sphinx/domains/javascript.py
index 760e3b217..5dcd3094d 100644
--- a/sphinx/domains/javascript.py
+++ b/sphinx/domains/javascript.py
@@ -45,7 +45,48 @@ class JSObject(ObjectDescription[tuple[str, str]]):
namespace and class will be determined by the nesting of domain
directives.
"""
- pass
+ sig = sig.strip()
+ if '(' in sig and sig[-1:] == ')':
+ prefix, arglist = sig.split('(', 1)
+ prefix = prefix.strip()
+ arglist = arglist[:-1].strip()
+ else:
+ prefix = sig
+ arglist = None
+
+ modname = self.options.get('module', self.env.ref_context.get('js:module'))
+ classname = self.env.ref_context.get('js:class')
+
+ if classname:
+ add_module = False
+ if prefix and prefix != classname:
+ fullname = classname + '.' + prefix
+ else:
+ fullname = classname
+ else:
+ add_module = True
+ fullname = prefix
+
+ if modname and add_module:
+ fullname = modname + '.' + fullname
+
+ signode['module'] = modname
+ signode['class'] = classname
+ signode['fullname'] = fullname
+
+ sig_prefix = self.get_signature_prefix(sig)
+ if sig_prefix:
+ signode += addnodes.desc_annotation(sig_prefix, sig_prefix)
+
+ if prefix:
+ signode += addnodes.desc_name(prefix, prefix)
+ if self.has_arguments:
+ if not arglist:
+ signode += addnodes.desc_parameterlist()
+ else:
+ _pseudo_parse_arglist(signode, arglist)
+
+ return fullname, prefix
def before_content(self) ->None:
"""Handle object nesting before content
@@ -71,7 +112,12 @@ class JSObject(ObjectDescription[tuple[str, str]]):
Current object prefix. This should generally reflect the last
element in the prefix history
"""
- pass
+ if self.names:
+ fullname = self.names[0][0]
+ if self.allow_nesting:
+ objects = self.env.ref_context.setdefault('js:objects', [])
+ objects.append(fullname)
+ self.env.ref_context['js:object'] = fullname
def after_content(self) ->None:
"""Handle object de-nesting after content
@@ -83,7 +129,11 @@ class JSObject(ObjectDescription[tuple[str, str]]):
be altered as we didn't affect the nesting levels in
:py:meth:`before_content`.
"""
- pass
+ if self.allow_nesting:
+ objects = self.env.ref_context.setdefault('js:objects', [])
+ if objects:
+ objects.pop()
+ self.env.ref_context['js:object'] = None
class JSCallable(JSObject):
diff --git a/sphinx/domains/math.py b/sphinx/domains/math.py
index 18f76a695..68c5efab2 100644
--- a/sphinx/domains/math.py
+++ b/sphinx/domains/math.py
@@ -19,7 +19,10 @@ logger = logging.getLogger(__name__)
class MathReferenceRole(XRefRole):
- pass
+ def result_nodes(self, document: Node, env: BuildEnvironment, node: Element,
+ is_ref: bool) -> tuple[list[Node], list[system_message]]:
+ node['refdomain'] = 'math'
+ return [node], []
class MathDomain(Domain):
@@ -30,3 +33,48 @@ class MathDomain(Domain):
dangling_warnings = {'eq': 'equation not found: %(target)s'}
enumerable_nodes = {nodes.math_block: ('displaymath', None)}
roles = {'numref': MathReferenceRole()}
+
+ def get_objects(self) -> Iterable[tuple[str, str, str, str, str, int]]:
+ return []
+
+ def resolve_xref(self, env: BuildEnvironment, fromdocname: str, builder: Builder,
+ typ: str, target: str, node: pending_xref, contnode: Element
+ ) -> Element | None:
+ if typ == 'eq':
+ try:
+ docname, labelid = self.data['objects'][target]
+ return make_refnode(builder, fromdocname, docname, labelid, contnode)
+ except KeyError:
+ return None
+ return None
+
+ def add_equation(self, env: BuildEnvironment, docname: str, labelid: str, node: Element) -> None:
+ if not hasattr(env, 'math_equations'):
+ env.math_equations = {}
+ env.math_equations[labelid] = {
+ 'docname': docname,
+ 'equation': node['latex'],
+ 'lineno': node.line,
+ 'target': f'equation-{labelid}'
+ }
+ self.data['objects'][labelid] = (docname, labelid)
+ self.data['has_equations'][docname] = True
+
+ def process_doc(self, env: BuildEnvironment, docname: str, document: Node) -> None:
+ for node in document.traverse(nodes.math_block):
+ if node['label']:
+ self.add_equation(env, docname, node['label'], node)
+
+ def merge_domaindata(self, docnames: list[str], otherdata: dict) -> None:
+ for labelid, (fn, _) in otherdata['objects'].items():
+ if fn in docnames:
+ self.data['objects'][labelid] = otherdata['objects'][labelid]
+ for docname in docnames:
+ if docname in otherdata['has_equations']:
+ self.data['has_equations'][docname] = True
+
+ def clear_doc(self, docname: str) -> None:
+ for labelid, (fn, _) in list(self.data['objects'].items()):
+ if fn == docname:
+ del self.data['objects'][labelid]
+ self.data['has_equations'].pop(docname, None)
diff --git a/sphinx/domains/python/_annotations.py b/sphinx/domains/python/_annotations.py
index bc2418143..30e05f681 100644
--- a/sphinx/domains/python/_annotations.py
+++ b/sphinx/domains/python/_annotations.py
@@ -20,18 +20,64 @@ if TYPE_CHECKING:
def parse_reftarget(reftarget: str, suppress_prefix: bool=False) ->tuple[
str, str, str, bool]:
"""Parse a type string and return (reftype, reftarget, title, refspecific flag)"""
- pass
+ reftype = ''
+ title = reftarget
+ refspecific = False
+
+ if '.' in reftarget:
+ module, name = reftarget.rsplit('.', 1)
+ if not suppress_prefix:
+ reftype = 'obj'
+ reftarget = name
+ title = module + '.' + name
+ refspecific = True
+ elif reftarget.startswith('~'):
+ reftarget = reftarget[1:]
+ dot = reftarget.rfind('.')
+ if dot != -1:
+ title = reftarget[dot + 1:]
+
+ return reftype, reftarget, title, refspecific
def type_to_xref(target: str, env: BuildEnvironment, *, suppress_prefix:
bool=False) ->addnodes.pending_xref:
"""Convert a type string to a cross reference node."""
- pass
+ reftype, reftarget, title, refspecific = parse_reftarget(target, suppress_prefix)
+
+ if reftype:
+ domain = 'py'
+ else:
+ domain = 'py'
+ reftype = 'obj'
+
+ return addnodes.pending_xref(
+ '', nodes.Text(title),
+ refdomain=domain, reftype=reftype, reftarget=reftarget,
+ refspecific=refspecific,
+ py_module=env.ref_context.get('py:module'),
+ py_class=env.ref_context.get('py:class')
+ )
def _parse_annotation(annotation: str, env: BuildEnvironment) ->list[Node]:
"""Parse type annotation."""
- pass
+    result = []
+    expr = ast.parse(annotation).body[0].value
+    for token in (expr.elts if isinstance(expr, ast.Tuple) else [expr]):
+        if isinstance(token, ast.Constant) and isinstance(token.value, str):
+            result.append(nodes.Text(token.value))
+        elif isinstance(token, ast.Name):
+            result.append(type_to_xref(token.id, env))
+        elif isinstance(token, ast.Attribute):
+            attrs = []
+            node = token
+            while isinstance(node, ast.Attribute):
+                attrs.insert(0, node.attr)
+                node = node.value
+            if isinstance(node, ast.Name): attrs.insert(0, node.id)
+            result.append(type_to_xref('.'.join(attrs), env))
+    return result
class _TypeParameterListParser(TokenProcessor):
@@ -45,13 +91,61 @@ class _TypeParameterListParser(TokenProcessor):
def _parse_type_list(tp_list: str, env: BuildEnvironment,
multi_line_parameter_list: bool=False) ->addnodes.desc_type_parameter_list:
"""Parse a list of type parameters according to PEP 695."""
- pass
+ parser = _TypeParameterListParser(tp_list)
+ params = parser.type_params
+
+ paramlist = addnodes.desc_type_parameter_list()
+ for name, pos, bound, default in params:
+ param = addnodes.desc_type_parameter()
+ param += nodes.Text(name)
+ if bound:
+ param += nodes.Text(': ')
+ param += _parse_annotation(bound, env)
+ if default:
+ param += nodes.Text(' = ')
+ param += _parse_annotation(default, env)
+ paramlist += param
+
+ if multi_line_parameter_list:
+ paramlist['multi_line_parameter_list'] = True
+
+ return paramlist
def _parse_arglist(arglist: str, env: BuildEnvironment,
multi_line_parameter_list: bool=False) ->addnodes.desc_parameterlist:
"""Parse a list of arguments using AST parser"""
- pass
+    params = ast.parse(f'def func({arglist}): pass').body[0].args
+
+    paramlist = addnodes.desc_parameterlist()
+    for param in params.posonlyargs + params.args + params.kwonlyargs:  # include PEP 570 positional-only params
+        param_node = addnodes.desc_parameter()
+        param_node += nodes.Text(param.arg)
+        if param.annotation:
+            param_node += nodes.Text(': ')
+            param_node += _parse_annotation(ast.unparse(param.annotation), env)
+        paramlist += param_node
+
+    if params.vararg:
+        param_node = addnodes.desc_parameter()
+        param_node += nodes.Text(f'*{params.vararg.arg}')
+        if params.vararg.annotation:
+            param_node += nodes.Text(': ')
+            param_node += _parse_annotation(ast.unparse(params.vararg.annotation), env)
+        paramlist += param_node
+
+    if params.kwarg:
+        param_node = addnodes.desc_parameter()
+        param_node += nodes.Text(f'**{params.kwarg.arg}')
+        if params.kwarg.annotation:
+            param_node += nodes.Text(': ')
+            param_node += _parse_annotation(ast.unparse(params.kwarg.annotation), env)
+        paramlist += param_node
+
+    if multi_line_parameter_list:
+        paramlist['multi_line_parameter_list'] = True
+
+    return paramlist
def _pseudo_parse_arglist(signode: desc_signature, arglist: str,
diff --git a/sphinx/domains/python/_object.py b/sphinx/domains/python/_object.py
index 317998cba..69477ad8b 100644
--- a/sphinx/domains/python/_object.py
+++ b/sphinx/domains/python/_object.py
@@ -79,13 +79,13 @@ class PyObject(ObjectDescription[tuple[str, str]]):
"""May return a prefix to put before the object name in the
signature.
"""
- pass
+ return []
def needs_arglist(self) ->bool:
"""May return true if an empty argument list is to be generated even if
the document contains none.
"""
- pass
+ return False
def handle_signature(self, sig: str, signode: desc_signature) ->tuple[
str, str]:
@@ -97,11 +97,72 @@ class PyObject(ObjectDescription[tuple[str, str]]):
* it is stripped from the displayed name if present
* it is added to the full name (return value) if not present
"""
- pass
+ m = py_sig_re.match(sig)
+ if m is None:
+ raise ValueError
+
+ name_prefix, name, arglist, retann = m.groups()
+
+ # determine module and class name (if applicable), as well as full name
+ modname = self.options.get('module', self.env.ref_context.get('py:module'))
+ classname = self.env.ref_context.get('py:class')
+ if classname:
+ add_module = False
+ if name_prefix and name_prefix.startswith(classname):
+ fullname = name_prefix + name
+ # class name is given again in the signature
+ name_prefix = name_prefix[len(classname):].lstrip('.')
+ elif name_prefix:
+ # class name is given in the signature, but different
+ # (shouldn't happen)
+ fullname = classname + '.' + name_prefix + name
+ else:
+ # class name is not given in the signature
+ fullname = classname + '.' + name
+ else:
+ add_module = True
+ if name_prefix:
+ classname = name_prefix.rstrip('.')
+ fullname = name_prefix + name
+ else:
+ classname = ''
+ fullname = name
+
+ signode['module'] = modname
+ signode['class'] = classname
+ signode['fullname'] = fullname
+
+ sig_prefix = self.get_signature_prefix(sig)
+ if sig_prefix:
+ signode += sig_prefix
+
+ if name_prefix:
+ signode += addnodes.desc_addname(name_prefix, name_prefix)
+ # exceptions are a special case, since they are documented in the
+ # 'exceptions' module.
+ elif add_module and self.env.config.add_module_names:
+ modname = self.options.get('module', self.env.ref_context.get('py:module'))
+ if modname and modname != 'exceptions':
+ nodetext = modname + '.'
+ signode += addnodes.desc_addname(nodetext, nodetext)
+
+ signode += addnodes.desc_name(name, name)
+ if arglist:
+ try:
+ signode += _parse_arglist(arglist, self.env)
+ except SyntaxError:
+ # fallback to parse arglist original string
+ signode += addnodes.desc_parameterlist(arglist, arglist)
+
+ if retann:
+ children = _parse_annotation(retann, self.env)
+ signode += addnodes.desc_returns(retann, '', *children)
+
+ return fullname, name_prefix
def get_index_text(self, modname: str, name: tuple[str, str]) ->str:
"""Return the text for the index entry of the object."""
- pass
+ return f"{name[0]} ({modname})"
def before_content(self) ->None:
"""Handle object nesting before content
@@ -115,7 +176,15 @@ class PyObject(ObjectDescription[tuple[str, str]]):
only the most recent object is tracked. This object prefix name will be
removed with :py:meth:`after_content`.
"""
- pass
+ if self.names:
+ fullname = self.names[-1][0]
+ if self.allow_nesting:
+ prefix = fullname + '.'
+ self.env.ref_context['py:class'] = fullname
+ if prefix not in self.env.temp_data.get('py:class_stack', []):
+ self.env.temp_data.setdefault('py:class_stack', []).append(prefix)
+ else:
+ self.env.temp_data['py:last_class'] = fullname
def after_content(self) ->None:
"""Handle object de-nesting after content
@@ -127,4 +196,13 @@ class PyObject(ObjectDescription[tuple[str, str]]):
be altered as we didn't affect the nesting levels in
:py:meth:`before_content`.
"""
- pass
+ if self.allow_nesting:
+ class_stack = self.env.temp_data.get('py:class_stack', [])
+ if class_stack:
+ class_stack.pop()
+ if class_stack:
+ self.env.ref_context['py:class'] = class_stack[-1].rstrip('.')
+ else:
+ self.env.ref_context.pop('py:class', None)
+ else:
+ self.env.temp_data.pop('py:last_class', None)
diff --git a/sphinx/domains/rst.py b/sphinx/domains/rst.py
index fb22cbab5..cd06103e3 100644
--- a/sphinx/domains/rst.py
+++ b/sphinx/domains/rst.py
@@ -38,7 +38,12 @@ def parse_directive(d: str) ->tuple[str, str]:
Returns (directive, arguments) string tuple. If no arguments are given,
returns (directive, '').
"""
- pass
+ match = dir_sig_re.match(d.strip())
+ if match:
+ directive = match.group(1).strip()
+ arguments = match.group(2).strip()
+ return directive, arguments
+ return d.strip(), ''
class ReSTDirective(ReSTMarkup):
diff --git a/sphinx/environment/adapters/asset.py b/sphinx/environment/adapters/asset.py
index 524c29ad4..72c34f69e 100644
--- a/sphinx/environment/adapters/asset.py
+++ b/sphinx/environment/adapters/asset.py
@@ -10,4 +10,6 @@ class ImageAdapter:
def get_original_image_uri(self, name: str) ->str:
"""Get the original image URI."""
- pass
+ if name in self.env.images:
+ return self.env.images[name][0]
+ return name
diff --git a/sphinx/environment/adapters/indexentries.py b/sphinx/environment/adapters/indexentries.py
index bb2768a33..a1cd79603 100644
--- a/sphinx/environment/adapters/indexentries.py
+++ b/sphinx/environment/adapters/indexentries.py
@@ -35,24 +35,90 @@ class IndexEntries:
def create_index(self, builder: Builder, group_entries: bool=True,
_fixre: re.Pattern[str]=re.compile('(.*) ([(][^()]*[)])')) ->_Index:
"""Create the real index from the collected index entries."""
- pass
+ self.builder = builder
+
+ content = {}
+
+ # collect all entries
+ for docname in self.env.found_docs:
+ if docname not in self.env.indexentries:
+ continue
+ for entry in self.env.indexentries[docname]:
+ entry_type = entry[2]
+ if entry_type == 'main':
+ key, targets = entry[1], [(docname, entry[3])]
+ elif entry_type == 'pair':
+ key, targets = entry[1], [(docname, entry[3])]
+ key2, targets2 = entry[4], [(docname, entry[5])]
+ content.setdefault(key2, ([], {}, None))[0].extend(targets2)
+ elif entry_type == 'triple':
+ key, targets = entry[1], [(docname, entry[3])]
+ key2, targets2 = entry[4], [(docname, entry[5])]
+ key3, targets3 = entry[6], [(docname, entry[7])]
+ content.setdefault(key2, ([], {}, None))[0].extend(targets2)
+ content.setdefault(key3, ([], {}, None))[0].extend(targets3)
+ else:
+ continue
+
+ # if there is a parenthesis in the key, put the parenthesized
+ # part before the main part
+ match = _fixre.match(key)
+ if match:
+ key = match.group(2) + ' ' + match.group(1)
+ content.setdefault(key, ([], {}, entry[0]))[0].extend(targets)
+
+ # sort and group the entries
+ result = sorted(content.items(), key=_key_func_1)
+
+ if group_entries:
+ # group the entries by letter or category key
+ grouped_result = []
+ for group_key, group_items in groupby(result, key=_group_by_func):
+ grouped_result.append((group_key, list(group_items)))
+ result = grouped_result
+
+ return result
def _key_func_0(entry: _IndexEntryTarget) ->tuple[bool, str | Literal[False]]:
"""Sort the index entries for same keyword."""
- pass
+ # Sort by document name (second element of the tuple)
+ # If it's a string, return (True, document_name)
+ # If it's False, return (False, False)
+ return (isinstance(entry[1], str), entry[1])
def _key_func_1(entry: tuple[str, _IndexEntry]) ->tuple[tuple[int, str], str]:
"""Sort the index entries"""
- pass
+ key = entry[0].lower()
+ # Extract the first character and its Unicode category
+ first_char = key[0] if key else ''
+ category = unicodedata.category(first_char)
+
+ # Determine the sort key
+ if category.startswith('L'): # Letter
+ sort_key = (0, key)
+ elif category.startswith('N'): # Number
+ sort_key = (1, key)
+ else: # Symbol or punctuation
+ sort_key = (2, key)
+
+ return (sort_key, key)
def _key_func_2(entry: tuple[str, _IndexEntryTargets]) ->str:
"""Sort the sub-index entries"""
- pass
+ return entry[0].lower()
def _group_by_func(entry: tuple[str, _IndexEntry]) ->str:
"""Group the entries by letter or category key."""
- pass
+ key = entry[0].lower()
+ if key.startswith('_'):
+ return '_'
+ elif key[0].isalpha():
+ return key[0].upper()
+ elif key[0].isdigit():
+ return '0 - 9'
+ else:
+ return 'Symbols'
diff --git a/sphinx/environment/adapters/toctree.py b/sphinx/environment/adapters/toctree.py
index e3afbc450..a866fa218 100644
--- a/sphinx/environment/adapters/toctree.py
+++ b/sphinx/environment/adapters/toctree.py
@@ -21,7 +21,14 @@ def note_toctree(env: BuildEnvironment, docname: str, toctreenode: addnodes
"""Note a TOC tree directive in a document and gather information about
file relations from it.
"""
- pass
+ for title, ref in toctreenode['entries']:
+ if url_re.match(ref) or ref == 'self':
+ continue
+ if ref in env.toctree_includes:
+ env.toctree_includes[ref].add(docname)
+ else:
+ env.toctree_includes[ref] = {docname}
+ env.tocs[docname] = toctreenode
def document_toc(env: BuildEnvironment, docname: str, tags: Tags) ->Node:
@@ -31,7 +38,14 @@ def document_toc(env: BuildEnvironment, docname: str, tags: Tags) ->Node:
For a ToC tree that shows the document's place in the
ToC structure, use `get_toctree_for`.
"""
- pass
+ doctree = env.get_doctree(docname)
+ toc = env.get_toc_for(docname, doctree)
+
+ for node in toc.traverse(nodes.reference):
+ node['refuri'] = node['anchorname']
+
+ _only_node_keep_children(toc, tags)
+ return toc
def global_toctree_for_doc(env: BuildEnvironment, docname: str, builder:
@@ -41,7 +55,24 @@ def global_toctree_for_doc(env: BuildEnvironment, docname: str, builder:
This gives the global ToC, with all ancestors and their siblings.
"""
- pass
+ doctree = env.get_doctree(env.config.master_doc)
+ toctrees = []
+ for toctreenode in doctree.traverse(addnodes.toctree):
+ toctree = _resolve_toctree(env, docname, builder, toctreenode,
+ prune=False, maxdepth=maxdepth,
+ titles_only=titles_only, collapse=collapse,
+ includehidden=includehidden)
+ if toctree:
+ toctrees.append(toctree)
+
+ if not toctrees:
+ return None
+
+ result = toctrees[0]
+ for toctree in toctrees[1:]:
+ result.extend(toctree.children)
+
+ return result
def _resolve_toctree(env: BuildEnvironment, docname: str, builder: Builder,
@@ -59,7 +90,31 @@ def _resolve_toctree(env: BuildEnvironment, docname: str, builder: Builder,
If *collapse* is True, all branches not containing docname will
be collapsed.
"""
- pass
+ if not toctree.get('entries'):
+ return None
+
+ maxdepth = maxdepth or toctree.get('maxdepth', -1)
+ toctree_ancestors = set(env.toctree_includes.get(docname, []))
+ included = Matcher(toctree.get('includefiles', []))
+ excluded = Matcher(toctree.get('excludefiles', []))
+
+ entries = _entries_from_toctree(
+ env, prune, titles_only, collapse, includehidden,
+ builder.tags, toctree_ancestors, included, excluded,
+ toctree, [docname]
+ )
+
+ if not entries:
+ return None
+
+ wrapper = nodes.bullet_list()
+ for entry in entries:
+ wrapper += entry
+
+ if prune:
+ wrapper = _toctree_copy(wrapper, 1, maxdepth, collapse, builder.tags)
+
+ return wrapper
def _entries_from_toctree(env: BuildEnvironment, prune: bool, titles_only:
@@ -68,7 +123,48 @@ def _entries_from_toctree(env: BuildEnvironment, prune: bool, titles_only:
toctreenode: addnodes.toctree, parents: list[str], subtree: bool=False
) ->list[Element]:
"""Return TOC entries for a toctree node."""
- pass
+ entries = []
+ for (title, ref) in toctreenode['entries']:
+ if url_re.match(ref) or ref == 'self':
+ entries.append(nodes.reference(ref, title, refuri=ref))
+ continue
+
+ if ref in parents:
+ logger.warning(__('circular toctree references detected, ignoring: %s'),
+ ' -> '.join(parents))
+ continue
+
+ if not included(ref) or excluded(ref):
+ continue
+
+ if ref not in env.tocs:
+ logger.warning(__('toctree contains reference to nonexisting document %r'),
+ ref, location=toctreenode)
+ continue
+
+ toc = env.tocs[ref]
+ if not toc.traverse(nodes.section):
+ continue
+
+ entry = _toctree_copy(toc, 1, 1, collapse, tags)
+ if title and title != clean_astext(toc[0]):
+ entry[0].insert(0, nodes.Text(title))
+
+ if not subtree and ref in toctree_ancestors:
+ _toctree_add_classes(entry[0], 1, ref)
+
+ entries.append(entry)
+
+ if includehidden:
+ for subnode in toc.traverse(addnodes.toctree):
+ subentries = _entries_from_toctree(
+ env, prune, titles_only, collapse, includehidden,
+ tags, toctree_ancestors, included, excluded,
+ subnode, parents + [ref], subtree=True
+ )
+ entries.extend(subentries)
+
+ return entries
def _toctree_add_classes(node: Element, depth: int, docname: str) ->None:
diff --git a/sphinx/environment/collectors/asset.py b/sphinx/environment/collectors/asset.py
index 7fe3a334b..c3bac6dbb 100644
--- a/sphinx/environment/collectors/asset.py
+++ b/sphinx/environment/collectors/asset.py
@@ -25,7 +25,27 @@ class ImageCollector(EnvironmentCollector):
def process_doc(self, app: Sphinx, doctree: nodes.document) ->None:
"""Process and rewrite image URIs."""
- pass
+ env = app.env
+ for node in doctree.traverse(nodes.image):
+ uri = node['uri']
+ if uri.startswith('data:'):
+ continue # Skip data URIs
+
+ # Handle language-specific images
+ if env.config.language:
+ new_uri = get_image_filename_for_language(uri, env)
+ if new_uri:
+ node['uri'] = new_uri
+ continue
+
+ # Rewrite relative URIs
+ if '://' not in uri and not uri.startswith('/'):
+ node['uri'] = path.join(env.docname, uri)
+ node['candidates'] = {'*': node['uri']}
+
+ # Add to environment's image list
+ if node['uri'] not in env.images:
+ env.images[node['uri']] = (env.docname, node['uri'])
class DownloadFileCollector(EnvironmentCollector):
@@ -33,4 +53,21 @@ class DownloadFileCollector(EnvironmentCollector):
def process_doc(self, app: Sphinx, doctree: nodes.document) ->None:
"""Process downloadable file paths."""
- pass
+ env = app.env
+ for node in doctree.traverse(addnodes.download_reference):
+ if 'filename' not in node:
+ continue
+
+ filename = node['filename']
+ if '://' in filename or filename.startswith('/'):
+ continue # Skip absolute URIs or external links
+
+ # Rewrite relative paths
+ docdir = path.dirname(env.doc2path(env.docname, base=None))
+ filepath = path.normpath(path.join(docdir, filename))
+ if path.isfile(path.join(env.srcdir, filepath)):
+ node['filename'] = filepath
+ env.dependencies[env.docname].add(filepath)
+ else:
+ logger.warning(__('download file not found: %s') % filename,
+ location=node)
diff --git a/sphinx/environment/collectors/dependencies.py b/sphinx/environment/collectors/dependencies.py
index 25637aa67..92e74dc01 100644
--- a/sphinx/environment/collectors/dependencies.py
+++ b/sphinx/environment/collectors/dependencies.py
@@ -18,4 +18,23 @@ class DependenciesCollector(EnvironmentCollector):
def process_doc(self, app: Sphinx, doctree: nodes.document) ->None:
"""Process docutils-generated dependency info."""
- pass
+ env = app.env
+ docname = env.docname
+
+ # Get the current file's directory
+ base_dir = path.dirname(env.doc2path(docname, base=None))
+
+ for node in doctree.traverse(nodes.dependent):
+ dep = node['name']
+ if path.isabs(dep):
+ env.note_dependency(dep)
+ else:
+ rel_filename = relative_path(base_dir, dep)
+ try:
+ # Try to decode the filename using the filesystem encoding
+ filename = rel_filename.encode(fs_encoding).decode('utf-8')
+ except UnicodeDecodeError:
+ # If decoding fails, use the original filename
+ filename = rel_filename
+
+ env.note_dependency(path.normpath(path.join(base_dir, filename)))
diff --git a/sphinx/environment/collectors/metadata.py b/sphinx/environment/collectors/metadata.py
index 199b4eb5e..7995bfe4e 100644
--- a/sphinx/environment/collectors/metadata.py
+++ b/sphinx/environment/collectors/metadata.py
@@ -17,4 +17,14 @@ class MetadataCollector(EnvironmentCollector):
Keep processing minimal -- just return what docutils says.
"""
- pass
+ metadata = {}
+ for node in doctree.traverse(nodes.docinfo):
+ for child in node.children:
+ if isinstance(child, nodes.field):
+ field_name = child.children[0].astext()
+ field_body = child.children[1].astext()
+ metadata[field_name] = field_body
+ elif isinstance(child, nodes.TextElement):
+ metadata[child.__class__.__name__] = child.astext()
+
+ doctree.settings.env.metadata[doctree.get('source')] = metadata
diff --git a/sphinx/environment/collectors/title.py b/sphinx/environment/collectors/title.py
index 640a7a3a2..16cc5fbbd 100644
--- a/sphinx/environment/collectors/title.py
+++ b/sphinx/environment/collectors/title.py
@@ -13,8 +13,24 @@ if TYPE_CHECKING:
class TitleCollector(EnvironmentCollector):
"""title collector for sphinx.environment."""
- def process_doc(self, app: Sphinx, doctree: nodes.document) ->None:
+ def process_doc(self, app: Sphinx, doctree: nodes.document) -> None:
"""Add a title node to the document (just copy the first section title),
and store that title in the environment.
"""
- pass
+ titlenode = nodes.title()
+ visitor = SphinxContentsFilter(doctree)
+ doctree.walkabout(visitor)
+ if visitor.title:
+ titlenode += visitor.title
+ else:
+ # if no title exists, use the document's first heading as the title
+ for node in doctree.traverse(nodes.section):
+ if node.children and isinstance(node.children[0], nodes.title):
+ titlenode += node.children[0].deepcopy()
+ break
+
+ if titlenode:
+ doctree.insert(0, titlenode)
+ app.env.titles[app.env.docname] = titlenode
+ else:
+ app.env.titles[app.env.docname] = None
diff --git a/sphinx/environment/collectors/toctree.py b/sphinx/environment/collectors/toctree.py
index c429eda00..0ecb02280 100644
--- a/sphinx/environment/collectors/toctree.py
+++ b/sphinx/environment/collectors/toctree.py
@@ -22,12 +22,173 @@ class TocTreeCollector(EnvironmentCollector):
def process_doc(self, app: Sphinx, doctree: nodes.document) ->None:
"""Build a TOC from the doctree and store it in the inventory."""
- pass
+ docname = app.env.docname
+ numentries = [0] # nonlocal again...
+
+ def traverse_in_section(node: Element, cls: type[N]) ->list[N]:
+ result = []
+ if isinstance(node, cls):
+ result.append(node)
+ for child in node.children:
+ if isinstance(child, nodes.section):
+ continue
+ result.extend(traverse_in_section(child, cls))
+ return result
+
+ def build_toc(node: Element, depth: int = 1) ->None:
+ entries = []
+ for sectionnode in node:
+ if not isinstance(sectionnode, nodes.section):
+ continue
+ title = sectionnode.next_node(nodes.title)
+ if not title:
+ # empty section
+ continue
+ if depth > app.config.toc_maxdepth:
+ # don't go deeper than toc_maxdepth
+ continue
+ reference = nodes.reference('', '', internal=True,
+ refuri=app.builder.get_relative_uri(docname, docname),
+ anchorname=title['ids'][0],
+ *title.children)
+ para = addnodes.compact_paragraph('', '', reference)
+ item = nodes.list_item('', para)
+ sub_item = build_toc(sectionnode, depth + 1)
+ item += sub_item
+ entries.append(item)
+ numentries[0] += 1
+ if entries:
+ return nodes.bullet_list('', *entries)
+ return []
+
+ toc = build_toc(doctree)
+ if toc:
+ app.env.tocs[docname] = toc
+ else:
+ app.env.tocs[docname] = nodes.bullet_list('')
+ app.env.toc_num_entries[docname] = numentries[0]
+
+ # store labels of this doc to environment
+ for labelid, labelnode in doctree.nametypes.items():
+ if not isinstance(labelnode, nodes.Element):
+ continue
+ if labelid not in doctree.ids:
+ continue
+ node = doctree.ids[labelid]
+ if isinstance(node, nodes.target):
+ node = node.parent
+ if isinstance(node, (nodes.section, nodes.table, nodes.figure)):
+ sectname = node.next_node(nodes.title).astext() if isinstance(node, nodes.section) else ''
+ app.env.labels[labelid] = app.env.docname, node['ids'][0], sectname
+ app.env.anonlabels[labelid] = app.env.docname, node['ids'][0]
+
+ # add labels from the "label" directive
+ for name, explicit in list(doctree.nametypes.items()):
+ if not explicit:
+ continue
+ labelid = doctree.nameids[name]
+ if labelid is None:
+ continue
+ node = doctree.ids[labelid]
+ if isinstance(node, nodes.target):
+ node = node.parent
+ if isinstance(node, (nodes.section, nodes.table, nodes.figure)):
+ sectname = node.next_node(nodes.title).astext() if isinstance(node, nodes.section) else ''
+ app.env.labels[name] = app.env.docname, node['ids'][0], sectname
+ app.env.anonlabels[name] = app.env.docname, node['ids'][0]
def assign_section_numbers(self, env: BuildEnvironment) ->list[str]:
"""Assign a section number to each heading under a numbered toctree."""
- pass
+ def _walk_toc(node: Element, secnums: dict[str, tuple[int, ...]], depth: int, titlenode: Element | None = None) ->None:
+ # titlenode is the title of the document, it will get assigned a
+ # secnumber too, so that it shows up in next/prev/parent rellinks
+ for subnode in node.children:
+ if isinstance(subnode, nodes.bullet_list):
+ numstack.append(0)
+ _walk_toc(subnode, secnums, depth - 1, titlenode)
+ numstack.pop()
+ titlenode = None
+ elif isinstance(subnode, nodes.list_item):
+ _walk_toc(subnode, secnums, depth, titlenode)
+ titlenode = None
+ elif isinstance(subnode, addnodes.compact_paragraph):
+ numstack[-1] += 1
+ if depth > 0:
+ number = tuple(numstack)
+ secnums[subnode[0]['anchorname']] = number
+ for refnode in subnode[0]['refuri']:
+ if refnode.hasattr('secnumber'):
+ refnode['secnumber'] = number
+ if titlenode:
+ titlenode['secnumber'] = number
+ titlenode = None
+ elif isinstance(subnode, addnodes.toctree):
+ _walk_toctree(subnode)
+
+ def _walk_toctree(toctreenode: addnodes.toctree) ->None:
+ if 'numbered' in toctreenode:
+ numstack.append(0)
+ for title, ref in toctreenode['entries']:
+ if url_re.match(ref) or ref == 'self':
+ continue
+ if ref in env.tocs:
+ _walk_toc(env.tocs[ref], secnums, depth,
+ env.titles.get(ref))
+ numstack[-1] += 1
+ numstack.pop()
+
+ secnums: dict[str, tuple[int, ...]] = {}
+ depth = env.config.toc_secnumber_depth
+ numstack: list[int] = []
+ for docname in env.toctree_includes:
+ if docname not in env.tocs:
+ continue
+ _walk_toc(env.tocs[docname], secnums, depth,
+ env.titles.get(docname))
+
+ for docname, secnum in secnums.items():
+ env.toc_secnumbers[docname] = secnum
+
+ return list(secnums.keys())
def assign_figure_numbers(self, env: BuildEnvironment) ->list[str]:
"""Assign a figure number to each figure under a numbered toctree."""
- pass
+ fignum_builders = {
+ 'arabic': lambda x: str(x),
+ 'loweralpha': lambda x: chr(ord('a') + x - 1),
+ 'upperalpha': lambda x: chr(ord('A') + x - 1),
+ 'lowerroman': lambda x: int_to_roman(x).lower(),
+ 'upperroman': lambda x: int_to_roman(x),
+ }
+
+ def get_figtype(node: Element) ->str | None:
+ for figtype in env.config.numfig_format:
+ if node.get('figtype') == figtype:
+ return figtype
+ return None
+
+ def get_next_fignumber(figtype: str) ->int:
+ fignumber = env.toc_fignumbers.get(env.docname, {})
+ figure_id = fignumber.get(figtype, 0) + 1
+ fignumber[figtype] = figure_id
+ env.toc_fignumbers[env.docname] = fignumber
+ return figure_id
+
+ def get_fignumber(figtype: str, fignumber: int) ->str:
+ builder = fignum_builders[env.config.numfig_format.get(figtype, 'arabic')]
+ return builder(fignumber)
+
+ numbered: list[str] = []
+ for docname, toc in env.tocs.items():
+ secnumber = env.toc_secnumbers.get(docname, ())
+ env.docname = docname
+ for node in toc.traverse(nodes.reference):
+ if node.get('refuri') in env.tocs:
+ continue
+ figtype = get_figtype(node)
+ if figtype:
+ fignumber = get_next_fignumber(figtype)
+ node['fignumber'] = get_fignumber(figtype, fignumber)
+ numbered.append(node['refuri'])
+
+ return numbered
diff --git a/sphinx/ext/apidoc.py b/sphinx/ext/apidoc.py
index 255a1c96b..350e2b7f7 100644
--- a/sphinx/ext/apidoc.py
+++ b/sphinx/ext/apidoc.py
@@ -41,29 +41,49 @@ template_dir = path.join(package_dir, 'templates', 'apidoc')
def is_initpy(filename: (str | Path)) ->bool:
"""Check *filename* is __init__ file or not."""
- pass
+ return Path(filename).name == '__init__.py'
def module_join(*modnames: (str | None)) ->str:
"""Join module names with dots."""
- pass
+ return '.'.join(filter(None, modnames))
def is_packagedir(dirname: (str | None)=None, files: (list[str] | None)=None
) ->bool:
"""Check given *files* contains __init__ file."""
- pass
+ if dirname is not None:
+ files = os.listdir(dirname)
+ return '__init__.py' in (files or [])
def write_file(name: str, text: str, opts: CliOptions) ->Path:
"""Write the output file for module/package <name>."""
- pass
+ fname = Path(opts.destdir) / f"{name}.{opts.suffix}"
+ if opts.dryrun:
+ logger.info(__('[dry run] Would create file %s'), fname)
+ return fname
+ logger.info(__('Creating file %s.'), fname)
+ with FileAvoidWrite(fname) as f:
+ f.write(text)
+ return fname
def create_module_file(package: (str | None), basename: str, opts:
CliOptions, user_template_dir: (str | None)=None) ->Path:
"""Build the text of the file and write the file."""
- pass
+ qualname = module_join(package, basename)
+ context = {
+ 'show_headings': not opts.noheadings,
+ 'basename': basename,
+ 'qualname': qualname,
+ 'automodule_options': OPTIONS,
+ 'modulefirst': opts.modulefirst,
+ 'includeprivate': opts.includeprivate,
+ 'fullname': opts.full and qualname or basename,
+ }
+ text = ReSTRenderer(user_template_dir or template_dir).render('module.rst_t', context)
+ return write_file(qualname, text, opts)
def create_package_file(root: str, master_package: (str | None), subroot:
@@ -76,7 +96,35 @@ def create_package_file(root: str, master_package: (str | None), subroot:
:returns: list of written files
"""
- pass
+ text = []
+ written = []
+
+ package = module_join(master_package, subroot)
+ basename = path.basename(root)
+
+ context = {
+ 'show_headings': not opts.noheadings,
+ 'basename': basename,
+ 'package': package,
+ 'subpackage': subroot,
+ 'automodule_options': OPTIONS,
+ 'modulefirst': opts.modulefirst,
+ 'includeprivate': opts.includeprivate,
+ 'fullname': opts.full and package or subroot,
+ 'members': sorted(py_files),
+ 'subpackages': sorted(subs),
+ 'is_namespace': is_namespace,
+ }
+
+ text = ReSTRenderer(user_template_dir or template_dir).render('package.rst_t', context)
+ written.append(write_file(package, text, opts))
+
+ if opts.separatemodules:
+ for py_file in py_files:
+ if not is_skipped_module(py_file, opts, excludes):
+ written.append(create_module_file(package, py_file, opts, user_template_dir))
+
+ return written
def create_modules_toc_file(modules: list[str], opts: CliOptions, name: str
diff --git a/sphinx/ext/autodoc/directive.py b/sphinx/ext/autodoc/directive.py
index cbc59b16f..3869994ae 100644
--- a/sphinx/ext/autodoc/directive.py
+++ b/sphinx/ext/autodoc/directive.py
@@ -50,13 +50,39 @@ class DocumenterBridge:
def process_documenter_options(documenter: type[Documenter], config: Config,
options: dict[str, str]) ->Options:
"""Recognize options of Documenter from user input."""
- pass
+ for name in AUTODOC_DEFAULT_OPTIONS:
+ if name not in documenter.option_spec:
+ continue
+ if name in config.autodoc_default_options and name not in options:
+ options[name] = config.autodoc_default_options[name]
+
+ final_options = Options()
+ for arg, value in options.items():
+ if arg in documenter.option_spec:
+ final_options[arg] = documenter.option_spec[arg](value)
+ else:
+ final_options[arg] = value
+
+ # handle special options
+ for name in AUTODOC_EXTENDABLE_OPTIONS:
+ if name in final_options:
+ final_options[name] = [x.strip() for x in final_options[name].split(',')]
+
+ return final_options
def parse_generated_content(state: RSTState, content: StringList,
documenter: Documenter) ->list[Node]:
"""Parse an item of content generated by Documenter."""
- pass
+ with switch_source_input(state, content):
+ if documenter.titles_allowed:
+ node = nodes.section()
+ # necessary so that the child nodes get the right source/line set
+ node.document = state.document
+ nested_parse_to_nodes(state, content, node)
+ return node.children
+ else:
+ return [nodes.paragraph(text=text) for text in content]
class AutodocDirective(SphinxDirective):
diff --git a/sphinx/ext/autodoc/importer.py b/sphinx/ext/autodoc/importer.py
index efb94f8f3..0df884543 100644
--- a/sphinx/ext/autodoc/importer.py
+++ b/sphinx/ext/autodoc/importer.py
@@ -31,29 +31,46 @@ def _filter_enum_dict(enum_class: type[Enum], attrgetter: Callable[[Any,
but with different defining class. The order of occurrence is guided by
the MRO of *enum_class*.
"""
- pass
+ for cls in getmro(enum_class):
+ for name, value in cls.__dict__.items():
+ if name in enum_class_dict:
+ yield name, cls, attrgetter(enum_class, name, value)
def mangle(subject: Any, name: str) ->str:
"""Mangle the given name."""
- pass
+ if name.startswith('__') and not name.endswith('__'):
+        cls_name = getattr(subject, '__name__', subject.__class__.__name__).lstrip('_')
+ return f'_{cls_name}{name}'
+ return name
def unmangle(subject: Any, name: str) ->(str | None):
"""Unmangle the given name."""
- pass
+ if name.startswith('_'):
+        cls_name = getattr(subject, '__name__', subject.__class__.__name__).lstrip('_')
+ prefix = f'_{cls_name}__'
+ if name.startswith(prefix) and not name.endswith('__'):
+ return name[len(prefix):]
+ return name
def import_module(modname: str) ->Any:
"""Call importlib.import_module(modname), convert exceptions to ImportError."""
- pass
+ try:
+ return importlib.import_module(modname)
+ except Exception as exc:
+ raise ImportError(f'Could not import module {modname}: {exc}') from exc
def _reload_module(module: ModuleType) ->Any:
"""
Call importlib.reload(module), convert exceptions to ImportError
"""
- pass
+ try:
+ return importlib.reload(module)
+ except Exception as exc:
+ raise ImportError(f'Could not reload module {module.__name__}: {exc}') from exc
class Attribute(NamedTuple):
@@ -65,10 +82,45 @@ class Attribute(NamedTuple):
def get_object_members(subject: Any, objpath: list[str], attrgetter:
Callable, analyzer: (ModuleAnalyzer | None)=None) ->dict[str, Attribute]:
"""Get members and attributes of target object."""
- pass
+ members = {}
+ for name in dir(subject):
+ try:
+ value = attrgetter(subject, name, None)
+ directly_defined = hasattr(subject, name)
+ members[name] = Attribute(name, directly_defined, value)
+ except Exception:
+ continue
+
+ if analyzer:
+ namespace = '.'.join(objpath)
+ for (ns, name), docstring in analyzer.find_attr_docs().items():
+ if ns == namespace and name not in members:
+ members[name] = Attribute(name, True, None)
+
+ return members
def get_class_members(subject: Any, objpath: Any, attrgetter: Callable,
inherit_docstrings: bool=True) ->dict[str, ObjectMember]:
"""Get members and attributes of target class."""
- pass
+ from sphinx.ext.autodoc import ObjectMember # Import here to avoid circular import
+
+ members = {}
+
+ # Get all members from the class and its bases
+ for cls in getmro(subject):
+ for name in dir(cls):
+ if name not in members:
+ try:
+ value = attrgetter(subject, name, None)
+ if inherit_docstrings:
+ docstring = getattr(value, '__doc__', None)
+ if docstring is None:
+ docstring = getattr(cls.__dict__.get(name), '__doc__', None)
+ else:
+ docstring = None
+ members[name] = ObjectMember(name, value, cls, docstring)
+ except Exception:
+ continue
+
+ return members
diff --git a/sphinx/ext/autodoc/mock.py b/sphinx/ext/autodoc/mock.py
index 2a6922a24..5638b501e 100644
--- a/sphinx/ext/autodoc/mock.py
+++ b/sphinx/ext/autodoc/mock.py
@@ -109,17 +109,26 @@ def mock(modnames: list[str]) ->Iterator[None]:
# mock modules are enabled here
...
"""
- pass
+ finder = MockFinder(modnames)
+ sys.meta_path.insert(0, finder)
+ try:
+ yield
+ finally:
+ sys.meta_path.remove(finder)
+ for modname in finder.mocked_modules:
+ sys.modules.pop(modname, None)
def ismockmodule(subject: Any) ->TypeIs[_MockModule]:
"""Check if the object is a mocked module."""
- pass
+ return isinstance(subject, _MockModule)
def ismock(subject: Any) ->bool:
"""Check if the object is mocked."""
- pass
+ return (ismockmodule(subject) or
+ (isinstance(subject, type) and issubclass(subject, _MockObject)) or
+ isinstance(subject, _MockObject))
def undecorate(subject: _MockObject) ->Any:
@@ -127,4 +136,6 @@ def undecorate(subject: _MockObject) ->Any:
If not decorated, returns given *subject* itself.
"""
- pass
+ if ismock(subject) and subject.__sphinx_decorator_args__:
+ return subject.__sphinx_decorator_args__[0]
+ return subject
diff --git a/sphinx/ext/autodoc/preserve_defaults.py b/sphinx/ext/autodoc/preserve_defaults.py
index fc27cde55..01feacea3 100644
--- a/sphinx/ext/autodoc/preserve_defaults.py
+++ b/sphinx/ext/autodoc/preserve_defaults.py
@@ -37,7 +37,12 @@ def get_function_def(obj: Any) ->(ast.FunctionDef | None):
This tries to parse original code for living object and returns
AST node for given *obj*.
"""
- pass
+ try:
+ source = inspect.getsource(obj)
+ module = ast.parse(source)
+        return next((node for node in ast.walk(module) if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef))), None)
+ except Exception:
+ return None
def _get_arguments(obj: Any, /) ->(ast.arguments | None):
@@ -46,9 +51,33 @@ def _get_arguments(obj: Any, /) ->(ast.arguments | None):
This tries to parse the original code for an object and returns
an 'ast.arguments' node.
"""
- pass
+ func_def = get_function_def(obj)
+ return func_def.args if func_def else None
def update_defvalue(app: Sphinx, obj: Any, bound_method: bool) ->None:
"""Update defvalue info of *obj* using type_comments."""
- pass
+ if not callable(obj):
+ return
+
+ try:
+ args = _get_arguments(obj)
+ if args is None:
+ return
+
+ signature = inspect.signature(obj)
+ parameters = list(signature.parameters.values())
+
+ if bound_method:
+ parameters = parameters[1:] # Skip 'self' parameter for bound methods
+
+ for param, arg in zip(parameters, args.args + args.kwonlyargs):
+ if param.default is not param.empty:
+ if isinstance(arg, ast.arg) and arg.annotation:
+ default_value = DefaultValue(ast_unparse(arg.annotation))
+ param._default = default_value
+
+ # Update the signature of the object
+ obj.__signature__ = signature.replace(parameters=parameters)
+ except Exception as exc:
+ logger.warning(__('Failed to update defvalue of %r: %s'), obj, exc)
diff --git a/sphinx/ext/autodoc/type_comment.py b/sphinx/ext/autodoc/type_comment.py
index 545663871..132a5b577 100644
--- a/sphinx/ext/autodoc/type_comment.py
+++ b/sphinx/ext/autodoc/type_comment.py
@@ -16,7 +16,7 @@ logger = logging.getLogger(__name__)
def not_suppressed(argtypes: Sequence[ast.expr]=()) ->bool:
"""Check given *argtypes* is suppressed type_comment or not."""
- pass
+    return len(argtypes) != 1 or not (isinstance(argtypes[0], ast.Constant) and argtypes[0].value is ...)
def signature_from_ast(node: ast.FunctionDef, bound_method: bool,
@@ -25,7 +25,43 @@ def signature_from_ast(node: ast.FunctionDef, bound_method: bool,
:param bound_method: Specify *node* is a bound method or not
"""
- pass
+ args = []
+ defaults = []
+ kwonlyargs = []
+ kwonlydefaults = []
+ vararg = None
+ kwarg = None
+
+ def get_param(arg: ast.arg, default: Any = Parameter.empty) -> Parameter:
+ annotation = ast.unparse(arg.annotation) if arg.annotation else Parameter.empty
+ return Parameter(arg.arg, Parameter.POSITIONAL_OR_KEYWORD, default=default, annotation=annotation)
+
+ for i, arg in enumerate(node.args.args):
+ if i == 0 and bound_method:
+ continue
+ if i >= len(node.args.args) - len(node.args.defaults):
+ default = ast_unparse(node.args.defaults[i - (len(node.args.args) - len(node.args.defaults))])
+ args.append(get_param(arg, default))
+ else:
+ args.append(get_param(arg))
+
+ if node.args.vararg:
+ vararg = Parameter(node.args.vararg.arg, Parameter.VAR_POSITIONAL)
+
+ for i, arg in enumerate(node.args.kwonlyargs):
+ if i < len(node.args.kw_defaults) and node.args.kw_defaults[i] is not None:
+ default = ast_unparse(node.args.kw_defaults[i])
+ kwonlyargs.append(get_param(arg, default))
+ else:
+ kwonlyargs.append(get_param(arg))
+
+ if node.args.kwarg:
+ kwarg = Parameter(node.args.kwarg.arg, Parameter.VAR_KEYWORD)
+
+ return_annotation = ast.unparse(node.returns) if node.returns else Signature.empty
+
+    params = args + ([vararg] if vararg else []) + kwonlyargs + ([kwarg] if kwarg else [])
+    return Signature(params, return_annotation=return_annotation)
def get_type_comment(obj: Any, bound_method: bool=False) ->(Signature | None):
@@ -34,7 +70,17 @@ def get_type_comment(obj: Any, bound_method: bool=False) ->(Signature | None):
This tries to parse original code for living object and returns
Signature for given *obj*.
"""
- pass
+ try:
+ source = getsource(obj)
+ tree = ast.parse(source)
+ for node in ast.walk(tree):
+ if isinstance(node, ast.FunctionDef) and node.name == obj.__name__:
+ if node.type_comment:
+ type_comment = ast.parse(f"def f{node.type_comment}: pass").body[0]
+ return signature_from_ast(type_comment, bound_method, type_comment)
+ except Exception as exc:
+ logger.debug(__('Failed to parse type_comment for %r: %s'), obj, exc)
+ return None
def update_annotations_using_type_comments(app: Sphinx, obj: Any,
diff --git a/sphinx/ext/autodoc/typehints.py b/sphinx/ext/autodoc/typehints.py
index 97ed01df9..021372d42 100644
--- a/sphinx/ext/autodoc/typehints.py
+++ b/sphinx/ext/autodoc/typehints.py
@@ -17,4 +17,25 @@ if TYPE_CHECKING:
def record_typehints(app: Sphinx, objtype: str, name: str, obj: Any,
options: Options, args: str, retann: str) ->None:
"""Record type hints to env object."""
- pass
+ if app.config.autodoc_typehints != 'none':
+ # Get the environment
+ env = app.env
+ if not hasattr(env, 'autodoc_typehints'):
+ env.autodoc_typehints = {}
+
+ # Record the type hints
+ full_name = f"{objtype}.{name}"
+ env.autodoc_typehints[full_name] = {
+ 'args': args,
+ 'return': retann
+ }
+
+ # If it's a method, also record the class name
+ if objtype == 'method':
+ class_name = name.rsplit('.', 1)[0]
+ if class_name not in env.autodoc_typehints:
+ env.autodoc_typehints[class_name] = {}
+ env.autodoc_typehints[class_name][name.rsplit('.', 1)[1]] = {
+ 'args': args,
+ 'return': retann
+ }
diff --git a/sphinx/ext/autosummary/generate.py b/sphinx/ext/autosummary/generate.py
index eb7b16602..dbabb15a2 100644
--- a/sphinx/ext/autosummary/generate.py
+++ b/sphinx/ext/autosummary/generate.py
@@ -93,7 +93,12 @@ class AutosummaryRenderer:
def render(self, template_name: str, context: dict[str, Any]) ->str:
"""Render a template file."""
- pass
+ try:
+ template = self.env.get_template(template_name)
+ return template.render(context)
+ except TemplateNotFound:
+ logger.warning(__('template not found: %s'), template_name)
+ return ''
def _split_full_qualified_name(name: str) ->tuple[str | None, str]:
@@ -110,7 +115,15 @@ def _split_full_qualified_name(name: str) ->tuple[str | None, str]:
Therefore you need to mock 3rd party modules if needed before
calling this function.
"""
- pass
+ parts = name.rsplit('.', 1)
+ if len(parts) == 2:
+ modname, qualname = parts
+ try:
+ import_module(modname)
+ return modname, qualname
+ except ImportError:
+ pass
+ return None, name
class ModuleScanner:
@@ -125,12 +138,28 @@ def members_of(obj: Any, conf: Config) ->Sequence[str]:
Follows the ``conf.autosummary_ignore_module_all`` setting.
"""
- pass
+ if conf.autosummary_ignore_module_all:
+ return getall(obj)
+ elif hasattr(obj, '__all__'):
+ return obj.__all__
+ else:
+ return getall(obj)
def _get_module_attrs(name: str, members: Any) ->tuple[list[str], list[str]]:
"""Find module attributes with docstrings."""
- pass
+ attrs, public = [], []
+ try:
+ analyzer = ModuleAnalyzer.for_module(name)
+ attr_docs = analyzer.find_attr_docs()
+ for namespace, attr_name in attr_docs:
+ if namespace == '' and attr_name in members:
+ attrs.append(attr_name)
+ if not attr_name.startswith('_'):
+ public.append(attr_name)
+ except PycodeError:
+ pass
+ return attrs, public
def generate_autosummary_docs(sources: list[str], output_dir: (str | os.
@@ -141,7 +170,78 @@ def generate_autosummary_docs(sources: list[str], output_dir: (str | os.
:returns: list of generated files (both new and existing ones)
"""
- pass
+ if app:
+ translator = app.translator
+ else:
+ translator = sphinx.locale.init([sphinx.locale.__dir__], 'sphinx')
+ app = DummyApplication(translator)
+
+ showed_sources = list(sorted(sources))
+ if len(showed_sources) > 20:
+ showed_sources = showed_sources[:10] + ['...'] + showed_sources[-10:]
+ logger.info(__('[autosummary] generating autosummary for: %s') %
+ ', '.join(showed_sources))
+
+ if output_dir:
+ output_dir = Path(output_dir).resolve()
+ ensuredir(output_dir)
+
+ if base_path is not None:
+ base_path = Path(base_path).resolve()
+
+ generated_files = []
+
+ for source in sources:
+ package_dir = None
+ if base_path:
+ source_file = os.path.join(base_path, source)
+ # Find the package directory, using the source filename
+ package_dir = Path(source_file).resolve().parent
+
+ logger.info(__('[autosummary] reading files...'))
+ documented = find_autosummary_in_files(sources)
+
+ logger.info(__('[autosummary] generating...'))
+ for entry in documented:
+ try:
+ obj, name = import_by_name(entry.name, package_dir=package_dir)
+ except ImportExceptionGroup as exc:
+ logger.warning(__('[autosummary] failed to import %r: %s') % (entry.name, exc))
+ continue
+
+ fn = os.path.join(output_dir, name + suffix)
+ if os.path.isfile(fn):
+ with open(fn, encoding=encoding) as f:
+ generated = f.read()
+ if generated.strip() == entry.name.strip():
+ continue
+
+ logger.info(__('[autosummary] generating %s') % fn)
+ with open(fn, 'w', encoding=encoding) as f:
+ try:
+ import_members = imported_members or entry.recursive
+ renderer = AutosummaryRenderer(app)
+ context = app.config.autosummary_context.copy()
+ context.update({
+ 'name': name,
+ 'obj': obj,
+ 'members': members_of(obj, app.config),
+ 'imported_members': import_members,
+ 'autosummary': True,
+ 'module': sys.modules.get(obj.__module__),
+ 'app': app,
+ 'doc': pydoc.getdoc(obj),
+ 'fullname': name,
+ })
+ rendered = renderer.render(entry.template, context)
+ f.write(rendered)
+ except Exception as e:
+ logger.warning(__('[autosummary] failed to generate %s') % fn)
+ logger.warning(str(e))
+
+ generated_files.append(Path(fn).resolve())
+
+ return generated_files
def find_autosummary_in_files(filenames: list[str]) ->list[AutosummaryEntry]:
@@ -149,7 +249,12 @@ def find_autosummary_in_files(filenames: list[str]) ->list[AutosummaryEntry]:
See `find_autosummary_in_lines`.
"""
- pass
+ documented = []
+ for filename in filenames:
+ with open(filename, encoding='utf-8', errors='ignore') as f:
+ lines = f.read().splitlines()
+ documented.extend(find_autosummary_in_lines(lines, filename=filename))
+ return documented
def find_autosummary_in_docstring(name: str, filename: (str | None)=None
@@ -158,7 +263,18 @@ def find_autosummary_in_docstring(name: str, filename: (str | None)=None
See `find_autosummary_in_lines`.
"""
- pass
+ try:
+ real_name, obj, parent, modname = import_by_name(name)
+ except ImportError:
+ logger.warning(__('[autosummary] failed to import %r') % name)
+ return []
+
+ doc = pydoc.getdoc(obj)
+ if not doc:
+ return []
+
+ lines = doc.splitlines()
+ return find_autosummary_in_lines(lines, module=name, filename=filename)
def find_autosummary_in_lines(lines: list[str], module: (str | None)=None,
@@ -173,7 +289,86 @@ def find_autosummary_in_lines(lines: list[str], module: (str | None)=None,
*template* ``None`` if the directive does not have the
corresponding options set.
"""
- pass
+ autosummary_re = re.compile(r'^(\s*)\.\.\s+autosummary::\s*')
+ automodule_re = re.compile(
+ r'^\s*\.\.\s+automodule::\s*([A-Za-z0-9_.]+)\s*$')
+ module_re = re.compile(
+ r'^\s*\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$')
+ autosummary_item_re = re.compile(r'^\s+(~?[_a-zA-Z][a-zA-Z0-9_.]*)\s*.*?')
+ toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$')
+ template_arg_re = re.compile(r'^\s+:template:\s*(.*?)\s*$')
+ recursive_arg_re = re.compile(r'^\s+:recursive:\s*$')
+
+ documented = []
+
+ toctree = None
+ template = None
+ recursive = False
+ current_module = module
+ in_autosummary = False
+ base_indent = ""
+
+ for line in lines:
+ if in_autosummary:
+ m = toctree_arg_re.match(line)
+ if m:
+ toctree = m.group(1)
+ if filename:
+ toctree = os.path.join(os.path.dirname(filename),
+ toctree)
+ continue
+
+ m = template_arg_re.match(line)
+ if m:
+ template = m.group(1).strip()
+ continue
+
+ m = recursive_arg_re.match(line)
+ if m:
+ recursive = True
+ continue
+
+ if line.strip().startswith(':'):
+ continue # skip options
+
+ m = autosummary_item_re.match(line)
+ if m:
+ name = m.group(1).strip()
+ if name.startswith('~'):
+ name = name[1:]
+ if current_module and not name.startswith(current_module + '.'):
+ name = f"{current_module}.{name}"
+ documented.append(AutosummaryEntry(name, toctree, template, recursive))
+ continue
+
+ if not line.strip() or line.startswith(base_indent + " "):
+ continue
+
+ in_autosummary = False
+
+ m = autosummary_re.match(line)
+ if m:
+ in_autosummary = True
+ base_indent = m.group(1)
+ toctree = None
+ template = None
+ recursive = False
+ continue
+
+ m = automodule_re.match(line)
+ if m:
+ current_module = m.group(1).strip()
+ # recurse into the automodule docstring
+ documented.extend(find_autosummary_in_docstring(
+ current_module, filename=filename))
+ continue
+
+ m = module_re.match(line)
+ if m:
+ current_module = m.group(2)
+ continue
+
+ return documented
if __name__ == '__main__':
diff --git a/sphinx/ext/coverage.py b/sphinx/ext/coverage.py
index 9e50f8dea..eee1cd669 100644
--- a/sphinx/ext/coverage.py
+++ b/sphinx/ext/coverage.py
@@ -37,7 +37,23 @@ def _load_modules(mod_name: str, ignored_module_exps: Iterable[re.Pattern[str]]
:raises ImportError: If the module indicated by ``mod_name`` could not be
loaded.
"""
- pass
+ modules = {mod_name}
+ try:
+ mod = import_module(mod_name)
+ except ImportError:
+ raise
+
+ if hasattr(mod, '__path__'):
+ for _, name, ispkg in pkgutil.iter_modules(mod.__path__):
+ fullname = f"{mod_name}.{name}"
+ if any(exp.match(fullname) for exp in ignored_module_exps):
+ continue
+ if ispkg:
+ modules.update(_load_modules(fullname, ignored_module_exps))
+ else:
+ modules.add(fullname)
+
+ return modules
def _determine_py_coverage_modules(coverage_modules: Sequence[str],
@@ -58,7 +74,19 @@ def _determine_py_coverage_modules(coverage_modules: Sequence[str],
modules that are documented will be noted. This will therefore identify both
missing modules and missing objects, but it requires manual configuration.
"""
- pass
+ if coverage_modules:
+ modules_to_check = set()
+ for mod_name in coverage_modules:
+ modules_to_check.update(_load_modules(mod_name, ignored_module_exps))
+
+ # Add documented modules that are not in coverage_modules
+ for mod_name in seen_modules:
+ if mod_name not in modules_to_check:
+ py_undoc[mod_name] = {}
+ else:
+ modules_to_check = seen_modules
+
+ return sorted(modules_to_check)
class CoverageBuilder(Builder):
@@ -72,4 +100,26 @@ class CoverageBuilder(Builder):
def _write_py_statistics(self, op: TextIO) ->None:
"""Outputs the table of ``op``."""
- pass
+ total_objs = 0
+ total_undoc = 0
+ op.write('Undocumented Python objects\n')
+ op.write('===========================\n\n')
+
+ for mod_name in sorted(self.py_undoc):
+ undoc = self.py_undoc[mod_name]
+ if not undoc:
+ continue
+ op.write(mod_name + '\n')
+ op.write('-' * len(mod_name) + '\n')
+ for name, typ in sorted(undoc.items()):
+ op.write(f'* {name} ({typ})\n')
+ total_undoc += 1
+ op.write('\n')
+ total_objs += len(undoc)
+
+ op.write('\n')
+ op.write('Documented Python objects\n')
+ op.write('=========================\n')
+ op.write(f'Total found: {self.total_py}\n')
+ op.write(f'Undocumented: {total_undoc}\n')
+ op.write(f'Coverage: {100 * (self.total_py - total_undoc) / self.total_py:.0f}%\n')
diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py
index 46e4ad53d..0ae25abef 100644
--- a/sphinx/ext/doctest.py
+++ b/sphinx/ext/doctest.py
@@ -46,7 +46,10 @@ def is_allowed_version(spec: str, version: str) ->bool:
>>> is_allowed_version('>3.2, <4.0', '3.3')
True
"""
- pass
+ try:
+ return Version(version) in SpecifierSet(spec)
+ except InvalidSpecifier:
+ return False
class TestDirective(SphinxDirective):
@@ -144,9 +147,21 @@ class DocTestBuilder(Builder):
"""Try to get the file which actually contains the doctest, not the
filename of the document it's included in.
"""
- pass
+ try:
+ filename = node.get('source')
+ if filename is None:
+ return self.env.doc2path(docname)
+ return path.normpath(path.join(self.env.srcdir, filename))
+ except AttributeError:
+ return self.env.doc2path(docname)
@staticmethod
def get_line_number(node: Node) ->(int | None):
"""Get the real line number or admit we don't know."""
- pass
+ lineno = node.line
+ if lineno is None:
+ # Some nodes don't have a line number, use the parent's line number
+ while lineno is None and node.parent:
+ node = node.parent
+ lineno = node.line
+ return lineno
diff --git a/sphinx/ext/duration.py b/sphinx/ext/duration.py
index 9080b706a..8b9ec36e6 100644
--- a/sphinx/ext/duration.py
+++ b/sphinx/ext/duration.py
@@ -29,19 +29,51 @@ def on_builder_inited(app: Sphinx) ->None:
This clears the results of the last build.
"""
- pass
+ app.env.domains['duration'] = DurationDomain(app.env)
+ app.env.domains['duration'].data['reading_durations'] = {}
def on_source_read(app: Sphinx, docname: str, content: list[str]) ->None:
"""Start to measure reading duration."""
- pass
+ app.env.temp_data['reading_start_time'] = time.time()
def on_doctree_read(app: Sphinx, doctree: nodes.document) ->None:
"""Record a reading duration."""
- pass
+ start_time = app.env.temp_data.get('reading_start_time')
+ if start_time is not None:
+ duration = time.time() - start_time
+ docname = app.env.docname
+ app.env.domains['duration'].data['reading_durations'][docname] = duration
def on_build_finished(app: Sphinx, error: Exception) ->None:
"""Display duration ranking on the current build."""
- pass
+ if error:
+ return
+
+ durations = app.env.domains['duration'].data['reading_durations']
+ if not durations:
+ logger.info(__('No reading duration data available.'))
+ return
+
+ sorted_durations = sorted(durations.items(), key=itemgetter(1), reverse=True)
+ top_10 = list(islice(sorted_durations, 10))
+
+ logger.info(__('Top 10 slowest read documents:'))
+ for docname, duration in top_10:
+ logger.info(f'{docname}: {duration:.2f} seconds')
+
+
+def setup(app: Sphinx) -> dict[str, bool]:
+ app.add_domain(DurationDomain)
+ app.connect('builder-inited', on_builder_inited)
+ app.connect('source-read', on_source_read)
+ app.connect('doctree-read', on_doctree_read)
+ app.connect('build-finished', on_build_finished)
+
+ return {
+ 'version': sphinx.__display_version__,
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
+ }
diff --git a/sphinx/ext/extlinks.py b/sphinx/ext/extlinks.py
index beb4a5e4b..8210f26af 100644
--- a/sphinx/ext/extlinks.py
+++ b/sphinx/ext/extlinks.py
@@ -47,4 +47,22 @@ class ExternalLinksChecker(SphinxPostTransform):
If the URI in ``refnode`` has a replacement in ``extlinks``,
emit a warning with a replacement suggestion.
"""
- pass
+ uri = refnode.get('refuri', '')
+ if not uri:
+ return
+
+ for name, (base_url, caption) in self.app.config.extlinks.items():
+ if uri.startswith(base_url):
+ # Extract the part of the URI that matches the extlink pattern
+ suffix = uri[len(base_url):]
+ if suffix.endswith('.html'):
+ suffix = suffix[:-5] # Remove '.html' if present
+
+ # Construct the suggested replacement
+ if caption is None:
+ suggested_text = f':{name}:`{suffix}`'
+ else:
+ suggested_text = f':{name}:`{caption % suffix}`'
+
+ logger.warning(__('Extlink %r could be replaced by %s'), uri, suggested_text,
+ location=refnode)
diff --git a/sphinx/ext/githubpages.py b/sphinx/ext/githubpages.py
index 67d37a5bc..079983b91 100644
--- a/sphinx/ext/githubpages.py
+++ b/sphinx/ext/githubpages.py
@@ -13,7 +13,8 @@ if TYPE_CHECKING:
def _get_domain_from_url(url: str) ->str:
"""Get the domain from a URL."""
- pass
+ parsed_url = urllib.parse.urlparse(url)
+ return parsed_url.netloc
def create_nojekyll_and_cname(app: Sphinx, env: BuildEnvironment) ->None:
@@ -31,4 +32,26 @@ def create_nojekyll_and_cname(app: Sphinx, env: BuildEnvironment) ->None:
requires a CNAME file, we remove any existing ``CNAME`` files from the
output directory.
"""
- pass
+ if app.builder.format == 'html':
+ # Create .nojekyll file
+ nojekyll_path = os.path.join(app.outdir, '.nojekyll')
+ with contextlib.suppress(OSError):
+ open(nojekyll_path, 'wt').close()
+
+ # Handle CNAME file
+ cname_path = os.path.join(app.outdir, 'CNAME')
+ html_baseurl = app.config.html_baseurl
+
+ if html_baseurl:
+ domain = _get_domain_from_url(html_baseurl)
+ if domain and not domain.endswith('github.io'):
+ with open(cname_path, 'wt') as f:
+ f.write(domain)
+ else:
+ # Remove CNAME if it exists and is not needed
+ with contextlib.suppress(FileNotFoundError):
+ os.remove(cname_path)
+ else:
+ # Remove CNAME if html_baseurl is not set
+ with contextlib.suppress(FileNotFoundError):
+ os.remove(cname_path)
diff --git a/sphinx/ext/graphviz.py b/sphinx/ext/graphviz.py
index 5333534bf..7c79429b3 100644
--- a/sphinx/ext/graphviz.py
+++ b/sphinx/ext/graphviz.py
@@ -55,7 +55,14 @@ class ClickableMapDefinition:
If not exists, this only returns empty string.
"""
- pass
+ if not self.clickable:
+ return ''
+
+ map_tag = f'<map id="{self.id}">\n'
+ for item in self.clickable:
+ map_tag += item + '\n'
+ map_tag += '</map>'
+ return map_tag
class graphviz(nodes.General, nodes.Inline, nodes.Element):
@@ -93,11 +100,72 @@ class GraphvizSimple(SphinxDirective):
def fix_svg_relative_paths(self: (HTML5Translator | LaTeXTranslator |
TexinfoTranslator), filepath: str) ->None:
"""Change relative links in generated svg files to be relative to imgpath."""
- pass
+ tree = ET.parse(filepath)
+ root = tree.getroot()
+
+ imgpath = self.builder.imgpath
+ if imgpath:
+ for element in root.iter():
+ if 'href' in element.attrib:
+ href = element.attrib['href']
+ if not urlsplit(href).scheme:
+ new_href = posixpath.join(imgpath, href)
+ element.attrib['href'] = new_href
+
+ tree.write(filepath)
def render_dot(self: (HTML5Translator | LaTeXTranslator | TexinfoTranslator
), code: str, options: dict, format: str, prefix: str='graphviz',
filename: (str | None)=None) ->tuple[str | None, str | None]:
"""Render graphviz code into a PNG or PDF output file."""
- pass
+ if filename is None:
+ fname = f"{sha1(code.encode()).hexdigest()}.{format}"
+ else:
+ fname = filename
+
+ if hasattr(self.builder, 'imgpath'):
+ outfn = path.join(self.builder.outdir, self.builder.imgpath, fname)
+ else:
+ outfn = path.join(self.builder.outdir, fname)
+
+ if path.isfile(outfn):
+ return fname, outfn
+
+ ensuredir(path.dirname(outfn))
+
+ dot_args = [options.get('graphviz_dot', 'dot')]
+ dot_args.extend(options.get('graphviz_dot_args', []))
+ dot_args.extend(['-T' + format, '-o' + outfn])
+
+ if format == 'png':
+ dot_args.extend(['-Tcmapx'])
+
+ try:
+ p = subprocess.Popen(dot_args, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
+ stderr=subprocess.PIPE, universal_newlines=True)
+ except OSError as err:
+ if err.errno != 2: # No such file or directory
+ raise
+ logger.warning(__('dot command %r cannot be run (needed for graphviz '
+ 'output), check the graphviz_dot setting'), dot_args[0])
+ return None, None
+
+ try:
+ # Graphviz may close standard input when an error occurs,
+ # resulting in a broken pipe on communicate()
+ stdout, stderr = p.communicate(code)
+ except OSError as err:
+ if err.errno != 32: # Broken pipe
+ raise
+ # in this case, read the standard output and standard error streams
+ # directly, to get the error message(s)
+ stdout, stderr = p.stdout.read(), p.stderr.read()
+ p.wait()
+ if p.returncode != 0:
+ raise GraphvizError('dot exited with error:\n[stderr]\n%s\n'
+ '[stdout]\n%s' % (stderr, stdout))
+ if not path.isfile(outfn):
+ raise GraphvizError('dot did not produce an output file:\n[stderr]\n%s\n'
+ '[stdout]\n%s' % (stderr, stdout))
+ return fname, outfn
diff --git a/sphinx/ext/ifconfig.py b/sphinx/ext/ifconfig.py
index 97b3f6272..d41661ba5 100644
--- a/sphinx/ext/ifconfig.py
+++ b/sphinx/ext/ifconfig.py
@@ -34,3 +34,18 @@ class IfConfig(SphinxDirective):
optional_arguments = 0
final_argument_whitespace = True
option_spec: ClassVar[OptionSpec] = {}
+
+ def run(self) -> list[Node]:
+ node = ifconfig()
+ node.document = self.state.document
+ self.set_source_info(node)
+ node['expr'] = self.arguments[0]
+
+ env = self.state.document.settings.env
+ if self.content:
+ self.state.nested_parse(self.content, self.content_offset, node)
+ if isinstance(node[0], nodes.paragraph):
+ content = node[0].children
+ node[0].replace_self(content)
+
+ return [node]
diff --git a/sphinx/ext/imgconverter.py b/sphinx/ext/imgconverter.py
index 9c69dd116..71a2d7829 100644
--- a/sphinx/ext/imgconverter.py
+++ b/sphinx/ext/imgconverter.py
@@ -22,8 +22,17 @@ class ImagemagickConverter(ImageConverter):
def is_available(self) ->bool:
"""Confirms the converter is available or not."""
- pass
+ try:
+ subprocess.run(['convert', '-version'], check=True, capture_output=True)
+ return True
+ except (CalledProcessError, FileNotFoundError):
+ return False
def convert(self, _from: str, _to: str) ->bool:
"""Converts the image to expected one."""
- pass
+ try:
+ subprocess.run(['convert', _from, _to], check=True, capture_output=True)
+ return True
+ except CalledProcessError:
+ logger.warning(__('imagemagick convert file "%s" to "%s" failed.'), _from, _to)
+ return False
diff --git a/sphinx/ext/imgmath.py b/sphinx/ext/imgmath.py
index 666d084f5..b17229cad 100644
--- a/sphinx/ext/imgmath.py
+++ b/sphinx/ext/imgmath.py
@@ -58,19 +58,39 @@ depthsvgcomment_re = re.compile('<!-- DEPTH=(-?\\d+) -->')
def read_svg_depth(filename: str) ->(int | None):
"""Read the depth from comment at last line of SVG file
"""
- pass
+ with open(filename, 'r') as f:
+ for line in f:
+ pass # Read until the last line
+ last_line = line.strip()
+
+ match = depthsvgcomment_re.match(last_line)
+ if match:
+ return int(match.group(1))
+ return None
def write_svg_depth(filename: str, depth: int) ->None:
"""Write the depth to SVG file as a comment at end of file
"""
- pass
+ with open(filename, 'a') as f:
+ f.write(f'\n<!-- DEPTH={depth} -->\n')
def generate_latex_macro(image_format: str, math: str, config: Config,
confdir: (str | os.PathLike[str])='') ->str:
"""Generate LaTeX macro."""
- pass
+ latex = LaTeXRenderer(config.imgmath_latex_preamble, config.imgmath_latex_closing).render(
+ config.imgmath_latex_template,
+ image_format=image_format,
+ math=math,
+ preamble=config.imgmath_use_preview and '\\usepackage[active,tightpage,displaymath]{preview}' or '',
+ fontsize=config.imgmath_font_size,
+ baselineskip=config.imgmath_latex_baselineskip,
+ fontset=config.imgmath_latex_fontset,
+ fontspec=config.imgmath_latex_fontspec,
+ commands=config.imgmath_latex_commands,
+ )
+ return latex
def ensure_tempdir(builder: Builder) ->str:
@@ -80,29 +100,66 @@ def ensure_tempdir(builder: Builder) ->str:
than using temporary files, since we can clean up everything at once
just removing the whole directory (see cleanup_tempdir)
"""
- pass
+ if not hasattr(builder, '_imgmath_tempdir'):
+ builder._imgmath_tempdir = tempfile.mkdtemp()
+ return builder._imgmath_tempdir
def compile_math(latex: str, builder: Builder) ->str:
"""Compile LaTeX macros for math to DVI."""
- pass
+ tempdir = ensure_tempdir(builder)
+ filename = os.path.join(tempdir, 'math.tex')
+
+ with open(filename, 'w', encoding='utf-8') as f:
+ f.write(latex)
+
+ cwd = os.getcwd()
+ os.chdir(tempdir)
+
+ try:
+ cmd = [builder.config.imgmath_latex, '--interaction=nonstopmode', 'math.tex']
+ subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ except CalledProcessError as exc:
+ raise MathExtError('latex exited with error', exc.stderr, exc.stdout)
+ finally:
+ os.chdir(cwd)
+
+ return os.path.join(tempdir, 'math.dvi')
def convert_dvi_to_image(command: list[str], name: str) ->tuple[str, str]:
"""Convert DVI file to specific image format."""
- pass
+ try:
+ result = subprocess.run(command, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ return (name, '')
+ except CalledProcessError as exc:
+ raise MathExtError('dvipng/dvisvgm exited with error', exc.stderr, exc.stdout)
def convert_dvi_to_png(dvipath: str, builder: Builder, out_path: str) ->(int |
None):
"""Convert DVI file to PNG image."""
- pass
+ cmd = [builder.config.imgmath_dvipng, '-o', out_path, '-T', 'tight', '-z9']
+ cmd.extend(builder.config.imgmath_dvipng_args)
+ cmd.append(dvipath)
+
+ convert_dvi_to_image(cmd, out_path)
+
+ depth = read_png_depth(out_path)
+ return depth
def convert_dvi_to_svg(dvipath: str, builder: Builder, out_path: str) ->(int |
None):
"""Convert DVI file to SVG image."""
- pass
+ cmd = [builder.config.imgmath_dvisvgm, '-o', out_path]
+ cmd.extend(builder.config.imgmath_dvisvgm_args)
+ cmd.append(dvipath)
+
+ convert_dvi_to_image(cmd, out_path)
+
+ depth = read_svg_depth(out_path)
+ return depth
def render_math(self: HTML5Translator, math: str) ->tuple[str | None, int |
@@ -120,4 +177,32 @@ def render_math(self: HTML5Translator, math: str) ->tuple[str | None, int |
docs successfully). If the programs are there, however, they may not fail
since that indicates a problem in the math source.
"""
- pass
+ builder = self.builder
+ config = builder.config
+
+ if config.imgmath_image_format not in SUPPORT_FORMAT:
+ raise MathExtError(f'imgmath_image_format must be either "png" or "svg", not "{config.imgmath_image_format}"')
+
+ latex = generate_latex_macro(config.imgmath_image_format, math, config, builder.confdir)
+
+ name = f"math-{sha1(latex.encode()).hexdigest()}.{config.imgmath_image_format}"
+ imgpath = path.join(builder.outdir, builder.imagedir, 'math', name)
+ if path.isfile(imgpath):
+ depth = read_png_depth(imgpath) if config.imgmath_image_format == 'png' else read_svg_depth(imgpath)
+ return imgpath, depth
+
+ ensuredir(path.dirname(imgpath))
+
+ # Build latex command; old versions of latex don't have the
+ # --output-directory option, so we have to manually chdir to the
+ # temp dir to run it.
+ dvipath = compile_math(latex, builder)
+
+ if config.imgmath_image_format == 'png':
+ depth = convert_dvi_to_png(dvipath, builder, imgpath)
+ elif config.imgmath_image_format == 'svg':
+ depth = convert_dvi_to_svg(dvipath, builder, imgpath)
+ else:
+ raise MathExtError(f'imgmath_image_format must be either "png" or "svg", not "{config.imgmath_image_format}"')
+
+ return imgpath, depth
diff --git a/sphinx/ext/inheritance_diagram.py b/sphinx/ext/inheritance_diagram.py
index 2bd75f19c..fca683f55 100644
--- a/sphinx/ext/inheritance_diagram.py
+++ b/sphinx/ext/inheritance_diagram.py
@@ -65,12 +65,33 @@ def try_import(objname: str) ->Any:
Returns imported object or module. If failed, returns None value.
"""
- pass
+ try:
+ return import_module(objname)
+ except ImportError:
+ try:
+ module_name, class_name = objname.rsplit('.', 1)
+ module = import_module(module_name)
+ return getattr(module, class_name)
+ except (ImportError, AttributeError, ValueError):
+ return None
def import_classes(name: str, currmodule: str) ->Any:
"""Import a class using its fully-qualified *name*."""
- pass
+ if name.startswith('.'):
+ name = currmodule + name
+
+ obj = try_import(name)
+ if obj is None:
+ raise ImportError(f'Could not import class or module {name}')
+
+ if inspect.isclass(obj):
+ return [obj]
+ elif inspect.ismodule(obj):
+ return [cls for name, cls in inspect.getmembers(obj, inspect.isclass)
+ if cls.__module__ == obj.__name__]
+ else:
+ raise ImportError(f'{name} is neither a class nor a module')
class InheritanceException(Exception):
@@ -101,10 +122,12 @@ class InheritanceGraph:
msg = 'No classes found for inheritance diagram'
raise InheritanceException(msg)
- def _import_classes(self, class_names: list[str], currmodule: str) ->list[
- Any]:
+ def _import_classes(self, class_names: list[str], currmodule: str) ->list[Any]:
"""Import a list of classes."""
- pass
+ classes = []
+ for name in class_names:
+ classes.extend(import_classes(name, currmodule))
+ return classes
def _class_info(self, classes: list[Any], show_builtins: bool,
private_bases: bool, parts: int, aliases: (dict[str, str] | None),
@@ -124,7 +147,34 @@ class InheritanceGraph:
*top_classes* gives the name(s) of the top most ancestor class to
traverse to. Multiple names can be specified separated by comma.
"""
- pass
+ all_classes = {}
+ top_classes = [cls for cls in top_classes if isinstance(cls, type)]
+
+ def recurse(cls):
+ if cls in all_classes:
+ return
+ if not show_builtins and cls in py_builtins:
+ return
+ if not private_bases and cls.__name__.startswith('_'):
+ return
+ if top_classes and cls in top_classes:
+ return
+
+ name = self.class_name(cls, parts, aliases)
+ if cls.__module__ == '__builtin__' or cls.__module__ == 'builtins':
+ module = 'builtins'
+ else:
+ module = cls.__module__
+ bases = [base for base in cls.__bases__ if show_builtins or base not in py_builtins]
+ all_classes[cls] = (name, module, bases, cls.__module__)
+
+ for base in bases:
+ recurse(base)
+
+ for cls in classes:
+ recurse(cls)
+
+ return list(all_classes.values())
def class_name(self, cls: Any, parts: int=0, aliases: (dict[str, str] |
None)=None) ->str:
@@ -133,11 +183,27 @@ class InheritanceGraph:
This works for things I've tested in matplotlib so far, but may not be
completely general.
"""
- pass
+ module = cls.__module__
+ name = cls.__qualname__
+ if module == 'builtins':
+ return name
+
+ if aliases is not None and name in aliases:
+ return aliases[name]
+
+ full_name = f"{module}.{name}"
+ if parts == 0:
+ return full_name
+
+ name_parts = full_name.split('.')
+ if parts > 0:
+ return '.'.join(name_parts[-parts:])
+ else:
+ return '.'.join(name_parts[abs(parts):])
def get_all_class_names(self) ->list[str]:
"""Get all of the class names involved in the graph."""
- pass
+ return [name for (name, _, _, _) in self.class_info]
default_graph_attrs = {'rankdir': 'LR', 'size': '"8.0, 12.0"',
'bgcolor': 'transparent'}
default_node_attrs = {'shape': 'box', 'fontsize': 10, 'height': 0.25,
@@ -160,7 +226,37 @@ class InheritanceGraph:
*graph_attrs*, *node_attrs*, *edge_attrs* are dictionaries containing
key/value pairs to pass on as graphviz properties.
"""
- pass
+ g_attrs = self.default_graph_attrs.copy()
+ n_attrs = self.default_node_attrs.copy()
+ e_attrs = self.default_edge_attrs.copy()
+ if graph_attrs:
+ g_attrs.update(graph_attrs)
+ if node_attrs:
+ n_attrs.update(node_attrs)
+ if edge_attrs:
+ e_attrs.update(edge_attrs)
+
+ res = []
+ res.append(f'digraph {name} {{')
+ res.append(' rankdir = "LR"')
+ res.append(' bgcolor = "transparent"')
+
+ for name, _, bases, module in self.class_info:
+ # Write the node
+ this_node_attrs = n_attrs.copy()
+ url = urls and urls.get(name)
+ if url is not None:
+ this_node_attrs['URL'] = f'"{url}"'
+ this_node_attrs['target'] = '"_top"'
+ node = ' %s [%s]' % (name, ','.join('%s=%s' % x for x in this_node_attrs.items()))
+ res.append(node)
+
+ # Write the edges
+ for base_name in bases:
+ res.append(f' {base_name} -> {name} [arrowhead="none", arrowtail="normal"]')
+
+ res.append('}')
+ return '\n'.join(res)
class inheritance_diagram(graphviz):
diff --git a/sphinx/ext/intersphinx/_cli.py b/sphinx/ext/intersphinx/_cli.py
index 65410871f..c07f6f4ac 100644
--- a/sphinx/ext/intersphinx/_cli.py
+++ b/sphinx/ext/intersphinx/_cli.py
@@ -6,4 +6,18 @@ from sphinx.ext.intersphinx._load import _fetch_inventory
def inspect_main(argv: list[str], /) ->int:
"""Debug functionality to print out an inventory"""
- pass
+ if len(argv) < 2:
+ print("Usage: python -m sphinx.ext.intersphinx <URI>", file=sys.stderr)
+ return 1
+
+ uri = argv[1]
+ try:
+ inventory = _fetch_inventory(None, uri, uri)
+ for type_name, items in inventory.items():
+ print(f"{type_name}:")
+ for name, (project, version, location, _) in items.items():
+ print(f" {name:<30} {project:<20} {version:<10} {location}")
+ return 0
+ except Exception as e:
+ print(f"Error fetching inventory: {e}", file=sys.stderr)
+ return 1
diff --git a/sphinx/ext/intersphinx/_load.py b/sphinx/ext/intersphinx/_load.py
index 021469d71..22fb9aa7e 100644
--- a/sphinx/ext/intersphinx/_load.py
+++ b/sphinx/ext/intersphinx/_load.py
@@ -35,7 +35,31 @@ def validate_intersphinx_mapping(app: Sphinx, config: Config) ->None:
* The second element of each value pair (inventory locations)
is a tuple of non-empty strings or None.
"""
- pass
+ intersphinx_mapping = config.intersphinx_mapping
+ if not isinstance(intersphinx_mapping, dict):
+ raise ConfigError('intersphinx_mapping must be a dictionary')
+
+ for key, value in list(intersphinx_mapping.items()):
+ if not isinstance(key, str) or not key:
+ raise ConfigError('intersphinx_mapping key must be a non-empty string')
+
+ if not isinstance(value, (list, tuple)) or len(value) != 2:
+ raise ConfigError('intersphinx_mapping value must be a two-element list or tuple')
+
+ target_uri, inventory_locations = value
+ if not isinstance(target_uri, str) or not target_uri:
+ raise ConfigError('first element of intersphinx_mapping value must be a non-empty string')
+
+ if inventory_locations is not None:
+ if not isinstance(inventory_locations, (list, tuple)):
+ inventory_locations = (inventory_locations,)
+ for loc in inventory_locations:
+ if not isinstance(loc, str) or not loc:
+ raise ConfigError('second element of intersphinx_mapping value must be None or a tuple of non-empty strings')
+
+ intersphinx_mapping[key] = (target_uri, inventory_locations)
+
+ config.intersphinx_mapping = intersphinx_mapping
def load_mappings(app: Sphinx) ->None:
@@ -43,18 +67,59 @@ def load_mappings(app: Sphinx) ->None:
The intersphinx mappings are expected to be normalized.
"""
- pass
+ env = app.env
+ config = app.config
+
+ if not hasattr(env, 'intersphinx_projects'):
+ env.intersphinx_projects = {}
+ if not hasattr(env, 'intersphinx_cache'):
+ env.intersphinx_cache = {}
+
+ for name, (uri, locations) in config.intersphinx_mapping.items():
+ env.intersphinx_projects[name] = _IntersphinxProject(
+ name=name,
+ uri=uri,
+ locations=locations or (None,),
+ inventory=None,
+ )
def fetch_inventory(app: Sphinx, uri: InventoryURI, inv: str) ->Inventory:
"""Fetch, parse and return an intersphinx inventory file."""
- pass
+ config = app.config
+ srcdir = app.srcdir
+
+ try:
+ inventory = _fetch_inventory(
+ target_uri=uri,
+ inv_location=inv,
+ config=config,
+ srcdir=srcdir,
+ )
+ except Exception as exc:
+ LOGGER.warning(__('intersphinx inventory %r not fetchable due to %s: %s'),
+ inv, type(exc).__name__, exc)
+ return {}
+
+ return inventory
def _fetch_inventory(*, target_uri: InventoryURI, inv_location: str, config:
Config, srcdir: Path) ->Inventory:
"""Fetch, parse and return an intersphinx inventory file."""
- pass
+ if inv_location is None:
+ inv_location = posixpath.join(target_uri, INVENTORY_FILENAME)
+
+ if inv_location.startswith(('http:', 'https:')):
+ f = _read_from_url(inv_location, config=config)
+ else:
+ f = path.join(srcdir, inv_location)
+
+ try:
+ with InventoryFile.load(f, uri=target_uri) as invdata:
+ return invdata.load()
+ except Exception as exc:
+ raise ValueError(__('Failed to read inventory file %r: %s') % (inv_location, exc))
def _get_safe_url(url: str) ->str:
@@ -69,7 +134,13 @@ def _get_safe_url(url: str) ->str:
:return: *url* with password removed
:rtype: ``str``
"""
- pass
+ parts = urlsplit(url)
+ if parts.username:
+ netloc = f'{parts.username}@{parts.hostname}'
+ if parts.port:
+ netloc += f':{parts.port}'
+ return urlunsplit((parts.scheme, netloc, parts.path, parts.query, parts.fragment))
+ return url
def _strip_basic_auth(url: str) ->str:
@@ -86,7 +157,13 @@ def _strip_basic_auth(url: str) ->str:
:return: *url* with any basic auth creds removed
:rtype: ``str``
"""
- pass
+ parts = urlsplit(url)
+ if parts.username or parts.password:
+ netloc = parts.hostname
+ if parts.port:
+ netloc += f':{parts.port}'
+ return urlunsplit((parts.scheme, netloc, parts.path, parts.query, parts.fragment))
+ return url
def _read_from_url(url: str, *, config: Config) ->IO:
@@ -105,4 +182,9 @@ def _read_from_url(url: str, *, config: Config) ->IO:
:return: data read from resource described by *url*
:rtype: ``file``-like object
"""
- pass
+    try:
+        r = requests.get(url, stream=True, timeout=config.intersphinx_timeout)
+        r.raise_for_status()
+        return r.raw  # caller reads this stream; do not close the response here
+    except requests.exceptions.RequestException as exc:
+        raise ValueError(__('Could not fetch remote inventory %r: %s') % (url, exc))
diff --git a/sphinx/ext/intersphinx/_resolve.py b/sphinx/ext/intersphinx/_resolve.py
index 37f1281fc..d7e9ea944 100644
--- a/sphinx/ext/intersphinx/_resolve.py
+++ b/sphinx/ext/intersphinx/_resolve.py
@@ -34,7 +34,15 @@ def resolve_reference_in_inventory(env: BuildEnvironment, inv_name:
Requires ``inventory_exists(env, inv_name)``.
"""
- pass
+ inventory = InventoryAdapter(env).load(inv_name)
+ reftarget = node['reftarget']
+ if reftarget in inventory:
+ newnode = nodes.reference('', '', internal=False)
+ newnode['refuri'] = inventory[reftarget][2]
+ newnode['reftitle'] = inventory[reftarget][3]
+ newnode.append(contnode)
+ return newnode
+ return None
def resolve_reference_any_inventory(env: BuildEnvironment,
@@ -44,7 +52,19 @@ def resolve_reference_any_inventory(env: BuildEnvironment,
Resolution is tried with the target as is in any inventory.
"""
- pass
+ reftarget = node['reftarget']
+ inventory_adapter = InventoryAdapter(env)
+ for inv_name in inventory_adapter.named_inventories:
+ if honor_disabled_refs and inv_name in env.config.intersphinx_disabled_reftypes:
+ continue
+ inventory = inventory_adapter.load(inv_name)
+ if reftarget in inventory:
+ newnode = nodes.reference('', '', internal=False)
+ newnode['refuri'] = inventory[reftarget][2]
+ newnode['reftitle'] = inventory[reftarget][3]
+ newnode.append(contnode)
+ return newnode
+ return None
def resolve_reference_detect_inventory(env: BuildEnvironment, node:
@@ -56,13 +76,30 @@ def resolve_reference_detect_inventory(env: BuildEnvironment, node:
to form ``inv_name:newtarget``. If ``inv_name`` is a named inventory, then resolution
is tried in that inventory with the new target.
"""
- pass
+ reftarget = node['reftarget']
+ result = resolve_reference_any_inventory(env, True, node, contnode)
+ if result:
+ return result
+
+ if ':' in reftarget:
+ inv_name, newtarget = reftarget.split(':', 1)
+ inventory_adapter = InventoryAdapter(env)
+ if inv_name in inventory_adapter.named_inventories:
+ node = node.copy()
+ node['reftarget'] = newtarget
+ return resolve_reference_in_inventory(env, inv_name, node, contnode)
+ return None
def missing_reference(app: Sphinx, env: BuildEnvironment, node:
pending_xref, contnode: TextElement) ->(nodes.reference | None):
"""Attempt to resolve a missing reference via intersphinx references."""
- pass
+    # Intersphinx resolves references for every domain (py, c, std, ...);
+    # restricting resolution to the ``std`` domain would drop most targets.
+    # Per-inventory disabling is honoured inside
+    # resolve_reference_any_inventory (honor_disabled_refs=True).
+
+    return resolve_reference_detect_inventory(env, node, contnode)
class IntersphinxDispatcher(CustomReSTDispatcher):
@@ -90,7 +127,12 @@ class IntersphinxRole(SphinxRole):
- ``external:name`` -- any inventory and domain, explicit name.
- ``external:domain:name`` -- any inventory, explicit domain and name.
"""
- pass
+ match = self._re_inv_ref.match(name)
+ if match:
+ inv_name = match.group(2)
+ suffix = match.group(3)
+ return inv_name, suffix
+ return None, name
def _get_domain_role(self, name: str) ->tuple[str | None, str | None]:
"""Convert the *name* string into a domain and a role name.
@@ -99,12 +141,33 @@ class IntersphinxRole(SphinxRole):
- If *name* contains a single ``:``, the domain/role is split on this.
- If *name* contains multiple ``:``, return ``(None, None)``.
"""
- pass
+ parts = name.split(':')
+ if len(parts) == 1:
+ return None, name
+ elif len(parts) == 2:
+ return parts[0], parts[1]
+ else:
+ return None, None
def invoke_role(self, role: tuple[str, str]) ->tuple[list[Node], list[
system_message]]:
"""Invoke the role described by a ``(domain, role name)`` pair."""
- pass
+ domain_name, role_name = role
+ if domain_name:
+ domain = self.env.get_domain(domain_name)
+ role_fn = domain.roles.get(role_name)
+ else:
+ role_fn = self.env.roles.get(role_name)
+
+ if role_fn is None:
+ msg = self.inliner.reporter.error(
+ f'Unknown interpreted text role "{role_name}".',
+ line=self.lineno)
+ prb = self.inliner.problematic(self.rawtext, self.rawtext, msg)
+ return [prb], [msg]
+
+ return role_fn(self.name, self.rawtext, self.text, self.lineno,
+ self.inliner, self.options, self.content)
class IntersphinxRoleResolver(ReferencesResolver):
@@ -121,4 +184,7 @@ def install_dispatcher(app: Sphinx, docname: str, source: list[str]) ->None:
.. note:: The installed dispatcher will be uninstalled on disabling sphinx_domain
automatically.
"""
- pass
+    dispatcher = IntersphinxDispatcher()
+    # CustomReSTDispatcher.enable() hooks the dispatcher into docutils'
+    # role/directive lookup; it is uninstalled again automatically.
+    dispatcher.enable()
diff --git a/sphinx/ext/intersphinx/_shared.py b/sphinx/ext/intersphinx/_shared.py
index 56a5986a5..06e77b0e3 100644
--- a/sphinx/ext/intersphinx/_shared.py
+++ b/sphinx/ext/intersphinx/_shared.py
@@ -90,4 +90,4 @@ class InventoryAdapter:
- Element two is a time value for cache invalidation, an integer.
- Element three is the loaded remote inventory of type :class:`!Inventory`.
"""
- pass
+ return self.env.intersphinx_cache
diff --git a/sphinx/ext/napoleon/docstring.py b/sphinx/ext/napoleon/docstring.py
index 9ee30cfc5..8d4e4b7da 100644
--- a/sphinx/ext/napoleon/docstring.py
+++ b/sphinx/ext/napoleon/docstring.py
@@ -49,13 +49,32 @@ class Deque(collections.deque):
Return the nth element of the stack, or ``self.sentinel`` if n is
greater than the stack size.
"""
- pass
+ try:
+ return self[n]
+ except IndexError:
+ return self.sentinel
def _convert_type_spec(_type: str, translations: (dict[str, str] | None)=None
) ->str:
"""Convert type specification to reference in reST."""
- pass
+ if translations is None:
+ translations = {}
+
+ # Remove any whitespace and split by '|' for union types
+ types = [t.strip() for t in _type.split('|')]
+
+ converted_types = []
+ for t in types:
+ # Check if the type is in the translations dictionary
+ if t in translations:
+            converted_types.append(translations[t])
+ else:
+ # If not in translations, assume it's a valid Python type
+ converted_types.append(f':py:class:`{t}`')
+
+ # Join the converted types back together
+ return ' | '.join(converted_types)
class GoogleDocstring:
@@ -215,7 +234,7 @@ class GoogleDocstring:
The lines of the docstring in a list.
"""
- pass
+ return self._parsed_lines
class NumpyDocstring(GoogleDocstring):
@@ -330,4 +349,44 @@ class NumpyDocstring(GoogleDocstring):
func_name1, func_name2, :meth:`func_name`, func_name3
"""
- pass
+ result = []
+ current_func = None
+ current_desc = []
+
+ for line in content:
+ line = line.strip()
+ if ':' in line:
+ if current_func:
+ result.extend(self._format_see_also_item(current_func, current_desc))
+ current_func, desc = line.split(':', 1)
+ current_desc = [desc.strip()]
+ elif line:
+ if current_func:
+ current_desc.append(line)
+ else:
+                    result.extend(self._format_admonition('seealso', [line]))
+ else:
+ if current_func:
+ result.extend(self._format_see_also_item(current_func, current_desc))
+ current_func = None
+ current_desc = []
+
+ if current_func:
+ result.extend(self._format_see_also_item(current_func, current_desc))
+
+ return result
+
+ def _format_see_also_item(self, func: str, desc: list[str]) ->list[str]:
+ """Helper method to format individual See Also items."""
+ func_parts = func.split(',')
+ formatted_funcs = []
+ for part in func_parts:
+ part = part.strip()
+ if part.startswith(':'):
+ formatted_funcs.append(part)
+ else:
+ formatted_funcs.append(f':obj:`{part}`')
+
+ item = [', '.join(formatted_funcs)]
+ item.extend(' ' + line for line in desc)
+ return item
diff --git a/sphinx/ext/todo.py b/sphinx/ext/todo.py
index 423cd2039..496775285 100644
--- a/sphinx/ext/todo.py
+++ b/sphinx/ext/todo.py
@@ -48,6 +48,29 @@ class Todo(BaseAdmonition, SphinxDirective):
option_spec: ClassVar[OptionSpec] = {'class': directives.class_option,
'name': directives.unchanged}
+ def run(self) ->list[Node]:
+ if not self.config.todo_include_todos:
+ return []
+
+ content = self.content
+ todo_entry = todo_node(content, **self.options)
+ todo_entry += nodes.title(_('Todo'), _('Todo'))
+ self.set_source_info(todo_entry)
+
+ targetid = 'todo-%d' % self.env.new_serialno('todo')
+ targetnode = nodes.target('', '', ids=[targetid])
+ self.state.document.note_explicit_target(targetnode)
+
+ todo_entry['ids'].append(targetid)
+ self.env.get_domain('todo').add_todo(targetid, {
+ 'docname': self.env.docname,
+ 'lineno': self.lineno,
+ 'todo': todo_entry.deepcopy(),
+ 'target': targetnode,
+ })
+
+ return [targetnode, todo_entry]
+
class TodoDomain(Domain):
name = 'todo'
@@ -64,6 +87,10 @@ class TodoList(SphinxDirective):
final_argument_whitespace = False
option_spec: ClassVar[OptionSpec] = {}
+ def run(self) ->list[Node]:
+ # Simply create a todolist node which will be processed later
+ return [todolist('')]
+
class TodoListProcessor:
@@ -78,4 +105,48 @@ class TodoListProcessor:
def resolve_reference(self, todo: todo_node, docname: str) ->None:
"""Resolve references in the todo content."""
- pass
+ for node in todo.traverse(addnodes.pending_xref):
+ contnode = node[0].deepcopy()
+ newnode = None
+ try:
+ newnode = self.domain.resolve_xref(self.env, docname, self.builder,
+ node['reftype'], node['reftarget'],
+ node, contnode)
+ except NoUri:
+ pass
+ if newnode is None:
+ logger.warning(__('Failed to resolve %s: %s') % (node['reftype'], node['reftarget']),
+ location=node)
+ else:
+ node.replace_self(newnode)
+
+ def process(self, doctree: nodes.document, docname: str) ->None:
+ for node in doctree.traverse(todolist):
+ if not self.config.todo_include_todos:
+ node.parent.remove(node)
+ continue
+
+ content = []
+ for todo_info in self.domain.get_todos():
+ para = nodes.paragraph(classes=['todo-source'])
+ description = _('(The original entry is located in %s, line %d.)') % \
+ (todo_info['docname'], todo_info['lineno'])
+ para += nodes.Text(description)
+
+ ref = nodes.reference('', '')
+ ref['refdocname'] = todo_info['docname']
+ ref['refuri'] = self.builder.get_relative_uri(docname, todo_info['docname'])
+ ref['refuri'] += '#' + todo_info['target']['refid']
+ ref.append(nodes.Text(todo_info['docname']))
+
+ para += nodes.Text(' (')
+ para += ref
+ para += nodes.Text(')')
+
+ item = nodes.list_item('', para)
+ todo_entry = todo_info['todo']
+ self.resolve_reference(todo_entry, docname)
+ item += todo_entry
+ content.append(item)
+
+ node.replace_self(nodes.bullet_list('', *content))
diff --git a/sphinx/ext/viewcode.py b/sphinx/ext/viewcode.py
index 2fc49bef8..7fa4289fb 100644
--- a/sphinx/ext/viewcode.py
+++ b/sphinx/ext/viewcode.py
@@ -44,9 +44,21 @@ class ViewcodeAnchorTransform(SphinxPostTransform):
def get_module_filename(app: Sphinx, modname: str) ->(str | None):
"""Get module filename for *modname*."""
- pass
+ try:
+ module = import_module(modname)
+ return module.__file__
+ except ImportError:
+ logger.warning(__('Failed to import %s'), modname)
+ return None
def should_generate_module_page(app: Sphinx, modname: str) ->bool:
"""Check generation of module page is needed."""
- pass
+ try:
+ module = import_module(modname)
+ # Check if the module has any public attributes or functions
+ public_members = [name for name in dir(module) if not name.startswith('_')]
+ return len(public_members) > 0
+ except ImportError:
+ logger.warning(__('Failed to import %s'), modname)
+ return False
diff --git a/sphinx/extension.py b/sphinx/extension.py
index 816225dad..df757e581 100644
--- a/sphinx/extension.py
+++ b/sphinx/extension.py
@@ -32,4 +32,26 @@ def verify_needs_extensions(app: Sphinx, config: Config) ->None:
:raises VersionRequirementError: if the version of an extension in
:confval:`needs_extension` is unknown or older than the required version.
"""
- pass
+    if not getattr(config, 'needs_extensions', None):
+ return
+
+ for name, version in config.needs_extensions.items():
+ if name not in app.extensions:
+ logger.warning(__('The extension %r was not loaded yet, but is required for this project.'), name)
+ continue
+
+ ext = app.extensions[name]
+ if ext.version == 'unknown version':
+ logger.warning(__('The extension %r has an unknown version'), name)
+ continue
+
+ try:
+ required = Version(version)
+ actual = Version(ext.version)
+ if actual < required:
+ raise VersionRequirementError(
+ __('This project needs the extension %r at least in version %s, '
+ 'but you have version %s.') % (name, version, ext.version)
+ )
+ except InvalidVersion:
+ logger.warning(__('The version of %r is invalid.'), name)
diff --git a/sphinx/io.py b/sphinx/io.py
index 5f18053ac..da117b837 100644
--- a/sphinx/io.py
+++ b/sphinx/io.py
@@ -44,7 +44,10 @@ class SphinxBaseReader(standalone.Reader):
Creates a new document object which has a special reporter object good
for logging.
"""
- pass
+ document = super().new_document()
+ reporter = LoggingReporter.from_reporter(document.reporter)
+ document.reporter = reporter
+ return document
class SphinxStandaloneReader(SphinxBaseReader):
@@ -54,7 +57,10 @@ class SphinxStandaloneReader(SphinxBaseReader):
def read_source(self, env: BuildEnvironment) ->str:
"""Read content from source and do post-process."""
- pass
+ content = super().read()
+ if isinstance(self.source, str):
+ content = env.preprocess_source(self.source, content)
+ return content
class SphinxI18nReader(SphinxBaseReader):
@@ -74,7 +80,7 @@ class SphinxDummyWriter(UnfilteredWriter):
def SphinxDummySourceClass(source: Any, *args: Any, **kwargs: Any) ->Any:
"""Bypass source object as is to cheat Publisher."""
- pass
+ return source
class SphinxFileInput(FileInput):
diff --git a/sphinx/jinja2glue.py b/sphinx/jinja2glue.py
index 6b3cabe75..f6a146e29 100644
--- a/sphinx/jinja2glue.py
+++ b/sphinx/jinja2glue.py
@@ -28,12 +28,23 @@ def _todim(val: (int | str)) ->str:
Everything else is returned unchanged.
"""
- pass
+ if val is None:
+ return 'initial'
+ if val == 0 or val == '0':
+ return '0'
+ if isinstance(val, int) or (isinstance(val, str) and val.isdigit()):
+ return f'{val}px'
+ return str(val)
def accesskey(context: Any, key: str) ->str:
"""Helper to output each access key only once."""
- pass
+    if key and 'accesskeys' not in context.vars:
+        context.vars['accesskeys'] = set()
+    if key and key not in context.vars['accesskeys']:
+        context.vars['accesskeys'].add(key)
+        return ' accesskey="%s"' % key
+    return ''
class idgen:
@@ -46,6 +57,9 @@ class idgen:
return self.id
next = __next__
+ def __iter__(self):
+ return self
+
class SphinxFileSystemLoader(FileSystemLoader):
"""
@@ -53,8 +67,56 @@ class SphinxFileSystemLoader(FileSystemLoader):
template names.
"""
+ def get_source(self, environment: Environment, template: str) -> tuple[str, str | None, Callable[[], bool] | None]:
+ try:
+ return super().get_source(environment, template)
+ except TemplateNotFound:
+ # Try to load the template by joining it with the search path
+ for searchpath in self.searchpath:
+ filename = path.join(searchpath, template)
+ if path.exists(filename):
+ with open(filename, 'r', encoding=self.encoding) as f:
+ contents = f.read()
+ mtime = path.getmtime(filename)
+ def uptodate():
+ try:
+ return path.getmtime(filename) == mtime
+ except OSError:
+ return False
+ return contents, filename, uptodate
+ raise TemplateNotFound(template)
+
class BuiltinTemplateLoader(TemplateBridge, BaseLoader):
"""
Interfaces the rendering environment of jinja2 for use in Sphinx.
"""
+
+ def __init__(self) -> None:
+ self.loaders: dict[str, SphinxFileSystemLoader] = {}
+ self.templates: dict[str, str] = {}
+ self.environment = SandboxedEnvironment(loader=self)
+
+ def init(self, builder: Builder, theme: Theme | None = None, dirs: list[str] | None = None) -> None:
+ self.loaders = {}
+ self.templates = {}
+ if theme:
+ self.loaders['theme'] = SphinxFileSystemLoader(theme.get_theme_dirs())
+ if dirs:
+ self.loaders['searchpath'] = SphinxFileSystemLoader(dirs)
+ if builder.config.template_path:
+ self.loaders['template_path'] = SphinxFileSystemLoader(builder.config.template_path)
+
+ def get_source(self, environment: Environment, template: str) -> tuple[str, str | None, Callable[[], bool] | None]:
+ for loadername, loader in self.loaders.items():
+ try:
+ return loader.get_source(environment, template)
+ except TemplateNotFound:
+ pass
+ raise TemplateNotFound(template)
+
+ def render(self, template: str, context: dict) -> str:
+ return self.environment.get_template(template).render(context)
+
+ def render_string(self, source: str, context: dict) -> str:
+ return self.environment.from_string(source).render(context)
diff --git a/sphinx/parsers.py b/sphinx/parsers.py
index a9686d339..4d0f211fd 100644
--- a/sphinx/parsers.py
+++ b/sphinx/parsers.py
@@ -32,7 +32,9 @@ class Parser(docutils.parsers.Parser):
:param sphinx.application.Sphinx app: Sphinx application object
"""
- pass
+ self.app = app
+ self.config = app.config
+ self.env = app.env
class RSTParser(docutils.parsers.rst.Parser, Parser):
@@ -44,13 +46,19 @@ class RSTParser(docutils.parsers.rst.Parser, Parser):
refs: sphinx.io.SphinxStandaloneReader
"""
- pass
+ transforms = super().get_transforms()
+ transforms.remove(SmartQuotes)
+ return transforms
def parse(self, inputstring: (str | StringList), document: nodes.document
) ->None:
"""Parse text and generate a document tree."""
- pass
+ if isinstance(inputstring, str):
+ inputstring = StringList(inputstring.splitlines(), document.current_source)
+ self.decorate(inputstring)
+ super().parse(inputstring, document)
def decorate(self, content: StringList) ->None:
"""Preprocess reST content before parsing."""
- pass
+ prepend_prolog(content, self.config.rst_prolog)
+ append_epilog(content, self.config.rst_epilog)
diff --git a/sphinx/project.py b/sphinx/project.py
index 7075b88a1..0b6a51895 100644
--- a/sphinx/project.py
+++ b/sphinx/project.py
@@ -29,21 +29,53 @@ class Project:
def restore(self, other: Project) ->None:
"""Take over a result of last build."""
- pass
+ self.docnames = other.docnames
+ self._path_to_docname = other._path_to_docname
+ self._docname_to_path = other._docname_to_path
def discover(self, exclude_paths: Iterable[str]=(), include_paths:
Iterable[str]=('**',)) ->set[str]:
"""Find all document files in the source directory and put them in
:attr:`docnames`.
"""
- pass
+ self.docnames = set()
+ self._path_to_docname.clear()
+ self._docname_to_path.clear()
+
+ for filename in get_matching_files(self.srcdir, include_patterns=include_paths,
+ exclude_patterns=list(exclude_paths) + EXCLUDE_PATHS):
+ if os.path.splitext(filename)[1] in self.source_suffix:
+ docname = self.path2doc(filename)
+ if docname:
+ self.docnames.add(docname)
+
+ return self.docnames
def path2doc(self, filename: (str | os.PathLike[str])) ->(str | None):
"""Return the docname for the filename if the file is a document.
*filename* should be absolute or relative to the source directory.
"""
- pass
+        filename = Path(self.srcdir, filename).resolve()
+ try:
+ rel_path = filename.relative_to(self.srcdir)
+ except ValueError:
+ return None
+
+ for suffix in self.source_suffix:
+ if rel_path.name.endswith(suffix):
+ rel_path = rel_path.with_suffix('')
+ break
+ else:
+ return None
+
+ docname = str(rel_path).replace(os.path.sep, '/')
+        if filename in self._path_to_docname:
+ return self._path_to_docname[filename]
+ else:
+ self._path_to_docname[filename] = docname
+ self._docname_to_path[docname] = filename
+ return docname
def doc2path(self, docname: str, absolute: bool) ->_StrPath:
"""Return the filename for the document name.
@@ -51,4 +83,17 @@ class Project:
If *absolute* is True, return as an absolute path.
Else, return as a relative path to the source directory.
"""
- pass
+ if docname in self._docname_to_path:
+ path = self._docname_to_path[docname]
+ else:
+ path = Path(docname.replace('/', os.path.sep))
+ if not path.suffix:
+ path = path.with_suffix(self._first_source_suffix)
+ path = Path(self.srcdir) / path
+ self._docname_to_path[docname] = path
+ self._path_to_docname[path] = docname
+
+ if absolute:
+            return _StrPath(path)
+ else:
+ return _StrPath(path.relative_to(self.srcdir))
diff --git a/sphinx/pycode/ast.py b/sphinx/pycode/ast.py
index f7da51a16..e7d9e4009 100644
--- a/sphinx/pycode/ast.py
+++ b/sphinx/pycode/ast.py
@@ -11,7 +11,10 @@ OPERATORS: dict[type[ast.AST], str] = {ast.Add: '+', ast.And: 'and', ast.
def unparse(node: (ast.AST | None), code: str='') ->(str | None):
"""Unparse an AST to string."""
- pass
+ if node is None:
+ return None
+ visitor = _UnparseVisitor(code)
+ return visitor.visit(node)
class _UnparseVisitor(ast.NodeVisitor):
@@ -24,4 +27,9 @@ class _UnparseVisitor(ast.NodeVisitor):
def _visit_arg_with_default(self, arg: ast.arg, default: (ast.AST | None)
) ->str:
"""Unparse a single argument to a string."""
- pass
+ result = arg.arg
+ if arg.annotation:
+ result += ': ' + self.visit(arg.annotation)
+ if default:
+ result += ' = ' + self.visit(default)
+ return result
diff --git a/sphinx/pycode/parser.py b/sphinx/pycode/parser.py
index 54e6b77c5..0f263c6a3 100644
--- a/sphinx/pycode/parser.py
+++ b/sphinx/pycode/parser.py
@@ -20,7 +20,12 @@ emptyline_re = re.compile('^\\s*(#.*)?$')
def get_assign_targets(node: ast.AST) ->list[ast.expr]:
"""Get list of targets from Assign and AnnAssign node."""
- pass
+ if isinstance(node, ast.Assign):
+ return node.targets
+ elif isinstance(node, ast.AnnAssign):
+ return [node.target]
+ else:
+ return []
def get_lvar_names(node: ast.AST, self: (ast.arg | None)=None) ->list[str]:
@@ -32,12 +37,24 @@ def get_lvar_names(node: ast.AST, self: (ast.arg | None)=None) ->list[str]:
dic["bar"] = 'baz'
# => TypeError
"""
- pass
+ if isinstance(node, ast.Name):
+ return [node.id]
+ elif isinstance(node, ast.Tuple) or isinstance(node, ast.List):
+ return sum((get_lvar_names(elt, self) for elt in node.elts), [])
+ elif isinstance(node, ast.Attribute):
+ if isinstance(node.value, ast.Name) and node.value.id == 'self':
+ return [f'self.{node.attr}']
+ else:
+ raise TypeError('The assignment is not a variable')
+ else:
+ raise TypeError('The assignment is not a variable')
def dedent_docstring(s: str) ->str:
"""Remove common leading indentation from docstring."""
- pass
+ def dummy(): pass
+ dummy.__doc__ = s
+    return inspect.getdoc(dummy) or ''
class Token:
@@ -56,7 +73,7 @@ class Token:
return self.kind == other
elif isinstance(other, str):
return self.value == other
- elif isinstance(other, list | tuple):
+ elif isinstance(other, (list, tuple)):
return [self.kind, self.value] == list(other)
elif other is None:
return False
@@ -80,21 +97,39 @@ class TokenProcessor:
def get_line(self, lineno: int) ->str:
"""Returns specified line."""
- pass
+ return self.buffers[lineno - 1]
def fetch_token(self) ->(Token | None):
"""Fetch the next token from source code.
Returns ``None`` if sequence finished.
"""
- pass
+ try:
+ token = next(self.tokens)
+ self.previous = self.current
+ self.current = Token(*token)
+ return self.current
+ except StopIteration:
+ return None
def fetch_until(self, condition: Any) ->list[Token]:
"""Fetch tokens until specified token appeared.
.. note:: This also handles parenthesis well.
"""
- pass
+ tokens = []
+ depth = 0
+ while True:
+ token = self.fetch_token()
+ if token is None:
+ return tokens
+ tokens.append(token)
+            if depth == 0 and token == condition:
+                return tokens
+            if token == '(':
+                depth += 1
+            elif token == ')':
+                depth -= 1
class AfterCommentParser(TokenProcessor):
diff --git a/sphinx/registry.py b/sphinx/registry.py
index dac389ea8..85a88c86b 100644
--- a/sphinx/registry.py
+++ b/sphinx/registry.py
@@ -69,9 +69,26 @@ class SphinxComponentRegistry:
def load_extension(self, app: Sphinx, extname: str) ->None:
"""Load a Sphinx extension."""
- pass
+        try:
+            module = import_module(extname)
+        except ImportError as exc:
+            raise ExtensionError(f"Could not import extension {extname}", exc) from exc
+
+        setup_func = getattr(module, 'setup', None)
+        if setup_func is None:
+            raise ExtensionError(f"Extension {extname} has no setup() function")
+        if not callable(setup_func):
+            raise ExtensionError(f"Extension {extname}'s setup() function is not callable")
+
+        try:
+            metadata = setup_func(app)
+        except Exception as exc:
+            raise ExtensionError(f"Error occurred while setting up extension {extname}", exc) from exc
+        app.extensions[extname] = Extension(extname, module, **(metadata or {}))
def merge_source_suffix(app: Sphinx, config: Config) ->None:
"""Merge any user-specified source_suffix with any added by extensions."""
- pass
+ for suffix, filetype in app.registry.source_suffix.items():
+ if suffix not in config.source_suffix:
+ config.source_suffix[suffix] = filetype
diff --git a/sphinx/roles.py b/sphinx/roles.py
index 8173b137a..50ddde675 100644
--- a/sphinx/roles.py
+++ b/sphinx/roles.py
@@ -68,7 +68,16 @@ class XRefRole(ReferenceRole):
reference node and must return a new (or the same) ``(title, target)``
tuple.
"""
- pass
+ if self.fix_parens:
+ if not has_explicit_title:
+ title = utils.unescape(title)
+ if target.endswith('()'):
+ target = target[:-2]
+ if not has_explicit_title:
+ title += '()'
+ if self.lowercase:
+ target = target.lower()
+ return title, target
def result_nodes(self, document: nodes.document, env: BuildEnvironment,
node: Element, is_ref: bool) ->tuple[list[Node], list[system_message]]:
@@ -77,7 +86,7 @@ class XRefRole(ReferenceRole):
This method can add other nodes and must return a ``(nodes, messages)``
tuple (the usual return value of a role function).
"""
- pass
+ return [node], []
class AnyXRefRole(XRefRole):
diff --git a/sphinx/search/ja.py b/sphinx/search/ja.py
index 703d7260b..d9001fc49 100644
--- a/sphinx/search/ja.py
+++ b/sphinx/search/ja.py
@@ -49,6 +49,53 @@ class MecabSplitter(BaseSplitter):
if self.ctypes_libmecab:
self.ctypes_libmecab.mecab_destroy(self.ctypes_mecab)
+ def init_native(self, options):
+ self.mecab = MeCab.Tagger(options.get('mecab_args', ''))
+
+ def init_ctypes(self, options):
+ import ctypes
+ import ctypes.util
+
+ libmecab = ctypes.util.find_library('mecab')
+ if libmecab is None:
+ raise RuntimeError('libmecab not found')
+
+ self.ctypes_libmecab = ctypes.CDLL(libmecab)
+ self.ctypes_libmecab.mecab_new.restype = ctypes.c_void_p
+ self.ctypes_libmecab.mecab_new.argtypes = (ctypes.c_int, ctypes.POINTER(ctypes.c_char_p))
+ self.ctypes_libmecab.mecab_sparse_tostr.restype = ctypes.c_char_p
+ self.ctypes_libmecab.mecab_sparse_tostr.argtypes = (ctypes.c_void_p, ctypes.c_char_p)
+ self.ctypes_libmecab.mecab_destroy.argtypes = (ctypes.c_void_p,)
+
+ mecab_args = options.get('mecab_args', '').encode('utf-8')
+ argc = 1
+ argv = (ctypes.c_char_p * 2)(b'mecab', mecab_args)
+ self.ctypes_mecab = self.ctypes_libmecab.mecab_new(argc, argv)
+ if self.ctypes_mecab is None:
+ raise RuntimeError('mecab_new failed')
+
+ def split(self, input: str) ->list[str]:
+ if native_module:
+ return self.split_native(input)
+ else:
+ return self.split_ctypes(input)
+
+ def split_native(self, input: str) ->list[str]:
+ result = []
+ for node in self.mecab.parse(input).split('\n'):
+ if node == 'EOS':
+ break
+ surface = node.split('\t')[0]
+ if surface:
+ result.append(surface)
+ return result
+
+ def split_ctypes(self, input: str) ->list[str]:
+ input_bytes = input.encode(self.dict_encode)
+ result_bytes = self.ctypes_libmecab.mecab_sparse_tostr(self.ctypes_mecab, input_bytes)
+ result_string = result_bytes.decode(self.dict_encode)
+ return [line.split('\t')[0] for line in result_string.splitlines() if line != 'EOS']
+
class JanomeSplitter(BaseSplitter):
@@ -58,6 +105,14 @@ class JanomeSplitter(BaseSplitter):
self.user_dict_enc = options.get('user_dic_enc', 'utf8')
self.init_tokenizer()
+ def init_tokenizer(self):
+ if not janome_module:
+ raise ImportError('Janome is not installed')
+ self.tokenizer = janome.tokenizer.Tokenizer(udic=self.user_dict, udic_enc=self.user_dict_enc)
+
+ def split(self, input: str) ->list[str]:
+ return [token.surface for token in self.tokenizer.tokenize(input)]
+
class DefaultSplitter(BaseSplitter):
patterns_ = {re.compile(pattern): value for pattern, value in {
@@ -330,3 +385,16 @@ class SearchJapanese(SearchLanguage):
"""
lang = 'ja'
language_name = 'Japanese'
+
+ def __init__(self, options):
+ super().__init__(options)
+ self.splitter = DefaultSplitter(options)
+
+ def split(self, input):
+ return self.splitter.split(input)
+
+ def word_filter(self, stemmed_word):
+ return len(stemmed_word) > 1
+
+ def stem(self, word):
+ return word
diff --git a/sphinx/testing/fixtures.py b/sphinx/testing/fixtures.py
index 354a465af..7ab094149 100644
--- a/sphinx/testing/fixtures.py
+++ b/sphinx/testing/fixtures.py
@@ -19,7 +19,8 @@ DEFAULT_ENABLED_MARKERS = [
def pytest_configure(config: pytest.Config) ->None:
"""Register custom markers"""
- pass
+ for marker in DEFAULT_ENABLED_MARKERS:
+ config.addinivalue_line('markers', marker)
class SharedResult:
@@ -33,7 +34,18 @@ def app_params(request: Any, test_params: dict[str, Any], shared_result:
Parameters that are specified by 'pytest.mark.sphinx' for
sphinx.application.Sphinx initialization
"""
- pass
+    markers = list(request.node.iter_markers('sphinx'))
+    args = ()
+    kwargs = {}
+
+    if markers:
+        # use last args if multiple 'sphinx' markers are specified
+        args, kwargs = markers[-1].args, markers[-1].kwargs
+
+ kwargs.setdefault('srcdir', sphinx_test_tempdir)
+ kwargs.setdefault('builddir', os.path.join(sphinx_test_tempdir, '_build'))
+
+ return _app_params(args, kwargs)
_app_params = namedtuple('_app_params', 'args,kwargs')
@@ -50,7 +62,10 @@ def test_params(request: Any) ->dict[str, Any]:
have same 'shared_result' value.
**NOTE**: You can not specify both shared_result and srcdir.
"""
- pass
+    markers = list(request.node.iter_markers('test_params'))
+    if markers:
+        return markers[-1].kwargs
+ return {}
@pytest.fixture
@@ -60,7 +75,9 @@ def app(test_params: dict[str, Any], app_params: _app_params, make_app:
"""
Provides the 'sphinx.application.Sphinx' object
"""
- pass
+ app = make_app()
+ yield app
+ app.cleanup()
@pytest.fixture
@@ -68,7 +85,7 @@ def status(app: SphinxTestApp) ->StringIO:
"""
Back-compatibility for testing with previous @with_app decorator
"""
- pass
+ return app._status
@pytest.fixture
@@ -76,7 +93,7 @@ def warning(app: SphinxTestApp) ->StringIO:
"""
Back-compatibility for testing with previous @with_app decorator
"""
- pass
+ return app._warning
@pytest.fixture
@@ -87,7 +104,10 @@ def make_app(test_params: dict[str, Any]) ->Iterator[Callable[[],
if you want to initialize 'app' in your test function. please use this
instead of using SphinxTestApp class directory.
"""
- pass
+ def _make_app():
+ app = SphinxTestApp(**test_params)
+ return app
+ yield _make_app
@pytest.fixture
@@ -96,13 +116,14 @@ def if_graphviz_found(app: SphinxTestApp) ->None:
The test will be skipped when using 'if_graphviz_found' fixture and graphviz
dot command is not found.
"""
- pass
+ if shutil.which('dot') is None:
+ pytest.skip('graphviz "dot" command is not available')
@pytest.fixture(scope='session')
def sphinx_test_tempdir(tmp_path_factory: pytest.TempPathFactory) ->Path:
"""Temporary directory."""
- pass
+ return tmp_path_factory.getbasetemp() / "sphinx-test"
@pytest.fixture
@@ -114,4 +135,7 @@ def rollback_sysmodules() ->Iterator[None]:
For example, used in test_ext_autosummary.py to permit unloading the
target module to clear its cache.
"""
- pass
+ modules = sys.modules.copy()
+ yield
+ sys.modules.clear()
+ sys.modules.update(modules)
diff --git a/sphinx/testing/path.py b/sphinx/testing/path.py
index 6aa6b7bf7..f26454965 100644
--- a/sphinx/testing/path.py
+++ b/sphinx/testing/path.py
@@ -16,7 +16,7 @@ FILESYSTEMENCODING = sys.getfilesystemencoding() or sys.getdefaultencoding()
def getumask() ->int:
"""Get current umask value"""
- pass
+    # os.umask(0) *sets* the mask to 0 while returning the old value,
+    # so restore it immediately.
+    umask = os.umask(0)
+    os.umask(umask)
+    return umask
UMASK = getumask()
@@ -33,43 +33,43 @@ class path(str):
"""
The name of the directory the file or directory is in.
"""
- pass
+ return path(os.path.dirname(self))
def abspath(self) ->path:
"""
Returns the absolute path.
"""
- pass
+ return path(os.path.abspath(self))
def isabs(self) ->bool:
"""
Returns ``True`` if the path is absolute.
"""
- pass
+ return os.path.isabs(self)
def isdir(self) ->bool:
"""
Returns ``True`` if the path is a directory.
"""
- pass
+ return os.path.isdir(self)
def isfile(self) ->bool:
"""
Returns ``True`` if the path is a file.
"""
- pass
+ return os.path.isfile(self)
def islink(self) ->bool:
"""
Returns ``True`` if the path is a symbolic link.
"""
- pass
+ return os.path.islink(self)
def ismount(self) ->bool:
"""
Returns ``True`` if the path is a mount point.
"""
- pass
+ return os.path.ismount(self)
def rmtree(self, ignore_errors: bool=False, onerror: (Callable[[
Callable[..., Any], str, Any], object] | None)=None) ->None:
@@ -88,7 +88,7 @@ class path(str):
caused it to fail and `exc_info` is a tuple as returned by
:func:`sys.exc_info`.
"""
- pass
+ shutil.rmtree(self, ignore_errors=ignore_errors, onerror=onerror)
def copytree(self, destination: str, symlinks: bool=False) ->None:
"""
@@ -100,7 +100,7 @@ class path(str):
links in the destination tree otherwise the contents of the files
pointed to by the symbolic links are copied.
"""
- pass
+ shutil.copytree(self, destination, symlinks=symlinks)
def movetree(self, destination: str) ->None:
"""
@@ -110,39 +110,42 @@ class path(str):
If the `destination` is a file it may be overwritten depending on the
:func:`os.rename` semantics.
"""
- pass
+ shutil.move(self, destination)
move = movetree
def unlink(self) ->None:
"""
Removes a file.
"""
- pass
+ os.unlink(self)
def stat(self) ->Any:
"""
Returns a stat of the file.
"""
- pass
+ return os.stat(self)
def write_text(self, text: str, encoding: str='utf-8', **kwargs: Any
) ->None:
"""
Writes the given `text` to the file.
"""
- pass
+ with open(self, 'w', encoding=encoding, **kwargs) as f:
+ f.write(text)
def read_text(self, encoding: str='utf-8', **kwargs: Any) ->str:
"""
Returns the text in the file.
"""
- pass
+ with open(self, 'r', encoding=encoding, **kwargs) as f:
+ return f.read()
def read_bytes(self) ->builtins.bytes:
"""
Returns the bytes in the file.
"""
- pass
+ with open(self, 'rb') as f:
+ return f.read()
def write_bytes(self, bytes: bytes, append: bool=False) ->None:
"""
@@ -151,32 +154,34 @@ class path(str):
:param append:
If ``True`` given `bytes` are added at the end of the file.
"""
- pass
+ mode = 'ab' if append else 'wb'
+ with open(self, mode) as f:
+ f.write(bytes)
def exists(self) ->bool:
"""
Returns ``True`` if the path exist.
"""
- pass
+ return os.path.exists(self)
def lexists(self) ->bool:
"""
Returns ``True`` if the path exists unless it is a broken symbolic
link.
"""
- pass
+ return os.path.lexists(self)
def makedirs(self, mode: int=511, exist_ok: bool=False) ->None:
"""
Recursively create directories.
"""
- pass
+ os.makedirs(self, mode=mode, exist_ok=exist_ok)
def joinpath(self, *args: Any) ->path:
"""
Joins the path with the argument given and returns the result.
"""
- pass
+ return path(os.path.join(self, *args))
__div__ = __truediv__ = joinpath
def __repr__(self) ->str:
diff --git a/sphinx/testing/restructuredtext.py b/sphinx/testing/restructuredtext.py
index f8e53bc18..19f7ecbdb 100644
--- a/sphinx/testing/restructuredtext.py
+++ b/sphinx/testing/restructuredtext.py
@@ -9,4 +9,21 @@ from sphinx.util.docutils import sphinx_domains
def parse(app: Sphinx, text: str, docname: str='index') ->nodes.document:
"""Parse a string as reStructuredText with Sphinx application."""
- pass
+ reader = SphinxStandaloneReader()
+ parser = RSTParser()
+ parser.set_application(app)
+
+ settings = app.env.settings.copy()
+ settings['initial_header_level'] = 1
+ settings['docname'] = docname
+
+ with sphinx_domains(app.env):
+ document = publish_doctree(
+ source=text,
+ source_path=path.join(app.srcdir, docname + '.rst'),
+ reader=reader,
+ parser=parser,
+ settings_overrides=settings,
+ )
+
+ return document
diff --git a/sphinx/testing/util.py b/sphinx/testing/util.py
index 6b9e7daa2..0f8542855 100644
--- a/sphinx/testing/util.py
+++ b/sphinx/testing/util.py
@@ -24,7 +24,9 @@ if TYPE_CHECKING:
def etree_parse(path: (str | os.PathLike[str])) ->ElementTree:
"""Parse a file into a (safe) XML element tree."""
- pass
+    # NOTE: xml.etree's XMLParser has no 'resolve_entities' keyword
+    # (that argument belongs to lxml); passing it raises TypeError.
+    from xml.etree import ElementTree as ET
+    return ET.parse(path)
class SphinxTestApp(sphinx.application.Sphinx):
@@ -102,12 +104,12 @@ class SphinxTestApp(sphinx.application.Sphinx):
@property
def status(self) ->StringIO:
"""The in-memory text I/O for the application status messages."""
- pass
+ return self._status
@property
def warning(self) ->StringIO:
"""The in-memory text I/O for the application warning messages."""
- pass
+ return self._warning
def __repr__(self) ->str:
return f'<{self.__class__.__name__} buildername={self.builder.name!r}>'
diff --git a/sphinx/texinputs/sphinxlatextables.sty b/sphinx/texinputs/sphinxlatextables.sty
index 4114955e0..f52692fc7 100644
--- a/sphinx/texinputs/sphinxlatextables.sty
+++ b/sphinx/texinputs/sphinxlatextables.sty
@@ -412,12 +412,50 @@
\fi
\fi
}%
+
+% Implementation of is_longtable()
+\def\is@longtable{%
+ \ifx\longtable\undefined
+ \@false
+ \else
+ \ifx\sphinxtablecolumns\undefined
+ \@false
+ \else
+ \ifnum\sphinxtablecolumns>\z@
+ \@true
+ \else
+ \@false
+ \fi
+ \fi
+ \fi
+}
% fallback default in case user has set latex_use_latex_multicolumn to True:
% \sphinxcolwidth will use this only inside LaTeX's standard \multicolumn
\def\sphinx@multiwidth #1#2{\dimexpr % #1 to gobble the \@gobble (!)
(\ifx\TY@final\@undefined\linewidth\else\sphinx@TY@tablewidth\fi
-\spx@arrayrulewidth)*#2-\tw@\tabcolsep-\spx@arrayrulewidth\relax}%
+% Implementation of get_table_type()
+\def\get@table@type{%
+ \ifx\longtable\undefined
+ \ifx\tabulary\undefined
+ tabular%
+ \else
+ tabulary%
+ \fi
+ \else
+ \ifnum\sphinxtablecolumns>\z@
+ longtable%
+ \else
+ \ifx\tabulary\undefined
+ tabular%
+ \else
+ tabulary%
+ \fi
+ \fi
+ \fi
+}
+
% \spx@table@hackCT@inhibitvline
% packages like colortbl add group levels, we need to "climb back up" to be
% able to hack the \vline and also the colortbl inserted tokens. The hack
@@ -455,6 +493,19 @@
% hence also \spx@arrayrulewidth...
{\sphinxcolorpanelextraoverhang+\the\spx@arrayrulewidth}%
\else\aftergroup\spx@table@hackCT@fixcolorpanel\fi}%
+
+% Implementation of get_colspec()
+\def\get@colspec{%
+ \ifx\sphinxtablecolspec\undefined
+ \ifx\sphinxtablecolwidths\undefined
+ l% Default to left-aligned if no column spec is provided
+ \else
+ \sphinxtablecolwidths
+ \fi
+ \else
+ \sphinxtablecolspec
+ \fi
+}
%
% inmergedcell
% \spx@table@hackCT@inmergedcell will be locally set to either this
@@ -1200,6 +1251,25 @@ local use of booktabs table style}%
% header row colours are fixed, not alternating, so there is at least no
% coherence issue there.
+% Implementation of add_cell()
+\def\add@cell#1#2{%
+ \global\advance\sphinxtablecellid by 1\relax
+ \count@=#1\relax
+ \@whilenum\count@>\z@\do{%
+ \advance\count@\m@ne
+ \count@@=#2\relax
+ \@whilenum\count@@>\z@\do{%
+ \advance\count@@\m@ne
+ \expandafter\gdef\csname sphinxtablecell@\the\sphinxtablerow @\the\sphinxtablecol\endcsname{\the\sphinxtablecellid}%
+ \global\advance\sphinxtablecol by 1\relax
+ }%
+ \global\sphinxtablecol=\sphinxtablestartcol\relax
+ \global\advance\sphinxtablerow by 1\relax
+ }%
+ \global\sphinxtablerow=\sphinxtablestartrow\relax
+ \global\advance\sphinxtablecol by #2\relax
+}
+
% The \spx@arrayrulewidth is used for some complex matters of merged
% cells size computations.
% tabularcolumns argument will override any global or local style and
@@ -1237,5 +1307,23 @@ local use of booktabs table style}%
}
\fi
+% Implementation of cell()
+\def\get@cell#1#2{%
+ \ifx#1\relax
+ \def\sphinxtablecurrentrow{\the\sphinxtablerow}%
+ \else
+ \def\sphinxtablecurrentrow{#1}%
+ \fi
+ \ifx#2\relax
+ \def\sphinxtablecurrentcol{\the\sphinxtablecol}%
+ \else
+ \def\sphinxtablecurrentcol{#2}%
+ \fi
+ \expandafter\ifx\csname sphinxtablecell@\sphinxtablecurrentrow @\sphinxtablecurrentcol\endcsname\relax
+ \PackageError{sphinxlatextables}{Cell (\sphinxtablecurrentrow,\sphinxtablecurrentcol) does not exist}{}%
+ \else
+ \csname sphinxtablecell@\sphinxtablecurrentrow @\sphinxtablecurrentcol\endcsname
+ \fi
+}
\endinput
diff --git a/sphinx/theming.py b/sphinx/theming.py
index 43bb6bd9b..969ccd265 100644
--- a/sphinx/theming.py
+++ b/sphinx/theming.py
@@ -81,23 +81,32 @@ class Theme:
"""Return a list of theme directories, beginning with this theme's,
then the base theme's, then that one's base theme's, etc.
"""
- pass
+ return list(self._dirs)
def get_config(self, section: str, name: str, default: Any=_NO_DEFAULT
) ->Any:
"""Return the value for a theme configuration setting, searching the
base theme chain.
"""
- pass
+ if section == 'theme' and name in self._options:
+ return self._options[name]
+ if default is _NO_DEFAULT:
+ raise ThemeError(f'setting not found: {section}.{name}')
+ return default
def get_options(self, overrides: (dict[str, Any] | None)=None) ->dict[
str, Any]:
"""Return a dictionary of theme options and their values."""
- pass
+ options = self._options.copy()
+ if overrides:
+ options.update(overrides)
+ return options
def _cleanup(self) ->None:
"""Remove temporary directories."""
- pass
+ for tmp_dir in self._tmp_dirs:
+ shutil.rmtree(tmp_dir, ignore_errors=True)
+ self._tmp_dirs.clear()
class HTMLThemeFactory:
@@ -114,37 +123,103 @@ class HTMLThemeFactory:
def _load_builtin_themes(self) ->None:
"""Load built-in themes."""
- pass
+ themes = self._find_themes(path.join(package_dir, 'themes'))
+ self._themes.update(themes)
def _load_additional_themes(self, theme_paths: list[str]) ->None:
"""Load additional themes placed at specified directories."""
- pass
+ for theme_path in theme_paths:
+ themes = self._find_themes(theme_path)
+ self._themes.update(themes)
def _load_entry_point_themes(self) ->None:
"""Try to load a theme with the specified name.
This uses the ``sphinx.html_themes`` entry point from package metadata.
"""
- pass
+ for entry_point in entry_points(group='sphinx.html_themes'):
+ self._entry_point_themes[entry_point.name] = entry_point.load
@staticmethod
def _find_themes(theme_path: str) ->dict[str, str]:
"""Search themes from specified directory."""
- pass
+ themes = {}
+ if not path.isdir(theme_path):
+ return themes
+
+ for entry in os.listdir(theme_path):
+ full_path = path.join(theme_path, entry)
+ if path.isdir(full_path):
+ if path.isfile(path.join(full_path, _THEME_CONF)) or path.isfile(path.join(full_path, _THEME_TOML)):
+ themes[entry] = full_path
+ elif _is_archived_theme(full_path):
+ themes[entry[:-4]] = full_path # remove '.zip' from name
+
+ return themes
def create(self, name: str) ->Theme:
"""Create an instance of theme."""
- pass
+ if name not in self._themes and name in self._entry_point_themes:
+ self._entry_point_themes[name]()
+
+ if name not in self._themes:
+ raise ThemeError(__('no theme named %r found') % name)
+
+ theme_path = self._themes[name]
+ configs = {}
+ tmp_dirs = []
+
+ if _is_archived_theme(theme_path):
+ tmp_dir = tempfile.mkdtemp()
+ _extract_zip(theme_path, tmp_dir)
+ theme_path = tmp_dir
+ tmp_dirs.append(tmp_dir)
+
+ while True:
+ config_file = path.join(theme_path, _THEME_TOML)
+ if path.exists(config_file):
+ config = self._load_theme_toml(config_file)
+ else:
+ config_file = path.join(theme_path, _THEME_CONF)
+ if not path.exists(config_file):
+ break
+ config = self._load_theme_conf(config_file)
+
+ configs[theme_path] = config
+
+ parent = config.options.get('inherit')
+ if parent is None:
+ break
+
+ theme_path = self._themes.get(parent)
+ if not theme_path:
+ raise ThemeError(__('no theme named %r found, inherited by %r') % (parent, name))
+
+ if _is_archived_theme(theme_path):
+ tmp_dir = tempfile.mkdtemp()
+ _extract_zip(theme_path, tmp_dir)
+ theme_path = tmp_dir
+ tmp_dirs.append(tmp_dir)
+
+ return Theme(name, configs=configs, paths=list(configs.keys()), tmp_dirs=tmp_dirs)
def _is_archived_theme(filename: str, /) ->bool:
"""Check whether the specified file is an archived theme file or not."""
- pass
+ return filename.endswith('.zip') and path.isfile(filename)
def _extract_zip(filename: str, target_dir: str, /) ->None:
"""Extract zip file to target directory."""
- pass
+ with ZipFile(filename) as archive:
+ for name in archive.namelist():
+ if name.endswith('/'):
+ continue
+ # convert filename to system dependent path
+ target = path.join(target_dir, *name.split('/'))
+ ensuredir(path.dirname(target))
+ with open(target, 'wb') as fp:
+ fp.write(archive.read(name))
class _ConfigFile:
diff --git a/sphinx/transforms/compact_bullet_list.py b/sphinx/transforms/compact_bullet_list.py
index 54ebc5311..744ffb98b 100644
--- a/sphinx/transforms/compact_bullet_list.py
+++ b/sphinx/transforms/compact_bullet_list.py
@@ -21,6 +21,20 @@ class RefOnlyListChecker(nodes.GenericNodeVisitor):
"""Invisible nodes should be ignored."""
pass
+    def visit_paragraph(self, node: nodes.paragraph) -> None:
+        if len(node.children) != 1 or not isinstance(node.children[0], nodes.reference):
+            raise nodes.NodeFound
+        # Stop here: descending into the reference node would reach
+        # default_visit, which rejects the whole list via NodeFound.
+        raise nodes.SkipChildren
+
+ def visit_bullet_list(self, node: nodes.bullet_list) -> None:
+ pass
+
+ def visit_list_item(self, node: nodes.list_item) -> None:
+ if len(node.children) != 1 or not isinstance(node.children[0], nodes.paragraph):
+ raise nodes.NodeFound
+
+ def default_visit(self, node: Node) -> None:
+ raise nodes.NodeFound
+
class RefOnlyBulletListTransform(SphinxTransform):
"""Change refonly bullet lists to use compact_paragraphs.
@@ -29,3 +43,24 @@ class RefOnlyBulletListTransform(SphinxTransform):
odd when html_compact_lists is false.
"""
default_priority = 100
+
+ def apply(self, **kwargs: Any) -> None:
+ checker = RefOnlyListChecker(self.document)
+ for node in self.document.traverse(nodes.bullet_list):
+ if not node.get('compact_bullet_list'):
+ try:
+ node.walk(checker)
+ except nodes.NodeFound:
+ continue
+ for child in node.children:
+ child[0].__class__ = addnodes.compact_paragraph
+ node['compact_bullet_list'] = True
+
+
+def setup(app: Sphinx) -> ExtensionMetadata:
+ app.add_transform(RefOnlyBulletListTransform)
+ return {
+ 'version': '1.0',
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
+ }
diff --git a/sphinx/transforms/i18n.py b/sphinx/transforms/i18n.py
index 7d4cc25e7..d90d2357e 100644
--- a/sphinx/transforms/i18n.py
+++ b/sphinx/transforms/i18n.py
@@ -40,7 +40,22 @@ def publish_msgstr(app: Sphinx, source: str, source_path: str, source_line:
:return: document
:rtype: docutils.nodes.document
"""
- pass
+ from docutils.core import publish_doctree
+
+ # Create a new document with the given source
+ document = publish_doctree(source=source,
+ source_path=source_path,
+ reader=None,
+ parser=app.registry.create_source_parser(app, 'restructuredtext'),
+ settings_overrides=settings,
+ config=config)
+
+ # Set the source information
+ for node in document.traverse():
+ node.source = source_path
+ node.line = source_line
+
+ return document
class PreserveTranslatableMessages(SphinxTransform):
@@ -63,7 +78,14 @@ class _NodeUpdater:
def compare_references(self, old_refs: Sequence[nodes.Element],
new_refs: Sequence[nodes.Element], warning_msg: str) ->None:
"""Warn about mismatches between references in original and translated content."""
- pass
+ if len(old_refs) != len(new_refs):
+ logger.warning(warning_msg, location=self.node)
+ return
+
+ for old, new in zip(old_refs, new_refs):
+ if old['reftype'] != new['reftype'] or old['reftarget'] != new['reftarget']:
+ logger.warning(warning_msg, location=self.node)
+ return
class Locale(SphinxTransform):
diff --git a/sphinx/transforms/post_transforms/images.py b/sphinx/transforms/post_transforms/images.py
index 76d5727d2..480991b38 100644
--- a/sphinx/transforms/post_transforms/images.py
+++ b/sphinx/transforms/post_transforms/images.py
@@ -61,7 +61,14 @@ class ImageConverter(BaseImageConverter):
def is_available(self) ->bool:
"""Return the image converter is available or not."""
- pass
+ if self.available is None:
+ self.available = self._check_availability()
+ return self.available
+
+ def _check_availability(self) ->bool:
+ """Check if the converter is available."""
+ # This method should be implemented by subclasses
+ return False
def convert(self, _from: str, _to: str) ->bool:
"""Convert an image file to the expected format.
@@ -69,4 +76,16 @@ class ImageConverter(BaseImageConverter):
*_from* is a path of the source image file, and *_to* is a path
of the destination file.
"""
- pass
+ if not self.is_available():
+ return False
+
+ for source_ext, dest_ext in self.conversion_rules:
+ if _from.lower().endswith(source_ext) and _to.lower().endswith(dest_ext):
+ return self._do_convert(_from, _to)
+
+ return False
+
+ def _do_convert(self, source: str, destination: str) ->bool:
+ """Perform the actual conversion."""
+ # This method should be implemented by subclasses
+ return False
diff --git a/sphinx/util/_importer.py b/sphinx/util/_importer.py
index 6f88b541b..37b5c234e 100644
--- a/sphinx/util/_importer.py
+++ b/sphinx/util/_importer.py
@@ -6,4 +6,15 @@ from sphinx.errors import ExtensionError
def import_object(object_name: str, /, source: str='') ->Any:
"""Import python object by qualname."""
- pass
+    # Import progressively shorter module prefixes so dotted attribute
+    # paths ("pkg.mod.Class.attr") resolve: rsplit('.', 1) would try to
+    # import "pkg.mod.Class" as a module and fail.
+    names = object_name.split('.')
+    if len(names) < 2:
+        raise ExtensionError(f"Invalid object name: {object_name}")
+    for split in range(len(names) - 1, 0, -1):
+        module_name = '.'.join(names[:split])
+        try:
+            obj = import_module(module_name)
+        except ImportError:
+            continue
+        try:
+            for attr in names[split:]:
+                obj = getattr(obj, attr)
+        except AttributeError as exc:
+            raise ExtensionError(
+                f"Could not find {'.'.join(names[split:])} in {module_name}"
+            ) from exc
+        return obj
+    raise ExtensionError(f"Could not import {'.'.join(names[:-1])}")
diff --git a/sphinx/util/_io.py b/sphinx/util/_io.py
index 47a4e10e9..920f70f91 100644
--- a/sphinx/util/_io.py
+++ b/sphinx/util/_io.py
@@ -16,3 +16,13 @@ class TeeStripANSI:
) ->None:
self.stream_term = stream_term
self.stream_file = stream_file
+
+ def write(self, text: str) ->None:
+ self.stream_term.write(text)
+ self.stream_file.write(strip_escape_sequences(text))
+
+ def flush(self) ->None:
+ if hasattr(self.stream_term, 'flush'):
+ self.stream_term.flush()
+ if hasattr(self.stream_file, 'flush'):
+ self.stream_file.flush()
diff --git a/sphinx/util/_timestamps.py b/sphinx/util/_timestamps.py
index 9804211d8..720ec32f1 100644
--- a/sphinx/util/_timestamps.py
+++ b/sphinx/util/_timestamps.py
@@ -7,4 +7,7 @@ def _format_rfc3339_microseconds(timestamp: int, /) ->str:
:param timestamp: The timestamp to format, in microseconds.
"""
- pass
+ seconds, microseconds = divmod(timestamp, 1_000_000)
+ dt = time.gmtime(seconds)
+ return (f"{dt.tm_year:04d}-{dt.tm_mon:02d}-{dt.tm_mday:02d}T"
+ f"{dt.tm_hour:02d}:{dt.tm_min:02d}:{dt.tm_sec:02d}.{microseconds:06d}Z")
diff --git a/sphinx/util/console.py b/sphinx/util/console.py
index 1da057880..7510138ae 100644
--- a/sphinx/util/console.py
+++ b/sphinx/util/console.py
@@ -32,12 +32,15 @@ codes: dict[str, str] = {}
def terminal_safe(s: str) ->str:
"""Safely encode a string for printing to the terminal."""
- pass
+ return s.encode('ascii', 'replace').decode('ascii')
def get_terminal_width() ->int:
"""Return the width of the terminal in columns."""
- pass
+ try:
+ return shutil.get_terminal_size().columns
+ except AttributeError:
+ return 80 # fallback value
_tw: int = get_terminal_width()
@@ -53,7 +56,7 @@ def strip_colors(s: str) ->str:
.. seealso:: :func:`strip_escape_sequences`
"""
- pass
+ return _ansi_color_re.sub('', s)
def strip_escape_sequences(text: str, /) ->str:
@@ -76,7 +79,7 @@ def strip_escape_sequences(text: str, /) ->str:
__ https://en.wikipedia.org/wiki/ANSI_escape_code
"""
- pass
+ return _ansi_re.sub('', text)
_attrs = {'reset': '39;49;00m', 'bold': '01m', 'faint': '02m', 'standout':
diff --git a/sphinx/util/docfields.py b/sphinx/util/docfields.py
index 71ce35c34..92ba41fae 100644
--- a/sphinx/util/docfields.py
+++ b/sphinx/util/docfields.py
@@ -22,7 +22,12 @@ logger = logging.getLogger(__name__)
def _is_single_paragraph(node: nodes.field_body) ->bool:
"""True if the node only contains one paragraph (and system messages)."""
- pass
+    # The previous "a and b or (c and d)" form had a precedence bug and
+    # made an empty body count as a single paragraph (all() over zero
+    # children is vacuously True).
+    children = [n for n in node.children
+                if not isinstance(n, nodes.system_message)]
+    return len(children) == 1 and isinstance(children[0], nodes.paragraph)
class Field:
@@ -115,8 +120,54 @@ class DocFieldTransformer:
def transform_all(self, node: addnodes.desc_content) ->None:
"""Transform all field list children of a node."""
- pass
+ for field_list in node.traverse(nodes.field_list):
+ self.transform(field_list)
def transform(self, node: nodes.field_list) ->None:
"""Transform a single field list *node*."""
- pass
+ typemap = self.typemap
+
+ entries: list[nodes.field] = []
+ groupindices: dict[str, int] = {}
+
+ def handle_field(field: nodes.field, name: str, sig: str, signode: TextlikeNode | None) -> None:
+ fieldtype, is_grouped = typemap.get(name, (None, False))
+ if fieldtype is None:
+ # Unknown field type, keep it as-is
+ entries.append(field)
+ return
+
+ typename = fieldtype.name
+
+ if is_grouped:
+ if typename in groupindices:
+ group = entries[groupindices[typename]]
+ else:
+ group = self.create_grouped_field(fieldtype, sig)
+ entries.append(group)
+ groupindices[typename] = len(entries) - 1
+
+ if fieldtype.is_typed:
+ arg, type_ = self.split_type_and_arg(sig)
+ self.add_field_arg_to_group(group, fieldtype, arg, type_)
+ else:
+ self.add_field_to_group(group, fieldtype, sig)
+ else:
+ field = self.create_field(fieldtype, sig)
+ entries.append(field)
+
+ for field in node:
+ assert isinstance(field, nodes.field)
+ field_name = field[0].astext()
+ field_body = field[1]
+
+ if field_name in typemap:
+ handle_field(field, field_name, field_body.astext(), field_body)
+ elif ':' in field_name:
+ name, _, sig = field_name.partition(':')
+ handle_field(field, name.strip(), sig.strip(), field_body)
+ else:
+ entries.append(field)
+
+ node.clear()
+ node.extend(entries)
diff --git a/sphinx/util/docstrings.py b/sphinx/util/docstrings.py
index 8e3c68d14..dbefab875 100644
--- a/sphinx/util/docstrings.py
+++ b/sphinx/util/docstrings.py
@@ -8,7 +8,23 @@ field_list_item_re = re.compile(Body.patterns['field_marker'])
def separate_metadata(s: (str | None)) ->tuple[str | None, dict[str, str]]:
"""Separate docstring into metadata and others."""
- pass
+ if s is None:
+ return None, {}
+
+ metadata = {}
+ lines = s.split('\n')
+ content_lines = []
+
+ for line in lines:
+ match = field_list_item_re.match(line)
+ if match:
+ field, _, value = line.partition(':')
+ metadata[field.strip()] = value.strip()
+ else:
+ content_lines.append(line)
+
+ content = '\n'.join(content_lines).strip()
+ return content if content else None, metadata
def prepare_docstring(s: str, tabsize: int=8) ->list[str]:
@@ -19,11 +35,43 @@ def prepare_docstring(s: str, tabsize: int=8) ->list[str]:
ViewList (used as argument of nested_parse().) An empty line is added to
act as a separator between this docstring and following content.
"""
- pass
+ if not s:
+ return []
+
+ lines = s.expandtabs(tabsize).split('\n')
+
+ # Find minimum indentation (first line doesn't count)
+ indent = sys.maxsize
+ for line in lines[1:]:
+ stripped = line.lstrip()
+ if stripped:
+ indent = min(indent, len(line) - len(stripped))
+
+ # Remove indentation (first line is special)
+ trimmed = [lines[0].strip()]
+ if indent < sys.maxsize:
+ for line in lines[1:]:
+ trimmed.append(line[indent:].rstrip())
+
+ # Strip off trailing and leading blank lines:
+ while trimmed and not trimmed[-1]:
+ trimmed.pop()
+ while trimmed and not trimmed[0]:
+ trimmed.pop(0)
+
+ # Add an empty line to act as a separator
+ trimmed.append('')
+
+ return trimmed
def prepare_commentdoc(s: str) ->list[str]:
"""Extract documentation comment lines (starting with #:) and return them
as a list of lines. Returns an empty list if there is no documentation.
"""
- pass
+ result = []
+ for line in s.split('\n'):
+ line = line.strip()
+ if line.startswith('#:'):
+ result.append(line[2:].strip())
+ return result
diff --git a/sphinx/util/docutils.py b/sphinx/util/docutils.py
index d4889e254..c6fb8cc38 100644
--- a/sphinx/util/docutils.py
+++ b/sphinx/util/docutils.py
@@ -36,12 +36,15 @@ additional_nodes: set[type[Element]] = set()
@contextmanager
def docutils_namespace() ->Iterator[None]:
"""Create namespace for reST parsers."""
- pass
+ try:
+ yield
+ finally:
+ pass
def is_directive_registered(name: str) ->bool:
"""Check the *name* directive is already registered."""
- pass
+ return name in directives._directives
def register_directive(name: str, directive: type[Directive]) ->None:
@@ -50,12 +53,12 @@ def register_directive(name: str, directive: type[Directive]) ->None:
This modifies global state of docutils. So it is better to use this
inside ``docutils_namespace()`` to prevent side-effects.
"""
- pass
+ directives.register_directive(name, directive)
def is_role_registered(name: str) ->bool:
"""Check the *name* role is already registered."""
- pass
+ return name in roles._roles
def register_role(name: str, role: RoleFunction) ->None:
@@ -64,17 +67,18 @@ def register_role(name: str, role: RoleFunction) ->None:
This modifies global state of docutils. So it is better to use this
inside ``docutils_namespace()`` to prevent side-effects.
"""
- pass
+ roles.register_local_role(name, role)
def unregister_role(name: str) ->None:
"""Unregister a role from docutils."""
- pass
+ if name in roles._roles:
+ del roles._roles[name]
def is_node_registered(node: type[Element]) ->bool:
"""Check the *node* is already registered."""
- pass
+ return node in additional_nodes
def register_node(node: type[Element]) ->None:
@@ -83,7 +87,7 @@ def register_node(node: type[Element]) ->None:
This modifies global state of some visitors. So it is better to use this
inside ``docutils_namespace()`` to prevent side-effects.
"""
- pass
+ additional_nodes.add(node)
def unregister_node(node: type[Element]) ->None:
@@ -91,7 +95,8 @@ def unregister_node(node: type[Element]) ->None:
This is inverse of ``nodes._add_nodes_class_names()``.
"""
- pass
+ if node in additional_nodes:
+ additional_nodes.remove(node)
@contextmanager
@@ -101,7 +106,16 @@ def patched_get_language() ->Iterator[None]:
This ignores the second argument ``reporter`` to suppress warnings.
refs: https://github.com/sphinx-doc/sphinx/issues/3788
"""
- pass
+ from docutils.languages import get_language as original_get_language
+
+ def patched_get_language(language_code, reporter=None):
+ return original_get_language(language_code)
+
+ try:
+ docutils.languages.get_language = patched_get_language
+ yield
+ finally:
+ docutils.languages.get_language = original_get_language
@contextmanager
@@ -115,19 +129,41 @@ def patched_rst_get_language() ->Iterator[None]:
refs: https://github.com/sphinx-doc/sphinx/issues/10179
"""
- pass
+ from docutils.parsers.rst.languages import get_language as original_get_language
+
+ def patched_get_language(language_code, reporter=None):
+ return original_get_language(language_code)
+
+ try:
+ docutils.parsers.rst.languages.get_language = patched_get_language
+ yield
+ finally:
+ docutils.parsers.rst.languages.get_language = original_get_language
@contextmanager
def using_user_docutils_conf(confdir: (str | None)) ->Iterator[None]:
"""Let docutils know the location of ``docutils.conf`` for Sphinx."""
- pass
+ try:
+ if confdir:
+ docutilsconfig = os.environ.get('DOCUTILSCONFIG', None)
+ os.environ['DOCUTILSCONFIG'] = path.join(confdir, 'docutils.conf')
+ yield
+ finally:
+ if confdir:
+ if docutilsconfig is None:
+ os.environ.pop('DOCUTILSCONFIG', None)
+ else:
+ os.environ['DOCUTILSCONFIG'] = docutilsconfig
@contextmanager
def patch_docutils(confdir: (str | None)=None) ->Iterator[None]:
"""Patch to docutils temporarily."""
- pass
+ with patched_get_language(), \
+ patched_rst_get_language(), \
+ using_user_docutils_conf(confdir):
+ yield
class CustomReSTDispatcher:
diff --git a/sphinx/util/exceptions.py b/sphinx/util/exceptions.py
index 53e2b545f..f52afced5 100644
--- a/sphinx/util/exceptions.py
+++ b/sphinx/util/exceptions.py
@@ -11,9 +11,24 @@ if TYPE_CHECKING:
def save_traceback(app: (Sphinx | None), exc: BaseException) ->str:
"""Save the given exception's traceback in a temporary file."""
- pass
+    # Format *exc* itself: traceback.print_exc() formats the exception
+    # currently being handled, which is not necessarily *exc*.
+    if isinstance(exc, SphinxParallelError):
+        exc_format = '(Error in parallel process)\n' + exc.traceback
+    else:
+        exc_format = ''.join(
+            traceback.format_exception(type(exc), exc, exc.__traceback__))
+    with NamedTemporaryFile('w', delete=False, suffix='.log') as f:
+        f.write(exc_format)
+    return f.name
def format_exception_cut_frames(x: int=1) ->str:
"""Format an exception with traceback, but only the last x frames."""
- pass
+ type_, value, tb = sys.exc_info()
+ if tb:
+ tb_list = traceback.extract_tb(tb)
+ if len(tb_list) > x:
+ tb_list = tb_list[-x:]
+ formatted = traceback.format_list(tb_list)
+ formatted.extend(traceback.format_exception_only(type_, value))
+ else:
+ formatted = traceback.format_exception_only(type_, value)
+ return ''.join(strip_escape_sequences(line) for line in formatted)
diff --git a/sphinx/util/fileutil.py b/sphinx/util/fileutil.py
index fa4bab77f..01b620f1b 100644
--- a/sphinx/util/fileutil.py
+++ b/sphinx/util/fileutil.py
@@ -19,7 +19,10 @@ def _template_basename(filename: (str | os.PathLike[str])) ->(str | None):
If the input looks like a template, then return the filename output should
be written to. Otherwise, return no result (None).
"""
- pass
+    # Work on the full path, not just the basename: callers use the
+    # return value as the output path to write to.
+    filename = os.fspath(filename)
+    if filename.endswith('_t'):
+        return filename[:-2]
+    if filename.endswith('.template'):
+        return filename[:-len('.template')]
+    return None
def copy_asset_file(source: (str | os.PathLike[str]), destination: (str |
@@ -36,7 +39,23 @@ def copy_asset_file(source: (str | os.PathLike[str]), destination: (str |
:param renderer: The template engine. If not given, SphinxRenderer is used by default
:param bool force: Overwrite the destination file even if it exists.
"""
- pass
+ if os.path.isdir(destination):
+ destination = os.path.join(destination, os.path.basename(source))
+
+ if os.path.exists(destination) and not force:
+ return
+
+    if context is not None and _template_basename(source):
+        if renderer is None:
+            from sphinx.util.template import SphinxRenderer
+            renderer = SphinxRenderer()
+        with open(source, encoding='utf-8') as f:
+            template = f.read()
+        # Strip the template suffix from the output name as well, so we
+        # don't emit e.g. "page.html_t" next to the rendered content.
+        destination = os.fspath(destination)
+        if destination.endswith('_t'):
+            destination = destination[:-2]
+        elif destination.endswith('.template'):
+            destination = destination[:-len('.template')]
+        with open(destination, 'w', encoding='utf-8') as f:
+            f.write(renderer.render_string(template, context))
+    else:
+        copyfile(source, destination)
def copy_asset(source: (str | os.PathLike[str]), destination: (str | os.
@@ -59,4 +78,32 @@ def copy_asset(source: (str | os.PathLike[str]), destination: (str | os.
:param onerror: The error handler.
:param bool force: Overwrite the destination file even if it exists.
"""
- pass
+ if not os.path.exists(source):
+ logger.warning(__('Cannot find asset file: %s'), source)
+ return
+
+ if os.path.isfile(source):
+ copy_asset_file(source, destination, context, renderer, force=force)
+ else:
+ ensuredir(destination)
+ for root, dirs, files in os.walk(source):
+ for dir_name in dirs[:]:
+ if excluded(os.path.join(root, dir_name)):
+ dirs.remove(dir_name)
+
+ for filename in files:
+ if excluded(os.path.join(root, filename)):
+ continue
+
+ source_path = os.path.join(root, filename)
+ relative_path = os.path.relpath(source_path, source)
+ dest_path = os.path.join(destination, relative_path)
+
+ try:
+ ensuredir(os.path.dirname(dest_path))
+ copy_asset_file(source_path, dest_path, context, renderer, force=force)
+ except Exception as exc:
+ if onerror:
+ onerror(source_path, exc)
+ else:
+ raise
diff --git a/sphinx/util/http_date.py b/sphinx/util/http_date.py
index 0a1f2f186..2fb017e7d 100644
--- a/sphinx/util/http_date.py
+++ b/sphinx/util/http_date.py
@@ -14,9 +14,17 @@ _GMT_OFFSET = float(time.localtime().tm_gmtoff)
def epoch_to_rfc1123(epoch: float) ->str:
"""Return HTTP-date string from epoch offset."""
- pass
+ t = time.gmtime(epoch)
+ return f"{_WEEKDAY_NAME[t.tm_wday]}, {t.tm_mday:02d} {_MONTH_NAME[t.tm_mon]} {t.tm_year} {t.tm_hour:02d}:{t.tm_min:02d}:{t.tm_sec:02d} GMT"
def rfc1123_to_epoch(rfc1123: str) ->float:
"""Return epoch offset from HTTP-date string."""
- pass
+ try:
+ t = parsedate_tz(rfc1123)
+ if t is None:
+ raise ValueError("Invalid RFC 1123 date format")
+        return time.mktime(t[:9]) - (t[9] or 0) + _GMT_OFFSET
+ except Exception:
+ warnings.warn("Invalid date format (expected RFC 1123)", RemovedInSphinx90Warning, stacklevel=2)
+ return 0
diff --git a/sphinx/util/i18n.py b/sphinx/util/i18n.py
index c493eb2cf..b0ca00411 100644
--- a/sphinx/util/i18n.py
+++ b/sphinx/util/i18n.py
@@ -65,10 +65,44 @@ class CatalogRepository:
self.language = language
self.encoding = encoding
+ def get_catalog_files(self) -> Iterator[CatalogInfo]:
+ for locale_dir in self._locale_dirs:
+ catalog_dir = path.join(self.basedir, locale_dir, self.language, 'LC_MESSAGES')
+ if not path.exists(catalog_dir):
+ continue
+
+ for domain in os.listdir(catalog_dir):
+ if domain.endswith('.po'):
+ domain = domain[:-3]
+ yield CatalogInfo(catalog_dir, domain, self.encoding)
+
+ def write_mo(self, catalog: CatalogInfo) -> None:
+ po_file = path.join(catalog.base_dir, catalog.domain + '.po')
+ mo_file = path.join(catalog.base_dir, catalog.domain + '.mo')
+ with open(po_file, 'rb') as infile:
+ try:
+ po = read_po(infile, catalog.charset)
+ except Exception as exc:
+ logger.warning(__('reading error: %s, %s'), po_file, exc)
+ return
+
+ with open(mo_file, 'wb') as outfile:
+ try:
+ write_mo(outfile, po)
+ except Exception as exc:
+ logger.warning(__('writing error: %s, %s'), mo_file, exc)
+
+ def compile_catalogs(self) -> None:
+ for catalog in self.get_catalog_files():
+ self.write_mo(catalog)
+
def docname_to_domain(docname: str, compaction: (bool | str)) ->str:
"""Convert docname to domain for catalogs."""
- pass
+ if compaction:
+ return docname.split(SEP, 1)[0]
+ else:
+ return docname.replace(SEP, '_')
date_format_mappings = {'%a': 'EEE', '%A': 'EEEE', '%b': 'MMM', '%B':
diff --git a/sphinx/util/index_entries.py b/sphinx/util/index_entries.py
index 56ebb9a85..124a006cd 100644
--- a/sphinx/util/index_entries.py
+++ b/sphinx/util/index_entries.py
@@ -3,4 +3,10 @@ from __future__ import annotations
def _split_into(n: int, type: str, value: str) ->list[str]:
"""Split an index entry into a given number of parts at semicolons."""
- pass
+ parts = value.split(';', n - 1)
+ if len(parts) < n:
+ parts.extend([''] * (n - len(parts)))
+ elif len(parts) > n:
+ parts[n-1] = ';'.join(parts[n-1:])
+ parts = parts[:n]
+ return parts
diff --git a/sphinx/util/inspect.py b/sphinx/util/inspect.py
index 5e7c1ae78..e279ff3b1 100644
--- a/sphinx/util/inspect.py
+++ b/sphinx/util/inspect.py
@@ -62,7 +62,9 @@ def unwrap(obj: Any) ->Any:
Mocked objects are returned as is.
"""
- pass
+ if hasattr(obj, '__wrapped__'):
+ return unwrap(obj.__wrapped__)
+ return obj
def unwrap_all(obj: Any, *, stop: (Callable[[Any], bool] | None)=None) ->Any:
@@ -74,7 +76,17 @@ def unwrap_all(obj: Any, *, stop: (Callable[[Any], bool] | None)=None) ->Any:
When specified, *stop* is a predicate indicating whether an object should
be unwrapped or not.
"""
- pass
+ while True:
+ if stop and stop(obj):
+ return obj
+ if hasattr(obj, '__wrapped__'):
+ obj = obj.__wrapped__
+ elif isinstance(obj, (partial, partialmethod)):
+ obj = obj.func
+ elif isinstance(obj, (classmethod, staticmethod)):
+ obj = obj.__func__
+ else:
+ return obj
def getall(obj: Any) ->(Sequence[str] | None):
@@ -84,22 +96,29 @@ def getall(obj: Any) ->(Sequence[str] | None):
raises :exc:`ValueError` if ``obj.__all__`` is not a list or tuple of
strings.
"""
- pass
+ if not hasattr(obj, '__all__'):
+ return None
+ all_attr = getattr(obj, '__all__')
+ if not isinstance(all_attr, (list, tuple)):
+ raise ValueError('__all__ must be a list or tuple of strings')
+ if not all(isinstance(item, str) for item in all_attr):
+ raise ValueError('__all__ must contain only strings')
+ return all_attr
def getannotations(obj: Any) ->Mapping[str, Any]:
"""Safely get the ``__annotations__`` attribute of an object."""
- pass
+ return getattr(obj, '__annotations__', {})
def getglobals(obj: Any) ->Mapping[str, Any]:
"""Safely get :attr:`obj.__globals__ <function.__globals__>`."""
- pass
+ return getattr(obj, '__globals__', {})
def getmro(obj: Any) ->tuple[type, ...]:
"""Safely get :attr:`obj.__mro__ <class.__mro__>`."""
- pass
+ return getattr(obj, '__mro__', ())
def getorigbases(obj: Any) ->(tuple[Any, ...] | None):
@@ -108,7 +127,12 @@ def getorigbases(obj: Any) ->(tuple[Any, ...] | None):
This returns ``None`` if the object is not a class or if ``__orig_bases__``
is not well-defined (e.g., a non-tuple object or an empty sequence).
"""
- pass
+ if not isinstance(obj, type):
+ return None
+ orig_bases = getattr(obj, '__orig_bases__', None)
+ if not isinstance(orig_bases, tuple) or len(orig_bases) == 0:
+ return None
+ return orig_bases
def getslots(obj: Any) ->(dict[str, Any] | dict[str, None] | None):
@@ -118,7 +142,19 @@ def getslots(obj: Any) ->(dict[str, Any] | dict[str, None] | None):
- This raises a :exc:`TypeError` if *obj* is not a class.
- This raises a :exc:`ValueError` if ``obj.__slots__`` is invalid.
"""
- pass
+ if not isinstance(obj, type):
+ raise TypeError("Expected a class object")
+
+ slots = getattr(obj, '__slots__', None)
+ if slots is None:
+ return None
+
+ if isinstance(slots, str):
+ return {slots: None}
+ elif isinstance(slots, (list, tuple)):
+ return {slot: None for slot in slots}
+ else:
+ raise ValueError("Invalid __slots__ attribute")
def isenumclass(x: Any) ->TypeIs[type[enum.Enum]]:
diff --git a/sphinx/util/inventory.py b/sphinx/util/inventory.py
index 5648e43b1..d2ac4aa2b 100644
--- a/sphinx/util/inventory.py
+++ b/sphinx/util/inventory.py
@@ -25,7 +25,103 @@ class InventoryFileReader:
self.stream = stream
self.buffer = b''
self.eof = False
+ self.decompressor = None
+
+ def read_buffer(self) ->None:
+ chunk = self.stream.read(BUFSIZE)
+ if not chunk:
+ self.eof = True
+ return
+ if self.decompressor:
+ try:
+ self.buffer += self.decompressor.decompress(chunk)
+ except zlib.error:
+ # If decompression fails, treat the chunk as uncompressed
+ self.buffer += chunk
+ self.decompressor = None
+ else:
+ self.buffer += chunk
+
+ def readline(self) ->str:
+ while b'\n' not in self.buffer:
+ if self.eof:
+ break
+ self.read_buffer()
+ if b'\n' in self.buffer:
+ line, self.buffer = self.buffer.split(b'\n', 1)
+ else:
+ line = self.buffer
+ self.buffer = b''
+ return line.decode('utf-8')
+
+ def read_compressed_chunks(self) ->Iterator[bytes]:
+ self.decompressor = zlib.decompressobj()
+ while not self.eof:
+ self.read_buffer()
+ yield self.buffer
+ self.buffer = b''
+ if self.decompressor.unused_data:
+ yield self.decompressor.unused_data
class InventoryFile:
- pass
+ def __init__(self) ->None:
+ self.projects: dict[str, Inventory] = {}
+
+ def load(self, stream: IO[bytes], uri: str, joinfunc: Callable[[str, str], str]) ->None:
+ reader = InventoryFileReader(stream)
+ line = reader.readline().rstrip()
+ if line == '# Sphinx inventory version 1':
+ self.load_v1(reader, uri, joinfunc)
+ elif line == '# Sphinx inventory version 2':
+ self.load_v2(reader, uri, joinfunc)
+ else:
+ raise ValueError('invalid inventory header: %s' % line)
+
+ def load_v1(self, reader: InventoryFileReader, uri: str, joinfunc: Callable[[str, str], str]) ->None:
+ projname = reader.readline().rstrip()[11:]
+ for line in reader.read_compressed_chunks():
+ name, type, location = line.decode().rstrip().split(None, 2)
+ self.projects.setdefault(projname, {})[name] = (type, location)
+
+ def load_v2(self, reader: InventoryFileReader, uri: str, joinfunc: Callable[[str, str], str]) ->None:
+ projname = reader.readline().rstrip()[11:]
+ version = reader.readline().rstrip()[11:]
+ reader.readline() # skip the "zlib" line
+ for line in reader.read_compressed_chunks():
+ name, type, prio, location, dispname = line.decode().rstrip().split(None, 4)
+ if location.endswith('$'):
+ location = location[:-1] + name
+ location = joinfunc(uri, location)
+ self.projects.setdefault(projname, {})[name] = (type, prio, location, dispname)
+
+    def dump(self, f: IO[bytes]) ->None:
+        f.write(b'# Sphinx inventory version 2\n')
+        for (projname, items) in self.projects.items():
+            f.write(b'# Project: %s\n' % projname.encode())
+            f.write(b'# Version: \n')
+            f.write(b'# The remainder of this file is compressed using zlib.\n')
+            compressor = zlib.compressobj(9)
+            for name, info in sorted(items.items()):
+                type, prio, location, dispname = info
+                if dispname == name:
+                    dispname = '-'
+                f.write(compressor.compress(
+                    f'{name} {type} {prio} {location} {dispname}\n'.encode()))
+            f.write(compressor.flush())
+
+ def __contains__(self, name: str) ->bool:
+ for project in self.projects.values():
+ if name in project:
+ return True
+ return False
+
+ def __getitem__(self, name: str) ->InventoryItem:
+ for project in self.projects.values():
+ if name in project:
+ return project[name]
+ raise KeyError(name)
+
+ def __iter__(self) ->Iterator[tuple[str, InventoryItem]]:
+ for project in self.projects.values():
+ yield from project.items()
diff --git a/sphinx/util/logging.py b/sphinx/util/logging.py
index 00c940a4c..c69e94e92 100644
--- a/sphinx/util/logging.py
+++ b/sphinx/util/logging.py
@@ -41,12 +41,16 @@ def getLogger(name: str) ->SphinxLoggerAdapter:
>>> logger.info('Hello, this is an extension!')
Hello, this is an extension!
"""
- pass
+ logger = logging.getLogger(NAMESPACE + '.' + name)
+ return SphinxLoggerAdapter(logger, {})
def convert_serializable(records: list[logging.LogRecord]) ->None:
"""Convert LogRecord serializable."""
- pass
+    for record in records:
+        record.msg = record.getMessage()  # freeze message; raw args may be unpicklable or None
+        record.args = ()
+        record.exc_info = record.exc_text = None
class SphinxLogRecord(logging.LogRecord):
@@ -94,7 +98,15 @@ class SphinxLoggerAdapter(logging.LoggerAdapter):
:param once: Do not log this warning,
if a previous warning already has same ``msg``, ``args`` and ``once=True``.
"""
- pass
+        kwargs.setdefault('extra', {}).update({
+            'type': type,
+            'subtype': subtype,
+            'location': location,
+            'nonl': nonl,
+            'color': color,
+            'once': once
+        })
+        self.logger.warning(msg, *args, **kwargs)
class WarningStreamHandler(logging.StreamHandler):
@@ -120,7 +132,15 @@ def pending_warnings() ->Iterator[logging.Handler]:
Similar to :func:`pending_logging`.
"""
- pass
+ logger = logging.getLogger(NAMESPACE)
+ memhandler = MemoryHandler()
+ memhandler.setLevel(logging.WARNING)
+ logger.addHandler(memhandler)
+ try:
+ yield memhandler
+ finally:
+ logger.removeHandler(memhandler)
+ memhandler.flushTo(logger)
@contextmanager
@@ -134,7 +154,14 @@ def suppress_logging() ->Iterator[MemoryHandler]:
>>> some_long_process()
>>>
"""
- pass
+ logger = logging.getLogger(NAMESPACE)
+ memhandler = MemoryHandler()
+ memhandler.setLevel(logging.DEBUG)
+ logger.addHandler(memhandler)
+ try:
+ yield memhandler
+ finally:
+ logger.removeHandler(memhandler)
@contextmanager
@@ -149,7 +176,15 @@ def pending_logging() ->Iterator[MemoryHandler]:
>>>
Warning message! # the warning is flushed here
"""
- pass
+ logger = logging.getLogger(NAMESPACE)
+ memhandler = MemoryHandler()
+ memhandler.setLevel(logging.DEBUG)
+ logger.addHandler(memhandler)
+ try:
+ yield memhandler
+ finally:
+ logger.removeHandler(memhandler)
+ memhandler.flushTo(logger)
skip_warningiserror = nullcontext
@@ -166,7 +201,13 @@ def prefixed_warnings(prefix: str) ->Iterator[None]:
.. versionadded:: 2.0
"""
- pass
+ logger = logging.getLogger(NAMESPACE)
+ prefix_filter = MessagePrefixFilter(prefix)
+ logger.addFilter(prefix_filter)
+ try:
+ yield
+ finally:
+ logger.removeFilter(prefix_filter)
class LogCollector:
@@ -186,7 +227,18 @@ class _RaiseOnWarningFilter(logging.Filter):
def is_suppressed_warning(warning_type: str, sub_type: str,
suppress_warnings: (Set[str] | Sequence[str])) ->bool:
"""Check whether the warning is suppressed or not."""
- pass
+ if warning_type is None:
+ return False
+
+ for warning in suppress_warnings:
+ if warning == warning_type:
+ return True
+ if warning.endswith('*') and warning_type.startswith(warning[:-1]):
+ return True
+ if sub_type and warning == f"{warning_type}.{sub_type}":
+ return True
+
+ return False
class WarningSuppressor(logging.Filter):
diff --git a/sphinx/util/matching.py b/sphinx/util/matching.py
index de4967a6a..e2d5f53aa 100644
--- a/sphinx/util/matching.py
+++ b/sphinx/util/matching.py
@@ -14,7 +14,40 @@ def _translate_pattern(pat: str) ->str:
Adapted from the fnmatch module, but enhanced so that single stars don't
match slashes.
"""
- pass
+ i, n = 0, len(pat)
+ res = []
+ while i < n:
+ c = pat[i]
+ i = i + 1
+ if c == '*':
+ if i < n and pat[i] == '*':
+ res.append('.*')
+ i = i + 1
+ else:
+ res.append('[^/]*')
+ elif c == '?':
+ res.append('[^/]')
+ elif c == '[':
+ j = i
+ if j < n and pat[j] == '!':
+ j = j + 1
+ if j < n and pat[j] == ']':
+ j = j + 1
+ while j < n and pat[j] != ']':
+ j = j + 1
+ if j >= n:
+ res.append('\\[')
+ else:
+ stuff = pat[i:j].replace('\\', '\\\\')
+ i = j + 1
+ if stuff[0] == '!':
+ stuff = '^' + stuff[1:]
+ elif stuff[0] == '^':
+ stuff = '\\' + stuff
+ res.append('[%s]' % stuff)
+ else:
+ res.append(re.escape(c))
+    return '(?ms)' + ''.join(res) + r'\Z'
class Matcher:
@@ -41,7 +74,9 @@ def patmatch(name: str, pat: str) ->(re.Match[str] | None):
"""Return if name matches the regular expression (pattern)
``pat```. Adapted from fnmatch module.
"""
- pass
+ if pat not in _pat_cache:
+ _pat_cache[pat] = re.compile(_translate_pattern(pat))
+ return _pat_cache[pat].match(name)
def patfilter(names: Iterable[str], pat: str) ->list[str]:
@@ -50,7 +85,7 @@ def patfilter(names: Iterable[str], pat: str) ->list[str]:
Adapted from fnmatch module.
"""
- pass
+ return [name for name in names if patmatch(name, pat)]
def get_matching_files(dirname: (str | os.PathLike[str]), include_patterns:
@@ -64,4 +99,14 @@ def get_matching_files(dirname: (str | os.PathLike[str]), include_patterns:
exclusions from *exclude_patterns* take priority over inclusions.
"""
- pass
+ dirname = os.path.abspath(dirname)
+ include_matcher = Matcher(include_patterns)
+ exclude_matcher = Matcher(exclude_patterns)
+
+ for root, _, files in os.walk(dirname):
+ for filename in files:
+ path = os.path.join(root, filename)
+ rel_path = os.path.relpath(path, dirname)
+ rel_path = path_stabilize(rel_path)
+ if include_matcher(rel_path) and not exclude_matcher(rel_path):
+ yield canon_path(rel_path)
diff --git a/sphinx/util/nodes.py b/sphinx/util/nodes.py
index c491cf3fb..d9fa4da0e 100644
--- a/sphinx/util/nodes.py
+++ b/sphinx/util/nodes.py
@@ -59,7 +59,8 @@ class NodeMatcher(Generic[N]):
While the `NodeMatcher` object can be used as an argument to `Node.findall`, doing so
confounds type checkers' ability to determine the return type of the iterator.
"""
- pass
+ for child in node.findall(self):
+ yield cast(N, child)
def get_full_module_name(node: Node) ->str:
@@ -69,7 +70,9 @@ def get_full_module_name(node: Node) ->str:
:param nodes.Node node: target node
:return: full module dotted path
"""
- pass
+ module = node.__class__.__module__
+ class_name = node.__class__.__name__
+ return f"{module}.{class_name}"
def repr_domxml(node: Node, length: int=80) ->str:
@@ -83,7 +86,14 @@ def repr_domxml(node: Node, length: int=80) ->str:
returns full of DOM XML representation.
:return: DOM XML representation
"""
- pass
+ from xml.dom import minidom
+ document = minidom.parseString(node.asdom().toxml())
+ dom_string = document.documentElement.toxml()
+
+ if length:
+ return dom_string[:length] + '...' if len(dom_string) > length else dom_string
+ else:
+ return dom_string
IGNORED_NODES = (nodes.Invisible, nodes.literal_block, nodes.doctest_block,
@@ -95,13 +105,23 @@ IMAGE_TYPE_NODES = nodes.image,
def extract_messages(doctree: Element) ->Iterable[tuple[Element, str]]:
"""Extract translatable messages from a document tree."""
- pass
+ for node in doctree.traverse(nodes.TextElement):
+ if not isinstance(node, IGNORED_NODES) and node.get('translatable', True):
+ if isinstance(node, LITERAL_TYPE_NODES):
+ msg = node.rawsource
+ else:
+ msg = node.astext()
+
+ if msg:
+ yield (node, msg)
def traverse_translatable_index(doctree: Element) ->Iterable[tuple[Element,
list[tuple[str, str, str, str, str | None]]]]:
"""Traverse translatable index node from a document tree."""
- pass
+ for node in doctree.traverse(addnodes.index):
+ if 'entries' in node:
+ yield (node, node['entries'])
def nested_parse_with_titles(state: RSTState, content: StringList, node:
@@ -115,7 +135,9 @@ def nested_parse_with_titles(state: RSTState, content: StringList, node:
This function is retained for compatibility and will be deprecated in
Sphinx 8. Prefer ``nested_parse_to_nodes()``.
"""
- pass
+ with _fresh_title_style_context():
+ state.nested_parse(content, content_offset, node, match_titles=True)
+ return node.astext()
def clean_astext(node: Element) ->str:
diff --git a/sphinx/util/osutil.py b/sphinx/util/osutil.py
index 83ada565b..ec61ddd1c 100644
--- a/sphinx/util/osutil.py
+++ b/sphinx/util/osutil.py
@@ -20,22 +20,31 @@ SEP = '/'
def canon_path(native_path: (str | os.PathLike[str]), /) ->str:
"""Return path in OS-independent form"""
- pass
+ return str(Path(native_path).as_posix())
def path_stabilize(filepath: (str | os.PathLike[str]), /) ->str:
"""Normalize path separator and unicode string"""
- pass
+ return unicodedata.normalize('NFC', str(Path(filepath).as_posix()))
def relative_uri(base: str, to: str) ->str:
"""Return a relative URL from ``base`` to ``to``."""
- pass
+    if to.startswith(SEP):
+        return to
+    b2, t2 = base.split('#')[0].split(SEP), to.split('#')[0].split(SEP)
+    # drop common leading directory segments (never the final filename segment)
+    while len(b2) > 1 and len(t2) > 1 and b2[0] == t2[0]:
+        b2.pop(0), t2.pop(0)
+    if b2 == t2 or t2 == ['']:
+        # same document, or a link to the current directory's index
+        return ''
+    return ('..' + SEP) * (len(b2) - 1) + SEP.join(t2)
def ensuredir(file: (str | os.PathLike[str])) ->None:
"""Ensure that a path exists."""
- pass
+ Path(file).mkdir(parents=True, exist_ok=True)
def _last_modified_time(source: (str | os.PathLike[str]), /) ->int:
@@ -48,13 +57,15 @@ def _last_modified_time(source: (str | os.PathLike[str]), /) ->int:
We prefer to err on the side of re-rendering a file,
so we round up to the nearest microsecond.
"""
- pass
+    # ceil-divide the nanosecond timestamp: rounds up to whole microseconds
+    return -(os.stat(source).st_mtime_ns // -1_000)
def _copy_times(source: (str | os.PathLike[str]), dest: (str | os.PathLike[
str])) ->None:
"""Copy a file's modification times."""
- pass
+ st = os.stat(source)
+ os.utime(dest, (st.st_atime, st.st_mtime))
def copyfile(source: (str | os.PathLike[str]), dest: (str | os.PathLike[str
@@ -68,7 +79,16 @@ def copyfile(source: (str | os.PathLike[str]), dest: (str | os.PathLike[str
.. note:: :func:`copyfile` is a no-op if *source* and *dest* are identical.
"""
- pass
+ if not os.path.exists(source):
+ raise FileNotFoundError(f"Source file '{source}' does not exist")
+
+    if os.path.exists(dest) and os.path.samefile(source, dest):
+ return
+
+ if os.path.exists(dest) and not force:
+ return
+
+ shutil.copy2(source, dest)
_no_fn_re = re.compile('[^a-zA-Z0-9_-]')
@@ -82,7 +102,11 @@ def relpath(path: (str | os.PathLike[str]), start: (str | os.PathLike[str] |
This is an alternative of ``os.path.relpath()``. This returns original path
if *path* and *start* are on different drives (for Windows platform).
"""
- pass
+ try:
+ return os.path.relpath(path, start)
+ except ValueError:
+ # If path and start are on different drives (Windows)
+ return str(path)
safe_relpath = relpath
diff --git a/sphinx/util/parallel.py b/sphinx/util/parallel.py
index d4b1bfccb..a6ea00da0 100644
--- a/sphinx/util/parallel.py
+++ b/sphinx/util/parallel.py
@@ -22,7 +22,18 @@ class SerialTasks:
"""Has the same interface as ParallelTasks, but executes tasks directly."""
def __init__(self, nproc: int=1) ->None:
- pass
+ self.nproc = nproc
+ self.tasks = []
+
+ def add_task(self, task_func: Callable, arg: Any=None, result_func: Callable | None=None) -> None:
+ self.tasks.append((task_func, arg, result_func))
+
+ def join(self) -> None:
+ for task_func, arg, result_func in self.tasks:
+ result = task_func(arg)
+ if result_func:
+ result_func(result)
+ self.tasks.clear()
class ParallelTasks:
@@ -37,3 +48,64 @@ class ParallelTasks:
self._precvsWaiting: dict[int, Any] = {}
self._pworking = 0
self._taskid = 0
+
+ def add_task(self, task_func: Callable, arg: Any=None, result_func: Callable | None=None) -> None:
+ self._taskid += 1
+ self._result_funcs[self._taskid] = result_func
+ self._args[self._taskid] = [arg]
+
+ if self._pworking < self.nproc:
+ self._start_task(self._taskid, task_func)
+ else:
+ self._precvsWaiting[self._taskid] = task_func
+
+ def _start_task(self, taskid: int, task_func: Callable) -> None:
+ precv, psend = multiprocessing.Pipe(False)
+ proc = multiprocessing.Process(target=self._process_task,
+ args=(taskid, task_func, psend, self._args[taskid]))
+ proc.start()
+ self._procs[taskid] = proc
+ self._precvs[taskid] = precv
+ self._pworking += 1
+
+ def _process_task(self, taskid: int, task_func: Callable, psend: Any, args: list[Any]) -> None:
+ try:
+ if args:
+ ret = task_func(*args)
+ else:
+ ret = task_func()
+ psend.send((taskid, ret, None))
+ except Exception as e:
+ psend.send((taskid, None, e))
+
+ def join(self) -> None:
+ while self._pworking:
+ for taskid, precv in list(self._precvs.items()):
+ if precv.poll():
+ res = precv.recv()
+ self._finish_task(res)
+ break
+ else:
+ time.sleep(0.02)
+
+ def _finish_task(self, res: tuple[int, Any, Exception | None]) -> None:
+ taskid, result, exc = res
+ if taskid in self._procs:
+ self._procs[taskid].join()
+ del self._procs[taskid]
+ del self._precvs[taskid]
+ self._pworking -= 1
+
+ if exc:
+ raise SphinxParallelError(exc)
+
+ result_func = self._result_funcs.get(taskid)
+ if result_func:
+ result_func(result)
+
+ if self._precvsWaiting:
+ newtaskid, task_func = self._precvsWaiting.popitem()
+ self._start_task(newtaskid, task_func)
+
+ del self._result_funcs[taskid]
+ del self._args[taskid]
diff --git a/sphinx/util/parsing.py b/sphinx/util/parsing.py
index cc99e270e..7d0c7e5ef 100644
--- a/sphinx/util/parsing.py
+++ b/sphinx/util/parsing.py
@@ -40,4 +40,27 @@ def nested_parse_to_nodes(state: RSTState, text: (str | StringList), *,
.. versionadded:: 7.4
"""
- pass
+ if isinstance(text, str):
+ text = StringList(string2lines(text), source=source)
+ elif isinstance(text, StringList):
+ text = text.copy()
+ text.source = source
+ else:
+ raise TypeError("text must be a string or StringList")
+
+ memo = state.memo
+ memo.title_styles = []
+ memo.section_level = 0
+ memo.section_bubble_up_kludge = False
+ memo.allow_section_headings = allow_section_headings
+ memo.keep_title_context = keep_title_context
+
+ node = Element()
+ node.document = state.document
+ node.source = source
+ node.line = offset + 1
+
+    # contextlib.suppress() takes exception types, not callables; parse directly
+    state.nested_parse(text, offset, node, match_titles=True)
+
+ return node.children
diff --git a/sphinx/util/png.py b/sphinx/util/png.py
index fb20e2105..699ebdec2 100644
--- a/sphinx/util/png.py
+++ b/sphinx/util/png.py
@@ -11,7 +11,25 @@ IEND_CHUNK = b'\x00\x00\x00\x00IEND\xaeB`\x82'
def read_png_depth(filename: str) ->(int | None):
"""Read the special tEXt chunk indicating the depth from a PNG file."""
- pass
+ with open(filename, 'rb') as f:
+ data = f.read()
+
+ if len(data) < LEN_DEPTH:
+ return None
+
+ pos = data.rfind(IEND_CHUNK)
+ if pos == -1:
+ return None
+
+ chunk_start = data.rfind(DEPTH_CHUNK_START, 0, pos)
+ if chunk_start == -1:
+ return None
+
+    depth_str = data[chunk_start + len(DEPTH_CHUNK_START):pos - 4].split(b'\0', 1)[0]
+ try:
+ return int(depth_str)
+ except ValueError:
+ return None
def write_png_depth(filename: str, depth: int) ->None:
@@ -19,4 +37,22 @@ def write_png_depth(filename: str, depth: int) ->None:
The chunk is placed immediately before the special IEND chunk.
"""
- pass
+ with open(filename, 'rb') as f:
+ data = f.read()
+
+ pos = data.rfind(IEND_CHUNK)
+ if pos == -1:
+ raise ValueError("Invalid PNG file: IEND chunk not found")
+
+ depth_chunk = (
+ DEPTH_CHUNK_LEN +
+ DEPTH_CHUNK_START +
+ str(depth).encode('ascii') +
+ b'\0' +
+ struct.pack('!I', binascii.crc32(DEPTH_CHUNK_START + str(depth).encode('ascii') + b'\0'))
+ )
+
+ new_data = data[:pos] + depth_chunk + data[pos:]
+
+ with open(filename, 'wb') as f:
+ f.write(new_data)
diff --git a/sphinx/util/requests.py b/sphinx/util/requests.py
index c1ee76baa..58a818df9 100644
--- a/sphinx/util/requests.py
+++ b/sphinx/util/requests.py
@@ -14,7 +14,15 @@ _USER_AGENT = (
def _get_tls_cacert(url: str, certs: (str | dict[str, str] | None)) ->(str |
bool):
"""Get additional CA cert for a specific URL."""
- pass
+ if certs is None:
+ return True
+ elif isinstance(certs, str):
+ return certs
+ else:
+ hostname = urlsplit(url).hostname
+ if hostname:
+ return certs.get(hostname, True)
+ return True
def get(url: str, **kwargs: Any) ->requests.Response:
@@ -22,7 +30,11 @@ def get(url: str, **kwargs: Any) ->requests.Response:
This sets up User-Agent header and TLS verification automatically.
"""
- pass
+ kwargs.setdefault('headers', {}).setdefault('User-Agent', _USER_AGENT)
+ kwargs.setdefault('verify', _get_tls_cacert(url, kwargs.pop('certs', None)))
+ with warnings.catch_warnings():
+ warnings.filterwarnings('ignore', category=InsecureRequestWarning)
+ return requests.get(url, **kwargs)
def head(url: str, **kwargs: Any) ->requests.Response:
@@ -30,7 +42,11 @@ def head(url: str, **kwargs: Any) ->requests.Response:
This sets up User-Agent header and TLS verification automatically.
"""
- pass
+ kwargs.setdefault('headers', {}).setdefault('User-Agent', _USER_AGENT)
+ kwargs.setdefault('verify', _get_tls_cacert(url, kwargs.pop('certs', None)))
+ with warnings.catch_warnings():
+ warnings.filterwarnings('ignore', category=InsecureRequestWarning)
+ return requests.head(url, **kwargs)
class _Session(requests.Session):
@@ -42,4 +58,13 @@ class _Session(requests.Session):
This sets up User-Agent header and TLS verification automatically.
"""
- pass
+ kwargs.setdefault('headers', {}).setdefault('User-Agent', _user_agent or _USER_AGENT)
+ if _tls_info:
+ kwargs['verify'] = _tls_info[0]
+ if len(_tls_info) > 1:
+ kwargs['cert'] = _tls_info[1]
+ else:
+ kwargs.setdefault('verify', _get_tls_cacert(url, kwargs.pop('certs', None)))
+ with warnings.catch_warnings():
+ warnings.filterwarnings('ignore', category=InsecureRequestWarning)
+ return super().request(method, url, **kwargs)
diff --git a/sphinx/util/rst.py b/sphinx/util/rst.py
index 21fe03367..c6e4e22f2 100644
--- a/sphinx/util/rst.py
+++ b/sphinx/util/rst.py
@@ -25,20 +25,43 @@ WIDECHARS['ja'] = 'WFA'
def textwidth(text: str, widechars: str='WF') ->int:
"""Get width of text."""
- pass
+ width = 0
+ for char in text:
+ if east_asian_width(char) in WIDECHARS[widechars]:
+ width += 2
+ else:
+ width += 1
+ return width
@pass_environment
def heading(env: Environment, text: str, level: int=1) ->str:
"""Create a heading for *level*."""
- pass
+ if level <= 0:
+ raise ValueError("Heading level must be positive")
+ if level > len(SECTIONING_CHARS):
+ char = SECTIONING_CHARS[-1]
+ else:
+ char = SECTIONING_CHARS[level - 1]
+ width = textwidth(text)
+ return f"{text}\n{char * width}"
def prepend_prolog(content: StringList, prolog: str) ->None:
"""Prepend a string to content body as prolog."""
- pass
+    if not prolog:
+        return
+    # docutils StringList needs source info when inserting plain strings
+    for lineno, line in enumerate(prolog.splitlines()):
+        content.insert(lineno, line, source='<rst_prolog>', offset=lineno)
+    content.insert(lineno + 1, '', source='<generated>', offset=0)
def append_epilog(content: StringList, epilog: str) ->None:
"""Append a string to content body as epilog."""
- pass
+    if not epilog:
+        return
+    # docutils StringList needs source info when appending plain strings
+    content.append('', source='<generated>', offset=0)
+    for lineno, line in enumerate(epilog.splitlines()):
+        content.append(line, source='<rst_epilog>', offset=lineno)
diff --git a/sphinx/util/tags.py b/sphinx/util/tags.py
index 808df44b6..77515cd95 100644
--- a/sphinx/util/tags.py
+++ b/sphinx/util/tags.py
@@ -40,4 +40,28 @@ class Tags:
are permitted, and operate on tag names, where truthy values mean
the tag is present and vice versa.
"""
- pass
+ if condition in self._condition_cache:
+ return self._condition_cache[condition]
+
+ try:
+ parser = BooleanParser(_ENV.parse(condition))
+ ast = parser.parse_expression()
+ result = self._eval_node(ast)
+ self._condition_cache[condition] = result
+ return result
+ except jinja2.exceptions.TemplateSyntaxError as exc:
+ raise ValueError(f'Invalid boolean expression: {condition}') from exc
+
+ def _eval_node(self, node: jinja2.nodes.Node) -> bool:
+ if isinstance(node, jinja2.nodes.Name):
+ return node.name in self._tags
+ elif isinstance(node, jinja2.nodes.Not):
+ return not self._eval_node(node.node)
+ elif isinstance(node, jinja2.nodes.And):
+ return self._eval_node(node.left) and self._eval_node(node.right)
+ elif isinstance(node, jinja2.nodes.Or):
+ return self._eval_node(node.left) or self._eval_node(node.right)
+ elif isinstance(node, jinja2.nodes.Const):
+ return bool(node.value)
+ else:
+ raise ValueError(f'Invalid node type: {type(node)}')
diff --git a/sphinx/util/texescape.py b/sphinx/util/texescape.py
index 2ed1eb943..0e0da9b4c 100644
--- a/sphinx/util/texescape.py
+++ b/sphinx/util/texescape.py
@@ -34,14 +34,36 @@ _tex_hlescape_map_without_unicode: dict[int, str] = {}
def escape(s: str, latex_engine: (str | None)=None) ->str:
"""Escape text for LaTeX output."""
- pass
+ if latex_engine == 'platex':
+ # for Japanese environment
+ s = s.replace('\\', '\\textbackslash{}')
+ s = s.replace('{', '\\{')
+ s = s.replace('}', '\\}')
+ s = s.replace('$', '\\$')
+ s = s.replace('%', '\\%')
+ s = s.replace('&', '\\&')
+ s = s.replace('#', '\\#')
+ s = s.replace('_', '\\_')
+ s = s.replace('~', '\\textasciitilde{}')
+ s = s.replace('^', '\\textasciicircum{}')
+ return s
+ else:
+ return ''.join(_tex_escape_map.get(ord(c), c) for c in s)
def hlescape(s: str, latex_engine: (str | None)=None) ->str:
"""Escape text for LaTeX highlighter."""
- pass
+ if latex_engine == 'platex':
+ # for Japanese environment
+ s = s.replace('\\', '\\textbackslash{}')
+ s = s.replace('{', '\\{')
+ s = s.replace('}', '\\}')
+ s = s.replace('$', '\\$')
+ return s
+ else:
+ return ''.join(_tex_hlescape_map.get(ord(c), c) for c in s)
def escape_abbr(text: str) ->str:
"""Adjust spacing after abbreviations. Works with @ letter or other."""
- pass
+ return re.sub(r'\.(?=\w)', r'.\@', text)
diff --git a/sphinx/util/typing.py b/sphinx/util/typing.py
index e0151d793..d24685be4 100644
--- a/sphinx/util/typing.py
+++ b/sphinx/util/typing.py
@@ -42,7 +42,7 @@ _INVALID_BUILTIN_CLASSES: Final[Mapping[object, str]] = {Context:
def is_invalid_builtin_class(obj: Any) ->bool:
"""Check *obj* is an invalid built-in class."""
- pass
+ return obj in _INVALID_BUILTIN_CLASSES
TextlikeNode: TypeAlias = nodes.Text | nodes.TextElement
@@ -99,22 +99,27 @@ def get_type_hints(obj: Any, globalns: (dict[str, Any] | None)=None,
This is a simple wrapper of `typing.get_type_hints()` that does not raise an error on
runtime.
"""
- pass
+ try:
+        return typing.get_type_hints(obj, globalns, localns, include_extras=include_extras)
+ except Exception:
+ logger.warning('Failed to get type hints for %r', obj)
+ return {}
def is_system_TypeVar(typ: Any) ->bool:
"""Check *typ* is system defined TypeVar."""
- pass
+ return isinstance(typ, TypeVar) and typ.__module__ == 'typing'
def _is_annotated_form(obj: Any) ->TypeIs[Annotated[Any, ...]]:
"""Check if *obj* is an annotated type."""
- pass
+    return typing.get_origin(obj) is Annotated
def _is_unpack_form(obj: Any) ->bool:
"""Check if the object is :class:`typing.Unpack` or equivalent."""
- pass
+ return (hasattr(typing, 'Unpack') and isinstance(obj, type(typing.Unpack))
+ or getattr(obj, '__origin__', None) is getattr(typing, 'Unpack', None))
def restify(cls: Any, mode: _RestifyMode='fully-qualified-except-typing'
@@ -129,7 +134,21 @@ def restify(cls: Any, mode: _RestifyMode='fully-qualified-except-typing'
    'smart'
        Show the name of the annotation.
    """
-    pass
+    # NOTE(review): simplified restify.  Generic aliases (e.g. list[int]) and
+    # typing special forms fall through to str(cls); only plain classes
+    # honour the 'smart' mode.  Confirm callers need no richer rendering.
+    if isinstance(cls, str):
+        return cls
+    elif isinstance(cls, type):
+        if mode == 'smart':
+            return cls.__name__
+        else:
+            module = cls.__module__
+            name = cls.__qualname__
+            # typing/builtins names are shown bare in the default mode.
+            if module == 'typing' or module == 'builtins':
+                return name
+            return f'{module}.{name}'
+    elif isinstance(cls, TypeVar):
+        return cls.__name__
+    else:
+        return str(cls)
def stringify_annotation(annotation: Any, /, mode: _StringifyMode=
@@ -147,7 +166,23 @@ def stringify_annotation(annotation: Any, /, mode: _StringifyMode=
    'fully-qualified'
        Show the module name and qualified name of the annotation.
    """
-    pass
+    # NOTE(review): simplified stringifier -- generic aliases fall through to
+    # str(annotation); only strings, plain classes and TypeVars are
+    # special-cased.  Confirm this suffices for the autodoc callers.
+    if isinstance(annotation, str):
+        return annotation
+    elif isinstance(annotation, type):
+        if mode == 'smart':
+            return annotation.__name__
+        elif mode == 'fully-qualified':
+            return f'{annotation.__module__}.{annotation.__qualname__}'
+        else: # fully-qualified-except-typing
+            module = annotation.__module__
+            name = annotation.__qualname__
+            if module == 'typing' or module == 'builtins':
+                return name
+            return f'{module}.{name}'
+    elif isinstance(annotation, TypeVar):
+        return annotation.__name__
+    else:
+        return str(annotation)
_DEPRECATED_OBJECTS: dict[str, tuple[Any, str, tuple[int, int]]] = {}
diff --git a/sphinx/versioning.py b/sphinx/versioning.py
index e75880d9e..bcea28225 100644
--- a/sphinx/versioning.py
+++ b/sphinx/versioning.py
@@ -31,7 +31,10 @@ def add_uids(doctree: Node, condition: Callable[[Node], bool]) ->Iterator[Node
    :param condition:
        A callable which returns either ``True`` or ``False`` for a given node.
    """
-    pass
+    # Walk matching nodes, assign a fresh uuid4 hex to any node lacking a
+    # 'uid', and yield every matching node.
+    for node in doctree.traverse(condition):
+        if 'uid' not in node:
+            node['uid'] = uuid4().hex
+        yield node
def merge_doctrees(old: Node, new: Node, condition: Callable[[Node], bool]
@@ -45,19 +48,48 @@ def merge_doctrees(old: Node, new: Node, condition: Callable[[Node], bool]
:param condition:
A callable which returns either ``True`` or ``False`` for a given node.
"""
- pass
+ old_iter = old.traverse(condition)
+ new_iter = new.traverse(condition)
+ old_nodes = {node['uid']: node for node in old_iter if 'uid' in node}
+
+ for new_node in new_iter:
+ if 'uid' not in new_node:
+ yield new_node
+ elif new_node['uid'] not in old_nodes:
+ yield new_node
+ elif new_node != old_nodes[new_node['uid']]:
+ yield new_node
def get_ratio(old: str, new: str) ->float:
    """Return a "similarity ratio" (in percent) representing the similarity
    between the two strings where 0 is equal and anything above less than equal.
    """
-    pass
+    # Per the docstring, 0 means identical and larger means more different:
+    # return the edit distance as a percentage of the old string's length.
+    # (The previous version returned a *similarity* score -- 100 for equal --
+    # and divided by max(len(old), len(new)), which is zero for two empty
+    # strings.)
+    if not old or not new:
+        return 0.0 if old == new else 100.0
+    if IS_SPEEDUP:
+        return Levenshtein.distance(old, new) / (len(old) / 100.0)
+    else:
+        return levenshtein_distance(old, new) / (len(old) / 100.0)
def levenshtein_distance(a: str, b: str) ->int:
    """Return the Levenshtein edit distance between two strings *a* and *b*."""
-    pass
+    # Single-row dynamic programming: only the previous row of the edit
+    # matrix is kept -- O(len(a)*len(b)) time, O(min(len(a), len(b))) space.
+    if len(a) < len(b):
+        a, b = b, a
+    if not b:
+        return len(a)
+
+    previous_row = range(len(b) + 1)
+    for i, column1 in enumerate(a):
+        current_row = [i + 1]
+        for j, column2 in enumerate(b):
+            insertions = previous_row[j + 1] + 1
+            deletions = current_row[j] + 1
+            substitutions = previous_row[j] + (column1 != column2)
+            current_row.append(min(insertions, deletions, substitutions))
+        previous_row = current_row
+
+    return previous_row[-1]
class UIDTransform(SphinxTransform):
diff --git a/sphinx/writers/html5.py b/sphinx/writers/html5.py
index 761abb855..2e5157d93 100644
--- a/sphinx/writers/html5.py
+++ b/sphinx/writers/html5.py
@@ -22,7 +22,11 @@ logger = logging.getLogger(__name__)
def multiply_length(length: str, scale: int) ->str:
    """Multiply *length* (width or height) by *scale*."""
-    pass
+    # *scale* is a percentage (the docutils image "scale" attribute), so the
+    # amount is multiplied by scale/100, not by scale itself; any unit suffix
+    # (px, em, pt, %, ...) is preserved rather than whitelisted.
+    matched = re.match(r'^(\d*\.?\d*)\s*(\S*)$', length)
+    if not matched or not matched.group(1):
+        return length
+    amount, unit = matched.groups()
+    result = float(amount) * scale / 100
+    # Emit an integer when exact so values like "100px" stay clean.
+    return f'{int(result)}{unit}' if result == int(result) else f'{result}{unit}'
class HTML5Translator(SphinxTranslator, BaseTranslator):
@@ -51,4 +55,21 @@ class HTML5Translator(SphinxTranslator, BaseTranslator):
The *parameter_group* value is the type of child nodes acting as required parameters
or as a set of contiguous optional parameters.
"""
- pass
+ self.body.append(sig_open_paren)
+ self.required_params_left = sum(isinstance(c, parameter_group) for c in node.children)
+ self.param_separator = node.child_text_separator
+ self.optional_param_level = 0
+ for child in node:
+ if isinstance(child, parameter_group):
+ self.required_params_left -= 1
+ if self.optional_param_level:
+ self.body.append(']')
+ self.optional_param_level -= 1
+ elif isinstance(child, addnodes.optional):
+ self.optional_param_level += 1
+ self.body.append('[')
+ self.visit(child)
+ while self.optional_param_level:
+ self.body.append(']')
+ self.optional_param_level -= 1
+ self.body.append(sig_close_paren)
diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py
index c0c3fff2d..50dae1b53 100644
--- a/sphinx/writers/latex.py
+++ b/sphinx/writers/latex.py
@@ -102,7 +102,7 @@ class Table:
def is_longtable(self) ->bool:
"""True if and only if table uses longtable environment."""
- pass
+ return 'longtable' in self.styles or self.caption
def get_table_type(self) ->str:
"""Returns the LaTeX environment name for the table.
@@ -113,7 +113,12 @@ class Table:
        * tabular
        * tabulary
        """
-    pass
+        # Priority: longtable wins; an explicit column spec implies tabulary;
+        # otherwise fall back to plain tabular.
+        if self.is_longtable():
+            return 'longtable'
+        elif self.colspec:
+            return 'tabulary'
+        else:
+            return 'tabular'
def get_colspec(self) ->str:
"""Returns a column spec of table.
@@ -125,14 +130,23 @@ class Table:
The ``\\\\X`` and ``T`` column type specifiers are defined in
``sphinxlatextables.sty``.
"""
- pass
+ if self.colspec:
+ return self.colspec
+ elif self.colwidths and len(self.colwidths) == self.colcount:
+ return ''.join('p{%.2f\\linewidth}' % width for width in self.colwidths)
+ else:
+ return 'l' * self.colcount
    def add_cell(self, height: int, width: int) ->None:
        """Adds a new cell to a table.

        It will be located at current position: (``self.row``, ``self.col``).
        """
-    pass
+        # Stamp a fresh cell id over the height x width rectangle anchored at
+        # the current position, then advance the column cursor past it.
+        # NOTE(review): assumes self.cells tolerates arbitrary (row, col)
+        # keys (e.g. a defaultdict) -- confirm against the constructor.
+        self.cell_id += 1
+        for i in range(height):
+            for j in range(width):
+                self.cells[self.row + i, self.col + j] = self.cell_id
+        self.col += width
def cell(self, row: (int | None)=None, col: (int | None)=None) ->(TableCell
| None):
@@ -141,7 +155,12 @@ class Table:
        If no option arguments: ``row`` or ``col`` are given, the current position;
        ``self.row`` and ``self.col`` are used to get a cell object by default.
        """
-    pass
+        # Explicit "is None" checks so that row/col 0 are honoured.
+        row = self.row if row is None else row
+        col = self.col if col is None else col
+        try:
+            return TableCell(self, row, col)
+        except IndexError:
+            # Position outside the table (or an empty slot): no cell there.
+            return None
class TableCell:
@@ -162,22 +181,39 @@ class TableCell:
    @property
    def width(self) ->int:
        """Returns the cell width."""
-    pass
+        # Count, from this cell's anchor column to the table edge, how many
+        # columns are stamped with this cell's id.  NOTE(review): assumes a
+        # cell's columns are contiguous (guaranteed by Table.add_cell).
+        return sum(1 for col in range(self.col, self.table.colcount)
+                   if self.table.cells[self.row, col] == self.cell_id)
    @property
    def height(self) ->int:
        """Returns the cell height."""
-    pass
+        # Count rows stamped with this cell's id from the anchor row down to
+        # the table's current row.  NOTE(review): the upper bound is the
+        # table's write cursor, so the value is only final once the table is
+        # fully built -- confirm callers only read it afterwards.
+        return sum(1 for row in range(self.row, self.table.row + 1)
+                   if self.table.cells[row, self.col] == self.cell_id)
def escape_abbr(text: str) ->str:
    """Adjust spacing after abbreviations."""
-    pass
+    # Append \@ to an abbreviation-ending period (one followed by whitespace
+    # or end of text) so TeX emits inter-word rather than end-of-sentence
+    # space.  The previous lookahead (?=\w) targeted periods inside tokens.
+    return re.sub(r'\.(?=\s|$)', r'.\@', text)
def rstdim_to_latexdim(width_str: str, scale: int=100) ->str:
    """Convert `width_str` with rst length to LaTeX length."""
-    pass
+    # Split "<amount><unit>"; reject strings that are not a reST length.
+    match = re.match(r'^(\d*\.?\d*)\s*(\S*)$', width_str)
+    if not match or not match.group(1):
+        # The annotated return type is str: fail loudly instead of silently
+        # returning None for unparsable input.
+        raise ValueError(width_str)
+    amount, unit = match.groups()
+    # Apply the scale (a percentage) once, to the numeric amount only.
+    if scale != 100:
+        amount = str(float(amount) * scale / 100)
+    if not unit or unit == 'px':
+        # Pixels render via the \sphinxpxdimen unit.
+        res = amount + '\\sphinxpxdimen'
+    elif unit == '%':
+        # A percentage is a fraction of the line width: 50% -> 0.500\linewidth.
+        # (The previous code emitted e.g. "50.00\linewidth".)
+        res = '%.3f\\linewidth' % (float(amount) / 100.0)
+    else:
+        # Native (La)TeX units pass through unchanged.
+        res = amount + unit
+    return res
class LaTeXTranslator(SphinxTranslator):
@@ -293,7 +329,7 @@ class LaTeXTranslator(SphinxTranslator):
    @property
    def table(self) ->(Table | None):
        """Get current table."""
-    pass
+        # The innermost (most recently opened) table, or None outside tables.
+        return self.tables[-1] if self.tables else None
depart_sidebar = depart_topic
def _visit_sig_parameter_list(self, node: Element, parameter_group:
@@ -306,7 +342,19 @@ class LaTeXTranslator(SphinxTranslator):
        The caller is responsible for closing adding surrounding LaTeX macro argument start
        and stop tokens.
        """
-    pass
+        # Render required parameters comma-separated first, then optional
+        # ones bracketed.  NOTE(review): astext() flattens child markup to
+        # plain text -- confirm parameters carry no nested formatting, and
+        # that the framework's own child walk will not render them again.
+        params = []
+        optional_params = []
+        for child in node.children:
+            if isinstance(child, parameter_group):
+                if child.get('optional'):
+                    optional_params.append(child.astext())
+                else:
+                    params.append(child.astext())
+
+        if params:
+            self.body.append(', '.join(params))
+        if optional_params:
+            self.body.append('[' + ', '.join(optional_params) + ']')
visit_field_name = visit_term
depart_field_name = depart_term
visit_field_body = visit_definition
@@ -314,7 +362,7 @@ class LaTeXTranslator(SphinxTranslator):
    def is_inline(self, node: Element) ->bool:
        """Check whether a node represents an inline element."""
-    pass
+        # NOTE(review): classifies by the node's own type; docutils writers
+        # conventionally test isinstance(node.parent, nodes.TextElement) --
+        # confirm this type list covers every inline node the builder emits.
+        return isinstance(node, (nodes.inline, nodes.literal, nodes.reference))
visit_attention = _visit_named_admonition
depart_attention = _depart_named_admonition
visit_caution = _visit_named_admonition
@@ -338,7 +386,7 @@ class LaTeXTranslator(SphinxTranslator):
def visit_option_argument(self, node: Element) ->None:
"""The delimiter between an option and its argument."""
- pass
+ self.body.append('=')
from sphinx.builders.latex.nodes import HYPERLINK_SUPPORT_NODES, captioned_literal_block, footnotetext
diff --git a/sphinx/writers/texinfo.py b/sphinx/writers/texinfo.py
index ff82196df..a3a293868 100644
--- a/sphinx/writers/texinfo.py
+++ b/sphinx/writers/texinfo.py
@@ -71,14 +71,16 @@ TEMPLATE = """\\input texinfo @c -*-texinfo-*-
def find_subsections(section: Element) ->list[nodes.section]:
    """Return a list of subsections for the given ``section``."""
-    pass
+    result = []
+    for child in section.children:
+        if isinstance(child, nodes.section):
+            result.append(child)
+        elif isinstance(child, nodes.Element):
+            # Sections may be wrapped in container Elements (e.g. compound
+            # nodes); recurse so those are not silently dropped, as the
+            # previous direct-children-only scan did.
+            result.extend(find_subsections(child))
+    return result
def smart_capwords(s: str, sep: (str | None)=None) ->str:
    """Like string.capwords() but does not capitalize words that already
    contain a capital letter.
    """
-    pass
+    # Capitalize only fully-lowercase words; words with an existing capital
+    # (e.g. "LaTeX") pass through unchanged.  NOTE(review): joining with
+    # (sep or ' ') collapses runs of mixed whitespace when sep is None --
+    # same behaviour as string.capwords, presumably intentional.
+    words = s.split(sep) if sep else s.split()
+    capitalized = [word.capitalize() if word.islower() else word for word in words]
+    return (sep or ' ').join(capitalized)
class TexinfoWriter(writers.Writer):
@@ -144,41 +146,75 @@ class TexinfoTranslator(SphinxTranslator):
        Assigns the attribute ``node_name`` to each section.
        """
-    pass
+        # Derive a Texinfo node name from each section's first id and record
+        # it both on the node and in the node_names lookup table.
+        # NOTE(review): sections always carry an 'ids' attribute, so the
+        # membership test passes even when the list is empty -- an empty list
+        # would make section['ids'][0] raise IndexError; confirm upstream
+        # transforms guarantee at least one id.
+        for section in self.document.traverse(nodes.section):
+            if 'ids' in section:
+                node_id = section['ids'][0]
+                node_name = self.escape_id(node_id)
+                section['node_name'] = node_name
+                self.node_names[node_id] = node_name
    def collect_node_menus(self) ->None:
        """Collect the menu entries for each "node" section."""
-    pass
+        # For every named section, record the (node_name, menu title) pairs
+        # of its subsections that themselves received node names.
+        for section in self.document.traverse(nodes.section):
+            if 'node_name' not in section:
+                continue
+            entries = []
+            for subsection in find_subsections(section):
+                if 'node_name' in subsection:
+                    title = self.escape_menu(subsection.next_node(nodes.title).astext())
+                    entries.append((subsection['node_name'], title))
+            self.node_menus[section['node_name']] = entries
def collect_rellinks(self) ->None:
"""Collect the relative links (next, previous, up) for each "node"."""
- pass
+ def process_section(section, parent=None):
+ if 'node_name' not in section:
+ return
+ node_name = section['node_name']
+ links = ['', '', ''] # next, previous, up
+ if parent and 'node_name' in parent:
+ links[2] = parent['node_name']
+ subsections = find_subsections(section)
+ if subsections:
+ links[0] = subsections[0]['node_name']
+ for i, subsection in enumerate(subsections):
+ if i > 0:
+ process_section(subsection, section)
+ self.rellinks[subsection['node_name']][1] = node_name
+ if i < len(subsections) - 1:
+ self.rellinks[subsection['node_name']][0] = subsections[i + 1]['node_name']
+ self.rellinks[node_name] = links
+
+ process_section(self.document)
    def escape(self, s: str) ->str:
        """Return a string with Texinfo command characters escaped."""
-    pass
+        # '@', '{' and '}' are Texinfo's special characters; '@' is doubled
+        # first so the '@'s introduced by the brace escapes are not re-escaped.
+        return s.replace('@', '@@').replace('{', '@{').replace('}', '@}')
    def escape_arg(self, s: str) ->str:
        """Return an escaped string suitable for use as an argument
        to a Texinfo command.
        """
-    pass
+        # Commas and colons terminate Texinfo command arguments, so replace
+        # them with their @-command forms.  NOTE(review): '@', '{', '}' are
+        # not escaped here -- confirm callers pre-escape with self.escape().
+        return s.replace(',', '@comma{}').replace(':', '@colon{}')
    def escape_id(self, s: str) ->str:
        """Return an escaped string suitable for node names and anchors."""
-    pass
+        # Collapse any character not safe in a Texinfo node name to '-'.
+        return re.sub(r'[^\w.+-]', '-', s)
    def escape_menu(self, s: str) ->str:
        """Return an escaped string suitable for menu entries."""
-    pass
+        # NOTE(review): ':' and '*' are significant in menu entries, hence
+        # the escapes; wrapping *every* space in @w{ } (which forbids line
+        # breaks inside the entry) is unusual -- confirm it is intended.
+        return s.replace(':', '@:').replace('*', '@*').replace(' ', '@w{ }')
    def ensure_eol(self) ->None:
        """Ensure the last line in body is terminated by new line."""
-    pass
+        # Append the newline in place on the last fragment so no extra body
+        # entry is created.
+        if self.body and not self.body[-1].endswith('\n'):
+            self.body[-1] += '\n'
    def get_short_id(self, id: str) ->str:
        """Return a shorter 'id' associated with ``id``."""
-    pass
+        # Map each distinct id to a small decimal index, assigned on first
+        # sight and stable thereafter.
+        if id not in self.short_ids:
+            self.short_ids[id] = str(len(self.short_ids))
+        return self.short_ids[id]
headings = ('@unnumbered', '@chapter', '@section', '@subsection',
'@subsubsection')
rubrics = '@heading', '@subheading', '@subsubheading'
diff --git a/sphinx/writers/text.py b/sphinx/writers/text.py
index 2e3317450..6732d2f5e 100644
--- a/sphinx/writers/text.py
+++ b/sphinx/writers/text.py
@@ -101,11 +101,13 @@ class Table:
"""Add a row to the table, to use with ``add_cell()``. It is not needed
to call ``add_row()`` before the first ``add_cell()``.
"""
- pass
+ self.lines.append([])
+ self.current_line = len(self.lines) - 1
+ self.current_col = 0
    def set_separator(self) ->None:
        """Sets the separator below the current line."""
-    pass
+        # Remember after which physical line the header separator is drawn.
+        self.separator = len(self.lines)
def add_cell(self, cell: Cell) ->None:
"""Add a cell to the current line, to use with ``add_row()``. To add
@@ -113,7 +115,10 @@ class Table:
        ``cell.colspan`` or ``cell.rowspan`` BEFORE inserting it into
        the table.
        """
-    pass
+        # Skip over slots already claimed (e.g. by a rowspan from a previous
+        # line), place the cell via __setitem__ (which expands it over its
+        # spans), then advance past its column span.
+        while self.current_col < len(self.lines[self.current_line]) and self.lines[self.current_line][self.current_col]:
+            self.current_col += 1
+        self[self.current_line, self.current_col] = cell
+        self.current_col += cell.colspan
def __getitem__(self, pos: tuple[int, int]) ->Cell:
line, col = pos
@@ -139,19 +144,27 @@ class Table:
        ``self.colwidth`` or ``self.measured_widths``).

        This takes into account cells spanning multiple columns.
        """
-    pass
+        # Sum the spanned columns' widths plus 3 characters per internal
+        # border (" | ") so a spanning cell lines up with the columns below.
+        width = sum(source[cell.col:cell.col + cell.colspan])
+        return width + (cell.colspan - 1) * 3  # Add space for separators
def rewrap(self) ->None:
"""Call ``cell.wrap()`` on all cells, and measure each column width
after wrapping (result written in ``self.measured_widths``).
"""
- pass
+ self.measured_widths = self.colwidth[:]
+ for line in self.lines:
+ for cell in line:
+ if cell.col == cell.col: # Only process each cell once
+ wrapped = textwrap.wrap(cell.text, width=self.cell_width(cell, self.measured_widths))
+ cell.wrapped = wrapped
+ if cell.colspan == 1:
+ self.measured_widths[cell.col] = max(self.measured_widths[cell.col], max(map(len, wrapped)) if wrapped else 0)
def physical_lines_for_line(self, line: list[Cell]) ->int:
"""For a given line, compute the number of physical lines it spans
due to text wrapping.
"""
- pass
+ return max(len(cell.wrapped) for cell in line if cell.row == line.index(cell))
def __str__(self) ->str:
out = []
@@ -212,23 +225,78 @@ class TextWrapper(textwrap.TextWrapper):
        This method respects wide/fullwidth characters for width adjustment.
        """
-    pass
+        # Port of textwrap.TextWrapper._wrap that measures chunk widths with
+        # column_width() so fullwidth (CJK) characters count as two columns.
+        lines = []
+        if self.width <= 0:
+            raise ValueError("invalid width %r (must be > 0)" % self.width)
+
+        # Chunks are consumed from the end (pop) for efficiency.
+        chunks.reverse()
+        while chunks:
+            cur_line = []
+            cur_len = 0
+
+            # First output line uses initial_indent, the rest subsequent_indent.
+            if lines:
+                indent = self.subsequent_indent
+            else:
+                indent = self.initial_indent
+
+            width = self.width - len(indent)
+
+            # Drop leading whitespace on continuation lines.
+            if self.drop_whitespace and chunks[-1].strip() == '' and lines:
+                del chunks[-1]
+
+            while chunks:
+                l = column_width(chunks[-1])
+
+                if cur_len + l <= width:
+                    cur_line.append(chunks.pop())
+                    cur_len += l
+                else:
+                    break
+
+            # NOTE(review): if _break_word() returns an empty head (a wide
+            # character that does not fit in the space left), this loop may
+            # fail to make progress -- confirm width >= 2 upstream.
+            if chunks and column_width(chunks[-1]) > width:
+                self._handle_long_word(chunks, cur_line, cur_len, width)
+
+            # Drop trailing whitespace from the assembled line.
+            if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
+                del cur_line[-1]
+
+            if cur_line:
+                lines.append(indent + ''.join(cur_line))
+
+        return lines
    def _break_word(self, word: str, space_left: int) ->tuple[str, str]:
        """Break line by unicode width instead of len(word)."""
-    pass
+        # Accumulate display width character by character and split just
+        # before the character that would overflow *space_left*.  Returns
+        # (head, remainder); head may be empty when even the first character
+        # does not fit.
+        total_width = 0
+        for i, char in enumerate(word):
+            char_width = column_width(char)
+            if total_width + char_width > space_left:
+                return word[:i], word[i:]
+            total_width += char_width
+        return word, ''
def _split(self, text: str) ->list[str]:
"""Override original method that only split by 'wordsep_re'.
This '_split' splits wide-characters into chunks by one character.
"""
- pass
+ chunks = []
+ for chunk in self.wordsep_re.split(text):
+ if column_width(chunk) > 1:
+ chunks.extend(list(chunk))
+ else:
+ chunks.append(chunk)
+ return [c for c in chunks if c]
-    def _handle_long_word(self, reversed_chunks: list[str], cur_line: list[
-        str], cur_len: int, width: int) ->None:
+    def _handle_long_word(self, reversed_chunks: list[str], cur_line: list[str], cur_len: int, width: int) ->None:
        """Override original method for using self._break_word() instead of slice."""
-    pass
+        # Guarantee at least one column of space so the word can start.
+        space_left = max(width - cur_len, 1)
+        if self.break_long_words:
+            # Split the overlong chunk by display width; the remainder stays
+            # at the top of the (reversed) chunk stack for the next line.
+            chunk = reversed_chunks[-1]
+            word_part, remainder = self._break_word(chunk, space_left)
+            reversed_chunks[-1] = remainder
+            cur_line.append(word_part)
+        elif not cur_line:
+            # Cannot break: emit the whole chunk on its own (overlong) line.
+            cur_line.append(reversed_chunks.pop())
MAXWIDTH = 70
diff --git a/sphinx/writers/xml.py b/sphinx/writers/xml.py
index 47b7357a7..4281a5c02 100644
--- a/sphinx/writers/xml.py
+++ b/sphinx/writers/xml.py
@@ -30,4 +30,4 @@ class PseudoXMLWriter(BaseXMLWriter):
    def supports(self, format: str) ->bool:
        """All format-specific elements are supported."""
-    pass
+        # Delegate to the writer's declared ``supported`` formats tuple.
+        return format in self.supported