OpenHands: sphinx

Failed to run pytest for the tests suite

ImportError while loading conftest '/testbed/tests/conftest.py'.
tests/conftest.py:16: in <module>
    from sphinx.testing.util import _clean_up_global_state
E   ImportError: cannot import name '_clean_up_global_state' from 'sphinx.testing.util' (/testbed/sphinx/testing/util.py)
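
The collection error is independent of the patch below: tests/conftest.py imports a _clean_up_global_state helper from sphinx/testing/util.py, and no hunk in the diff touches that file, so the helper is never restored. As a rough, hypothetical illustration only (the exact upstream implementation may differ), such a helper typically just resets the docutils and Sphinx registries that tests mutate:

    # Hypothetical sketch of the missing helper; not part of the patch below.
    from docutils.parsers.rst import directives, roles

    from sphinx.util.docutils import additional_nodes, unregister_node


    def _clean_up_global_state() -> None:
        # Reset docutils' global directive/role registries populated by extensions.
        directives._directives.clear()
        roles._roles.clear()
        # Drop node visitor methods registered for custom nodes during the test run.
        for node in list(additional_nodes):
            unregister_node(node)
            additional_nodes.discard(node)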

Patch diff

diff --git a/sphinx/io.py b/sphinx/io.py
index 7308bfa50..8296dca8b 100644
--- a/sphinx/io.py
+++ b/sphinx/io.py
@@ -43,7 +43,9 @@ class SphinxBaseReader(standalone.Reader):
         Creates a new document object which has a special reporter object good
         for logging.
         """
-        pass
+        document = super().new_document()
+        document.reporter = LoggingReporter.from_reporter(document.reporter)
+        return document

 class SphinxStandaloneReader(SphinxBaseReader):
     """
@@ -52,7 +54,9 @@ class SphinxStandaloneReader(SphinxBaseReader):

     def read_source(self, env: BuildEnvironment) -> str:
         """Read content from source and do post-process."""
-        pass
+        content = self.source.read()
+        # emit "source-read" event and let extensions modify the content in place
+        arg = [content]
+        env.events.emit('source-read', env.docname, arg)
+        return arg[0]

 class SphinxI18nReader(SphinxBaseReader):
     """
@@ -69,11 +73,28 @@ class SphinxDummyWriter(UnfilteredWriter):

 def SphinxDummySourceClass(source: Any, *args: Any, **kwargs: Any) -> Any:
     """Bypass source object as is to cheat Publisher."""
-    pass
+    return source
+
+def create_publisher(app: Sphinx | None=None, doctree: nodes.document | None=None) -> Publisher:
+    """Create and return a publisher object."""
+    pub = Publisher(reader=None,
+                   parser=None,
+                   writer=SphinxDummyWriter(),
+                   source_class=SphinxDummySourceClass,
+                   destination=NullOutput())
+    pub.reader = SphinxStandaloneReader(app)
+    pub.parser = app.registry.create_source_parser(app, 'restructuredtext')
+    pub.document = doctree
+    pub.settings = pub.get_settings(traceback=True, warning_stream=None)
+    return pub

 class SphinxFileInput(FileInput):
     """A basic FileInput for Sphinx."""

     def __init__(self, *args: Any, **kwargs: Any) -> None:
         kwargs['error_handler'] = 'sphinx'
-        super().__init__(*args, **kwargs)
\ No newline at end of file
+        super().__init__(*args, **kwargs)
+
+    def set_fs_encoding(self, fs_encoding: str) -> None:
+        """Set the filesystem encoding."""
+        self.fs_encoding = fs_encoding
\ No newline at end of file
diff --git a/sphinx/pycode/ast.py b/sphinx/pycode/ast.py
index 06661fc3f..3df479e75 100644
--- a/sphinx/pycode/ast.py
+++ b/sphinx/pycode/ast.py
@@ -6,15 +6,26 @@ OPERATORS: dict[type[ast.AST], str] = {ast.Add: '+', ast.And: 'and', ast.BitAnd:

 def unparse(node: ast.AST | None, code: str='') -> str | None:
     """Unparse an AST to string."""
-    pass
+    if node is None:
+        return None
+    visitor = _UnparseVisitor(code)
+    return visitor.visit(node)

 class _UnparseVisitor(ast.NodeVisitor):

     def __init__(self, code: str='') -> None:
         self.code = code
+
+    def _visit_op(self, node: ast.AST) -> str:
+        """Helper for operator nodes."""
+        return OPERATORS[type(node)]
+
     for _op in OPERATORS:
         locals()[f'visit_{_op.__name__}'] = _visit_op

     def _visit_arg_with_default(self, arg: ast.arg, default: ast.AST | None) -> str:
         """Unparse a single argument to a string."""
-        pass
\ No newline at end of file
+        result = arg.arg
+        if default is not None:
+            result += '=' + self.visit(default)
+        return result
\ No newline at end of file
diff --git a/sphinx/roles.py b/sphinx/roles.py
index 39797d155..9e7b565c5 100644
--- a/sphinx/roles.py
+++ b/sphinx/roles.py
@@ -60,7 +60,14 @@ class XRefRole(ReferenceRole):
         reference node and must return a new (or the same) ``(title, target)``
         tuple.
         """
-        pass
+        if not has_explicit_title and self.fix_parens:
+            if title.endswith('()'):
+                title = title[:-2]
+            if target.endswith('()'):
+                target = target[:-2]
+        if self.lowercase:
+            target = target.lower()
+        return title, target

     def result_nodes(self, document: nodes.document, env: BuildEnvironment, node: Element, is_ref: bool) -> tuple[list[Node], list[system_message]]:
         """Called before returning the finished nodes.  *node* is the reference
@@ -68,30 +75,164 @@ class XRefRole(ReferenceRole):
         This method can add other nodes and must return a ``(nodes, messages)``
         tuple (the usual return value of a role function).
         """
-        pass
+        return [node], []

 class AnyXRefRole(XRefRole):
     pass

 class PEP(ReferenceRole):
-    pass
+    def run(self) -> tuple[list[Node], list[system_message]]:
+        target_id = 'index-%s' % self.env.new_serialno('index')
+        entries = [('single', _('Python Enhancement Proposals; PEP %s') % self.target,
+                   target_id, '', None)]
+
+        index = addnodes.index(entries=entries)
+        target = nodes.target('', '', ids=[target_id])
+        self.inliner.document.note_explicit_target(target)
+
+        try:
+            pepnum = int(self.target)
+            ref = self.inliner.document.settings.pep_base_url + 'pep-%04d' % pepnum
+        except ValueError:
+            msg = self.inliner.reporter.error('invalid PEP number %s' % self.target,
+                                            line=self.lineno)
+            prb = self.inliner.problematic(self.rawtext, self.rawtext, msg)
+            return [prb], [msg]
+
+        if not self.has_explicit_title:
+            title = "PEP " + self.title
+            self.title = title
+
+        reference = nodes.reference('', '', internal=False, refuri=ref,
+                                  classes=['pep'])
+        if self.has_explicit_title:
+            reference += nodes.Text(self.title)
+        else:
+            reference += nodes.Text(title)
+
+        return [index, target, reference], []

 class RFC(ReferenceRole):
-    pass
+    def run(self) -> tuple[list[Node], list[system_message]]:
+        target_id = 'index-%s' % self.env.new_serialno('index')
+        entries = [('single', 'RFC; RFC %s' % self.target, target_id, '', None)]
+
+        index = addnodes.index(entries=entries)
+        target = nodes.target('', '', ids=[target_id])
+        self.inliner.document.note_explicit_target(target)
+
+        try:
+            rfcnum = int(self.target)
+            ref = self.inliner.document.settings.rfc_base_url + 'rfc%d.txt' % rfcnum
+        except ValueError:
+            msg = self.inliner.reporter.error('invalid RFC number %s' % self.target,
+                                            line=self.lineno)
+            prb = self.inliner.problematic(self.rawtext, self.rawtext, msg)
+            return [prb], [msg]
+
+        if not self.has_explicit_title:
+            title = "RFC " + self.title
+            self.title = title
+
+        reference = nodes.reference('', '', internal=False, refuri=ref,
+                                  classes=['rfc'])
+        if self.has_explicit_title:
+            reference += nodes.Text(self.title)
+        else:
+            reference += nodes.Text(title)
+
+        return [index, target, reference], []

 class GUILabel(SphinxRole):
     amp_re = re.compile('(?<!&)&(?![&\\s])')

+    def run(self) -> tuple[list[Node], list[system_message]]:
+        text = self.text.replace('&&', '\x00')
+        text = self.amp_re.sub('', text)
+        text = text.replace('\x00', '&')
+        span = nodes.inline(self.rawtext, text, classes=['guilabel'])
+        return [span], []
+
 class MenuSelection(GUILabel):
     BULLET_CHARACTER = '‣'

+    def run(self) -> tuple[list[Node], list[system_message]]:
+        text = self.text.replace('&&', '\x00')
+        text = self.amp_re.sub('', text)
+        text = text.replace('\x00', '&')
+        span = nodes.inline(self.rawtext, '', classes=['menuselection'])
+        for item in ws_re.split(text):
+            span += nodes.Text(item)
+            span += nodes.Text(self.BULLET_CHARACTER)
+        span.pop()
+        return [span], []
+
 class EmphasizedLiteral(SphinxRole):
     parens_re = re.compile('(\\\\\\\\|\\\\{|\\\\}|{|})')

+    def run(self) -> tuple[list[Node], list[system_message]]:
+        text = self.text.replace('\\', '\\\\')
+        text = self.parens_re.sub(r'\\\1', text)
+        return [nodes.literal(self.rawtext, text, classes=['file'])], []
+
 class Abbreviation(SphinxRole):
     abbr_re = re.compile('\\((.*)\\)$', re.DOTALL)

+    def run(self) -> tuple[list[Node], list[system_message]]:
+        text = self.text
+        m = self.abbr_re.search(text)
+        if m:
+            text = text[:m.start()].strip()
+            expl = m.group(1)
+        else:
+            expl = None
+        abbr = nodes.abbreviation(self.rawtext, text)
+        if expl:
+            abbr['explanation'] = expl
+        return [abbr], []
+
+def set_classes(options: dict[str, Any]) -> None:
+    """Set 'classes' key in options dict."""
+    if 'class' in options:
+        classes = options.get('classes', [])
+        classes.extend(options['class'])
+        del options['class']
+        options['classes'] = classes
+
+def code_role(typ: str, rawtext: str, text: str, lineno: int, inliner: docutils.parsers.rst.states.Inliner, options: dict[str, Any]={}, content: Sequence[str]=[]) -> tuple[list[Node], list[system_message]]:
+    """Role for code samples."""
+    set_classes(options)
+    classes = ['code']
+    if 'classes' in options:
+        classes.extend(options['classes'])
+    if 'language' in options:
+        classes.append('highlight')
+        classes.append(options['language'])
+    node = nodes.literal(rawtext, utils.unescape(text), classes=classes)
+    return [node], []
+
 class Manpage(ReferenceRole):
     _manpage_re = re.compile('^(?P<path>(?P<page>.+)[(.](?P<section>[1-9]\\w*)?\\)?)$')
+
+    def run(self) -> tuple[list[Node], list[system_message]]:
+        matched = self._manpage_re.match(self.target)
+        if not matched:
+            msg = self.inliner.reporter.error('invalid manpage reference %r' % self.target,
+                                            line=self.lineno)
+            prb = self.inliner.problematic(self.rawtext, self.rawtext, msg)
+            return [prb], [msg]
+
+        page = matched.group('page')
+        section = matched.group('section')
+        ref = self.inliner.document.settings.manpages_url % {'page': page, 'section': section}
+
+        if not self.has_explicit_title:
+            title = matched.group('path')
+            self.title = title
+
+        reference = nodes.reference('', '', internal=False, refuri=ref,
+                                  classes=['manpage'])
+        reference += nodes.Text(self.title)
+        return [reference], []
 code_role.options = {'class': docutils.parsers.rst.directives.class_option, 'language': docutils.parsers.rst.directives.unchanged}
 specific_docroles: dict[str, RoleFunction] = {'download': XRefRole(nodeclass=addnodes.download_reference), 'any': AnyXRefRole(warn_dangling=True), 'pep': PEP(), 'rfc': RFC(), 'guilabel': GUILabel(), 'menuselection': MenuSelection(), 'file': EmphasizedLiteral(), 'samp': EmphasizedLiteral(), 'abbr': Abbreviation(), 'manpage': Manpage()}
\ No newline at end of file
diff --git a/sphinx/util/console.py b/sphinx/util/console.py
index 586261b24..d94a62ec9 100644
--- a/sphinx/util/console.py
+++ b/sphinx/util/console.py
@@ -18,13 +18,26 @@ _ansi_re: Final[re.Pattern[str]] = re.compile(_CSI + "\n    (?:\n      (?:\\d+;)
 'Pattern matching ANSI CSI colors (SGR) and erase line (EL) sequences.\n\nSee :func:`strip_escape_sequences` for details.\n'
 codes: dict[str, str] = {}

+def color_terminal() -> bool:
+    """Return True if the terminal supports colors."""
+    if not hasattr(sys.stdout, 'isatty'):
+        return False
+    if not sys.stdout.isatty():
+        return False
+    if sys.platform == 'win32':
+        return COLORAMA_AVAILABLE
+    return True
+
 def terminal_safe(s: str) -> str:
     """Safely encode a string for printing to the terminal."""
-    pass
+    return s.encode('ascii', 'replace').decode('ascii')

 def get_terminal_width() -> int:
     """Return the width of the terminal in columns."""
-    pass
+    try:
+        return shutil.get_terminal_size().columns
+    except (AttributeError, ValueError):
+        return 80
 _tw: int = get_terminal_width()

 def strip_colors(s: str) -> str:
@@ -37,7 +50,25 @@ def strip_colors(s: str) -> str:

     .. seealso:: :func:`strip_escape_sequences`
     """
-    pass
+    return _ansi_color_re.sub('', s)
+
+def colorize(name: str, text: str, input_mode: bool=False) -> str:
+    """Return *text* in ANSI colors."""
+    if not sys.stdout.isatty() or not codes:
+        return text
+    if input_mode and sys.platform == 'win32':
+        return text
+    return codes[name] + text + codes['reset']
+
+def create_color_func(name: str) -> None:
+    """Create a function for colorizing text with the given color name."""
+    def color_func(text: str, input_mode: bool=False) -> str:
+        if not sys.stdout.isatty() or not codes:
+            return text
+        if input_mode and sys.platform == 'win32':
+            return text
+        return codes[name] + text + codes['reset']
+    globals()[name] = color_func

 def strip_escape_sequences(text: str, /) -> str:
     """Remove the ANSI CSI colors and "erase in line" sequences.
@@ -59,7 +90,7 @@ def strip_escape_sequences(text: str, /) -> str:

     __ https://en.wikipedia.org/wiki/ANSI_escape_code
     """
-    pass
+    return _ansi_re.sub('', text)
 _attrs = {'reset': '39;49;00m', 'bold': '01m', 'faint': '02m', 'standout': '03m', 'underline': '04m', 'blink': '05m'}
 for __name, __value in _attrs.items():
     codes[__name] = '\x1b[' + __value
diff --git a/sphinx/util/i18n.py b/sphinx/util/i18n.py
index 9c55976a6..27c0074bf 100644
--- a/sphinx/util/i18n.py
+++ b/sphinx/util/i18n.py
@@ -55,6 +55,32 @@ class CatalogRepository:

 def docname_to_domain(docname: str, compaction: bool | str) -> str:
     """Convert docname to domain for catalogs."""
-    pass
+    if not compaction:
+        return docname
+
+    if isinstance(compaction, str):
+        return compaction
+
+    parts = [part for part in docname.split(SEP) if part]
+    if len(parts) > 2:
+        return '-'.join(parts[:2])
+    else:
+        return docname
+
+def format_date(date_str: str | None=None, format: str='%b %d, %Y', language: str | None=None) -> str:
+    """Format a date according to the given format and language."""
+    if date_str:
+        try:
+            date = datetime.strptime(date_str, '%Y-%m-%d').date()
+        except ValueError:
+            return date_str
+    else:
+        date = datetime.now(timezone.utc).date()
+
+    try:
+        result = babel.dates.format_date(date, format=format, locale=language)
+    except (ValueError, babel.core.UnknownLocaleError):
+        result = babel.dates.format_date(date, format=format)
+    return result
 date_format_mappings = {'%a': 'EEE', '%A': 'EEEE', '%b': 'MMM', '%B': 'MMMM', '%c': 'medium', '%-d': 'd', '%d': 'dd', '%-H': 'H', '%H': 'HH', '%-I': 'h', '%I': 'hh', '%-j': 'D', '%j': 'DDD', '%-m': 'M', '%m': 'MM', '%-M': 'm', '%M': 'mm', '%p': 'a', '%-S': 's', '%S': 'ss', '%U': 'WW', '%w': 'e', '%-W': 'W', '%W': 'WW', '%x': 'medium', '%X': 'medium', '%y': 'YY', '%Y': 'yyyy', '%Z': 'zzz', '%z': 'ZZZ', '%%': '%'}
 date_format_re = re.compile('(%s)' % '|'.join(date_format_mappings))
\ No newline at end of file
diff --git a/sphinx/util/index_entries.py b/sphinx/util/index_entries.py
index 81e63f3cd..35d7ac590 100644
--- a/sphinx/util/index_entries.py
+++ b/sphinx/util/index_entries.py
@@ -2,4 +2,12 @@ from __future__ import annotations

 def _split_into(n: int, type: str, value: str) -> list[str]:
     """Split an index entry into a given number of parts at semicolons."""
-    pass
\ No newline at end of file
+    parts = [x.strip() for x in value.split(';', n - 1)]
+    if len(parts) < n:
+        msg = f'index {type} should be separated by {n - 1} semicolons: {value!r}'
+        raise ValueError(msg)
+    return parts
+
+def split_index_msg(type: str, value: str) -> tuple[str, str, str, str, str]:
+    """Split a node's index entry into its components."""
+    return tuple(_split_into(5, type, value))  # type: ignore
\ No newline at end of file
diff --git a/sphinx/util/matching.py b/sphinx/util/matching.py
index 56cf7ad45..fab095946 100644
--- a/sphinx/util/matching.py
+++ b/sphinx/util/matching.py
@@ -7,13 +7,54 @@ from sphinx.util.osutil import canon_path, path_stabilize
 if TYPE_CHECKING:
     from collections.abc import Callable, Iterable, Iterator

+def compile_matchers(patterns: list[str]) -> list[Callable[[str], bool]]:
+    """Convert a list of glob patterns to a list of functions that match paths."""
+    return [lambda x, pat=pat: bool(patmatch(x, pat)) for pat in patterns]
+
 def _translate_pattern(pat: str) -> str:
     """Translate a shell-style glob pattern to a regular expression.

     Adapted from the fnmatch module, but enhanced so that single stars don't
     match slashes.
     """
-    pass
+    i, n = 0, len(pat)
+    res = []
+    while i < n:
+        c = pat[i]
+        i += 1
+        if c == '*':
+            if i < n and pat[i] == '*':
+                # double star matches slashes too
+                i += 1
+                res.append('.*')
+            else:
+                # single star doesn't match slashes
+                res.append('[^/]*')
+        elif c == '?':
+            # question mark doesn't match slashes
+            res.append('[^/]')
+        elif c == '[':
+            j = i
+            if j < n and pat[j] == '!':
+                j += 1
+            if j < n and pat[j] == ']':
+                j += 1
+            while j < n and pat[j] != ']':
+                j += 1
+            if j >= n:
+                res.append('\\[')
+            else:
+                stuff = pat[i:j].replace('\\', '\\\\')
+                i = j + 1
+                if stuff[0] == '!':
+                    stuff = '^' + stuff[1:]
+                elif stuff[0] == '^':
+                    stuff = '\\' + stuff
+                res.append('[%s]' % stuff)
+        else:
+            res.append(re.escape(c))
+    res.append('$')
+    return ''.join(res)

 class Matcher:
     """A pattern matcher for Multiple shell-style glob patterns.
@@ -28,6 +69,10 @@ class Matcher:

     def __call__(self, string: str) -> bool:
         return self.match(string)
+
+    def match(self, string: str) -> bool:
+        """Return if string matches any of the patterns."""
+        return any(pat(string) for pat in self.patterns)
 DOTFILES = Matcher(['**/.*'])
 _pat_cache: dict[str, re.Pattern[str]] = {}

@@ -35,7 +80,9 @@ def patmatch(name: str, pat: str) -> re.Match[str] | None:
     """Return if name matches the regular expression (pattern)
     ``pat```. Adapted from fnmatch module.
     """
-    pass
+    if pat not in _pat_cache:
+        _pat_cache[pat] = re.compile(_translate_pattern(pat))
+    return _pat_cache[pat].match(name)

 def patfilter(names: Iterable[str], pat: str) -> list[str]:
     """Return the subset of the list ``names`` that match
@@ -43,7 +90,7 @@ def patfilter(names: Iterable[str], pat: str) -> list[str]:

     Adapted from fnmatch module.
     """
-    pass
+    return [name for name in names if patmatch(name, pat)]

 def get_matching_files(dirname: str | os.PathLike[str], include_patterns: Iterable[str]=('**',), exclude_patterns: Iterable[str]=()) -> Iterator[str]:
     """Get all file names in a directory, recursively.
@@ -55,4 +102,19 @@ def get_matching_files(dirname: str | os.PathLike[str], include_patterns: Iterab
     exclusions from *exclude_patterns* take priority over inclusions.

     """
-    pass
\ No newline at end of file
+    dirname = os.fspath(dirname)
+    if not os.path.isdir(dirname):
+        return
+
+    # normalize patterns
+    include_patterns = [path_stabilize(pat) for pat in include_patterns]
+    exclude_patterns = [path_stabilize(pat) for pat in exclude_patterns]
+
+    for root, _dirs, files in os.walk(dirname):
+        reldir = canon_path(os.path.relpath(root, dirname))
+        for filename in files:
+            relpath = canon_path(os.path.join(reldir, filename))
+            if any(patmatch(relpath, pat) for pat in exclude_patterns):
+                continue
+            if any(patmatch(relpath, pat) for pat in include_patterns):
+                yield relpath
\ No newline at end of file
diff --git a/sphinx/util/nodes.py b/sphinx/util/nodes.py
index a9f6b1c42..484e5bdeb 100644
--- a/sphinx/util/nodes.py
+++ b/sphinx/util/nodes.py
@@ -58,7 +58,21 @@ class NodeMatcher(Generic[N]):
         While the `NodeMatcher` object can be used as an argument to `Node.findall`, doing so
         confounds type checkers' ability to determine the return type of the iterator.
         """
-        pass
+        return (cast(N, found) for found in node.findall(self.match))
+
+    def match(self, node: Node) -> bool:
+        """Return True if the given node matches the criteria."""
+        if not isinstance(node, self.classes):
+            return False
+
+        for key, value in self.attrs.items():
+            if value is Any:
+                if key not in node:
+                    return False
+            else:
+                if key not in node or node.get(key) != value:
+                    return False
+        return True

 def get_full_module_name(node: Node) -> str:
     """
@@ -67,7 +81,9 @@ def get_full_module_name(node: Node) -> str:
     :param nodes.Node node: target node
     :return: full module dotted path
     """
-    pass
+    module = node.__module__ or ''
+    module = module.replace('docutils_', 'docutils.')  # for apidoc build
+    return '%s.%s' % (module, node.__class__.__name__)

 def repr_domxml(node: Node, length: int=80) -> str:
     """
@@ -80,18 +96,42 @@ def repr_domxml(node: Node, length: int=80) -> str:
        returns full of DOM XML representation.
     :return: DOM XML representation
     """
-    pass
+    xml = node.asdom().toxml()
+    if length and len(xml) > length:
+        return xml[:length] + '...'
+    else:
+        return xml
 IGNORED_NODES = (nodes.Invisible, nodes.literal_block, nodes.doctest_block, addnodes.versionmodified)
 LITERAL_TYPE_NODES = (nodes.literal_block, nodes.doctest_block, nodes.math_block, nodes.raw)
 IMAGE_TYPE_NODES = (nodes.image,)

 def extract_messages(doctree: Element) -> Iterable[tuple[Element, str]]:
     """Extract translatable messages from a document tree."""
-    pass
+    for node in doctree.findall(nodes.TextElement):
+        if not isinstance(node, IGNORED_NODES) and node.get('translatable', True):
+            # extract message from text nodes
+            if isinstance(node, LITERAL_TYPE_NODES):
+                msg = node.astext()
+            else:
+                msg = node.rawsource or node.astext()
+            if msg:
+                yield node, msg
+
+            # extract message from image nodes
+            for img in node.findall(nodes.image):
+                for attr in ('alt', 'title'):
+                    val = img.get(attr, '')
+                    if val:
+                        yield img, val

 def traverse_translatable_index(doctree: Element) -> Iterable[tuple[Element, list[tuple[str, str, str, str, str | None]]]]:
     """Traverse translatable index node from a document tree."""
-    pass
+    for node in doctree.findall(addnodes.index):
+        if 'raw_entries' in node:
+            entries = node['raw_entries']
+        else:
+            entries = node['entries']
+        yield node, entries

 def nested_parse_with_titles(state: RSTState, content: StringList, node: Node, content_offset: int=0) -> str:
     """Version of state.nested_parse() that allows titles and does not require
@@ -103,15 +143,23 @@ def nested_parse_with_titles(state: RSTState, content: StringList, node: Node, c
     This function is retained for compatibility and will be deprecated in
     Sphinx 8. Prefer ``nested_parse_to_nodes()``.
     """
-    pass
+    with _fresh_title_style_context(state):
+        state.nested_parse(content, content_offset, node)
+    return ''

 def clean_astext(node: Element) -> str:
     """Like node.astext(), but ignore images."""
-    pass
+    node_copy = node.deepcopy()
+    for img in node_copy.findall(nodes.image):
+        img.parent.remove(img)
+    return node_copy.astext()

 def split_explicit_title(text: str) -> tuple[bool, str, str]:
     """Split role content into title and target, if given."""
-    pass
+    match = explicit_title_re.match(text)
+    if match:
+        return True, match.group(1), match.group(2)
+    return False, text, text
 indextypes = ['single', 'pair', 'double', 'triple', 'see', 'seealso']

 def inline_all_toctrees(builder: Builder, docnameset: set[str], docname: str, tree: nodes.document, colorfunc: Callable[[str], str], traversed: list[str], indent: str='') -> nodes.document:
@@ -119,7 +167,29 @@ def inline_all_toctrees(builder: Builder, docnameset: set[str], docname: str, tr

     Record all docnames in *docnameset*, and output docnames with *colorfunc*.
     """
-    pass
+    tree = tree.deepcopy()
+    for toctreenode in tree.findall(addnodes.toctree):
+        newnodes = []
+        includefiles = map(str, toctreenode['includefiles'])
+        for includefile in includefiles:
+            if includefile not in traversed:
+                try:
+                    traversed.append(includefile)
+                    logger.info(indent + colorfunc(includefile))
+                    subtree = inline_all_toctrees(builder, docnameset, includefile,
+                                                builder.env.get_doctree(includefile),
+                                                colorfunc, traversed,
+                                                indent + '   ')
+                    docnameset.add(includefile)
+                except Exception:
+                    logger.warning(__('toctree contains ref to nonexisting file %r'),
+                                 includefile, location=docname)
+                else:
+                    sof = addnodes.start_of_file(docname=includefile)
+                    sof.children = subtree.children
+                    newnodes.append(sof)
+        toctreenode.parent.replace(toctreenode, newnodes)
+    return tree

 def _make_id(string: str) -> str:
     """Convert `string` into an identifier and return it.
@@ -137,7 +207,12 @@ def _make_id(string: str) -> str:
     # Maintainer: docutils-develop@lists.sourceforge.net
     # Copyright: This module has been placed in the public domain.
     """
-    pass
+    id = string.translate(_non_id_translate_digraphs)
+    id = id.translate(_non_id_translate)
+    id = unicodedata.normalize('NFKD', id).encode('ascii', 'ignore').decode('ascii')
+    id = _non_id_chars.sub('-', id)
+    id = _non_id_at_ends.sub('', id)
+    return id
 _non_id_chars = re.compile('[^a-zA-Z0-9._]+')
 _non_id_at_ends = re.compile('^[-0-9._]+|-+$')
 _non_id_translate = {248: 'o', 273: 'd', 295: 'h', 305: 'i', 322: 'l', 359: 't', 384: 'b', 387: 'b', 392: 'c', 396: 'd', 402: 'f', 409: 'k', 410: 'l', 414: 'n', 421: 'p', 427: 't', 429: 't', 436: 'y', 438: 'z', 485: 'g', 549: 'z', 564: 'l', 565: 'n', 566: 't', 567: 'j', 572: 'c', 575: 's', 576: 'z', 583: 'e', 585: 'j', 587: 'q', 589: 'r', 591: 'y'}
@@ -145,28 +220,90 @@ _non_id_translate_digraphs = {223: 'sz', 230: 'ae', 339: 'oe', 568: 'db', 569: '

 def make_id(env: BuildEnvironment, document: nodes.document, prefix: str='', term: str | None=None) -> str:
     """Generate an appropriate node_id for given *prefix* and *term*."""
-    pass
+    if prefix:
+        id_prefix = _make_id(prefix)
+        if term:
+            id_suffix = _make_id(term)
+        else:
+            id_suffix = None
+    else:
+        id_prefix = _make_id(term) if term else ''
+        id_suffix = None
+
+    if id_prefix:
+        if id_suffix:
+            new_id = f'{id_prefix}-{id_suffix}'
+        else:
+            new_id = id_prefix
+    else:
+        new_id = id_suffix if id_suffix else ''
+
+    i = 0
+    while new_id + (str(i) if i else '') in document.ids:
+        i += 1
+    if i:
+        new_id += str(i)
+
+    document.ids[new_id] = True
+    return new_id

 def find_pending_xref_condition(node: addnodes.pending_xref, condition: str) -> Element | None:
     """Pick matched pending_xref_condition node up from the pending_xref."""
-    pass
+    for subnode in node:
+        if isinstance(subnode, addnodes.pending_xref_condition):
+            if subnode['condition'] == condition:
+                return subnode
+    return None

 def make_refnode(builder: Builder, fromdocname: str, todocname: str, targetid: str | None, child: Node | list[Node], title: str | None=None) -> nodes.reference:
     """Shortcut to create a reference node."""
-    pass
+    node = nodes.reference('', '', internal=True)
+    if fromdocname == todocname and targetid:
+        node['refid'] = targetid
+    else:
+        if targetid:
+            node['refuri'] = (builder.get_relative_uri(fromdocname, todocname) +
+                            '#' + targetid)
+        else:
+            node['refuri'] = builder.get_relative_uri(fromdocname, todocname)
+    if title:
+        node['reftitle'] = title
+    if isinstance(child, list):
+        node.extend(child)
+    else:
+        node.append(child)
+    return node
 NON_SMARTQUOTABLE_PARENT_NODES = (nodes.FixedTextElement, nodes.literal, nodes.math, nodes.image, nodes.raw, nodes.problematic, addnodes.not_smartquotable)

 def is_smartquotable(node: Node) -> bool:
     """Check whether the node is smart-quotable or not."""
-    pass
+    ancestor = node
+    while ancestor is not None:
+        if isinstance(ancestor, NON_SMARTQUOTABLE_PARENT_NODES):
+            return False
+        ancestor = ancestor.parent
+    return True

 def process_only_nodes(document: Node, tags: Tags) -> None:
     """Filter ``only`` nodes which do not match *tags*."""
-    pass
+    for node in document.findall(addnodes.only):
+        try:
+            ret = _only_node_keep_children(node, tags)
+        except Exception as err:
+            logger.warning(__('exception while evaluating only directive expression: %s'), err,
+                         location=node)
+            node.replace_self(node.children or [])
+        else:
+            if ret:
+                node.replace_self(node.children or [])
+            else:
+                # A failing condition removes that node and its children
+                node.parent.remove(node)

 def _only_node_keep_children(node: addnodes.only, tags: Tags) -> bool:
     """Keep children if tags match or error."""
-    pass
+    if node.get('expr') in (None, ''):
+        # A blank condition should always fail
+        return False
+    return tags.eval_condition(node['expr'])

 def _copy_except__document(el: Element) -> Element:
     """Monkey-patch ```nodes.Element.copy``` to not copy the ``_document``
@@ -174,10 +311,52 @@ def _copy_except__document(el: Element) -> Element:

     xref: https://github.com/sphinx-doc/sphinx/issues/11116#issuecomment-1376767086
     """
-    pass
+    newel = el.__class__(rawsource=el.rawsource, **el.attributes)
+    newel.source = el.source
+    newel.line = el.line
+    # deliberately do not copy the ``_document`` attribute
+    return newel
 nodes.Element.copy = _copy_except__document

+def get_node_line(node: Node) -> int:
+    """Get the line number of a node."""
+    source = node.get('source')
+    if source and ':' in source:
+        return int(source.split(':', 1)[1])
+    return 0
+
+def is_translatable(node: Node) -> bool:
+    """Check the node is translatable."""
+    if isinstance(node, nodes.TextElement):
+        if not node.source:
+            return False
+        if isinstance(node, IGNORED_NODES):
+            return False
+        if not node.get('translatable', True):
+            return False
+        return True
+
+    return False
+
+def apply_source_workaround(node: Node) -> None:
+    """Apply a workaround for source-attributes of nodes.
+
+    Docutils appends a line number to the source-attribute of nodes if possible.
+    Some builders don't want the line number in the source-attribute.
+    This function removes the line number.
+    """
+    try:
+        source = node.get('source')
+        if source and ':' in source:
+            node['source'] = source.split(':', 1)[0]
+    except Exception:
+        pass
+
 def _deepcopy(el: Element) -> Element:
     """Monkey-patch ```nodes.Element.deepcopy``` for speed."""
-    pass
+    copy = el.copy()
+    copy.children = [child.deepcopy() for child in el.children]
+    for child in copy.children:
+        child.parent = copy
+    return copy
 nodes.Element.deepcopy = _deepcopy
\ No newline at end of file
diff --git a/sphinx/util/osutil.py b/sphinx/util/osutil.py
index 8fe2fac65..581cf23d5 100644
--- a/sphinx/util/osutil.py
+++ b/sphinx/util/osutil.py
@@ -19,19 +19,36 @@ SEP = '/'

 def canon_path(native_path: str | os.PathLike[str], /) -> str:
     """Return path in OS-independent form"""
-    pass
+    return str(native_path).replace(os.path.sep, SEP)

 def path_stabilize(filepath: str | os.PathLike[str], /) -> str:
     """Normalize path separator and unicode string"""
-    pass
+    filepath = str(filepath)
+    filepath = unicodedata.normalize('NFC', filepath)
+    return filepath.replace(os.path.sep, SEP)

 def relative_uri(base: str, to: str) -> str:
     """Return a relative URL from ``base`` to ``to``."""
-    pass
+    if not base or not to:
+        return to
+    b2 = base.split(SEP)
+    t2 = to.split(SEP)
+    # remove common segments
+    for x, y in zip(b2, t2):
+        if x != y:
+            break
+        b2.pop(0)
+        t2.pop(0)
+    if not b2 and not t2:
+        return ''
+    return ('../' * (len(b2) - 1) + './' * bool(b2) +
+            SEP.join(t2) + ('/' if to.endswith('/') else ''))

 def ensuredir(file: str | os.PathLike[str]) -> None:
     """Ensure that a path exists."""
-    pass
+    os.makedirs(file, exist_ok=True)

 def _last_modified_time(source: str | os.PathLike[str], /) -> int:
     """Return the last modified time of ``filename``.
@@ -43,11 +60,13 @@ def _last_modified_time(source: str | os.PathLike[str], /) -> int:
     We prefer to err on the side of re-rendering a file,
     so we round up to the nearest microsecond.
     """
-    pass
+    st = os.stat(source)
+    return int(st.st_mtime * 1_000_000)

 def _copy_times(source: str | os.PathLike[str], dest: str | os.PathLike[str]) -> None:
     """Copy a file's modification times."""
-    pass
+    st = os.stat(source)
+    os.utime(dest, (st.st_atime, st.st_mtime))

 def copyfile(source: str | os.PathLike[str], dest: str | os.PathLike[str], *, force: bool=False) -> None:
     """Copy a file and its modification times, if possible.
@@ -59,9 +78,31 @@ def copyfile(source: str | os.PathLike[str], dest: str | os.PathLike[str], *, fo

     .. note:: :func:`copyfile` is a no-op if *source* and *dest* are identical.
     """
-    pass
+    source = os.fspath(source)
+    dest = os.fspath(dest)
+
+    if not os.path.exists(source):
+        raise FileNotFoundError(source)
+    if source == dest:
+        return
+
+    if os.path.exists(dest) and not force:
+        if filecmp.cmp(source, dest, shallow=True):
+            return
+        msg = __('Cannot copy %r to %r: file exists') % (source, dest)
+        raise OSError(msg)
+
+    try:
+        shutil.copyfile(source, dest)
+        _copy_times(source, dest)
+    except shutil.SameFileError:
+        pass
 _no_fn_re = re.compile('[^a-zA-Z0-9_-]')

+def make_filename(string: str) -> str:
+    """Convert string to a filename-friendly string."""
+    return _no_fn_re.sub('', string)
+
 def relpath(path: str | os.PathLike[str], start: str | os.PathLike[str] | None=os.curdir) -> str:
     """Return a relative filepath to *path* either from the current directory or
     from an optional *start* directory.
@@ -69,10 +110,22 @@ def relpath(path: str | os.PathLike[str], start: str | os.PathLike[str] | None=o
     This is an alternative of ``os.path.relpath()``.  This returns original path
     if *path* and *start* are on different drives (for Windows platform).
     """
-    pass
+    path = os.fspath(path)
+    if start is None:
+        start = os.curdir
+    start = os.fspath(start)
+
+    try:
+        return os.path.relpath(path, start)
+    except ValueError:
+        # if on Windows, and path and start are on different drives, return original path
+        if os.name == 'nt':
+            return path
+        raise
 safe_relpath = relpath
 fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
 abspath = path.abspath
+os_path = path

 class _chdir:
     """Remove this fall-back once support for Python 3.10 is removed."""
@@ -109,7 +162,23 @@ class FileAvoidWrite:

     def close(self) -> None:
         """Stop accepting writes and write file, if needed."""
-        pass
+        if not self._io:
+            return
+
+        content = self._io.getvalue()
+        self._io.close()
+        self._io = None
+
+        try:
+            with open(self._path, 'r', encoding='utf-8') as f:
+                original_content = f.read()
+                if content == original_content:
+                    return
+        except Exception:
+            pass
+
+        with open(self._path, 'w', encoding='utf-8') as f:
+            f.write(content)

     def __enter__(self) -> FileAvoidWrite:
         return self
diff --git a/sphinx/util/parsing.py b/sphinx/util/parsing.py
index c19c62af9..d5b2619a8 100644
--- a/sphinx/util/parsing.py
+++ b/sphinx/util/parsing.py
@@ -8,6 +8,24 @@ if TYPE_CHECKING:
     from collections.abc import Iterator
     from docutils.parsers.rst.states import RSTState

+@contextlib.contextmanager
+def _fresh_title_style_context(state: RSTState) -> Iterator[None]:
+    """Context manager that temporarily resets the title style context.
+
+    This is used when parsing content that comes from a different context,
+    such as docstrings, where title decorations (e.g. underlines) do not
+    need to match the surrounding document.
+    """
+    old_title_styles = state.memo.title_styles
+    old_section_level = state.memo.section_level
+    state.memo.title_styles = []
+    state.memo.section_level = 0
+    try:
+        yield
+    finally:
+        state.memo.title_styles = old_title_styles
+        state.memo.section_level = old_section_level
+
 def nested_parse_to_nodes(state: RSTState, text: str | StringList, *, source: str='<generated text>', offset: int=0, allow_section_headings: bool=True, keep_title_context: bool=False) -> list[Node]:
     """Parse *text* into nodes.

@@ -37,4 +55,22 @@ def nested_parse_to_nodes(state: RSTState, text: str | StringList, *, source: st

     .. versionadded:: 7.4
     """
-    pass
\ No newline at end of file
+    if isinstance(text, str):
+        text = StringList(string2lines(text), source=source)
+
+    node = Element()
+    node.document = state.document
+    if not allow_section_headings:
+        state.nested_parse(text, offset, node, match_titles=False)
+    elif not keep_title_context:
+        with _fresh_title_style_context(state):
+            state.nested_parse(text, offset, node, match_titles=True)
+    else:
+        state.nested_parse(text, offset, node, match_titles=True)
+
+    return node.children
\ No newline at end of file
diff --git a/sphinx/util/tags.py b/sphinx/util/tags.py
index bd097ec83..6af1c649a 100644
--- a/sphinx/util/tags.py
+++ b/sphinx/util/tags.py
@@ -20,7 +20,7 @@ class Tags:
         self._condition_cache: dict[str, bool] = {}

     def __str__(self) -> str:
-        return f'{self.__class__.__name__}({', '.join(sorted(self._tags))})'
+        return f"{self.__class__.__name__}({', '.join(sorted(self._tags))})"

     def __repr__(self) -> str:
         return f'{self.__class__.__name__}({tuple(sorted(self._tags))})'
@@ -38,4 +38,33 @@ class Tags:
         are permitted, and operate on tag names, where truthy values mean
         the tag is present and vice versa.
         """
-        pass
\ No newline at end of file
+        # memoization
+        if condition in self._condition_cache:
+            return self._condition_cache[condition]
+
+        try:
+            parser = BooleanParser(_ENV, condition, state='variable')
+            expr = parser.parse_expression()
+            if not parser.stream.eos:
+                msg = 'chunk after expression'
+                raise ValueError(msg)
+            result = self.eval_node(expr)
+            self._condition_cache[condition] = result
+            return result
+        except Exception as err:
+            warnings.warn('invalid condition: %r: %s' % (condition, err),
+                        RemovedInSphinx90Warning, stacklevel=2)
+            return False
+
+    def eval_node(self, node: jinja2.nodes.Node) -> bool:
+        """Evaluate a parsed Jinja2 node."""
+        if isinstance(node, jinja2.nodes.Name):
+            return node.name in self._tags
+        elif isinstance(node, jinja2.nodes.Not):
+            return not self.eval_node(node.node)
+        elif isinstance(node, jinja2.nodes.And):
+            return self.eval_node(node.left) and self.eval_node(node.right)
+        elif isinstance(node, jinja2.nodes.Or):
+            return self.eval_node(node.left) or self.eval_node(node.right)
+        msg = f'invalid node type: {node.__class__.__name__}'
+        raise ValueError(msg)
\ No newline at end of file
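
If the conftest import error were resolved, the Tags changes at the end of the patch could be sanity-checked in isolation. A rough, hypothetical check, assuming sphinx.util.tags.Tags accepts an iterable of tag names and that the module-level BooleanParser and _ENV referenced by the patch exist as in upstream Sphinx:

    from sphinx.util.tags import Tags

    tags = Tags(['html', 'draft'])
    assert tags.eval_condition('html and not latex')   # 'html' is set, 'latex' is not
    assert not tags.eval_condition('latex or epub')    # neither tag is set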