Reference (Gold): babel

Pytest Summary for tests

status       count
passed        5663
skipped       1042
total         6705
collected     6705

Failed pytests: none

Patch diff

diff --git a/babel/core.py b/babel/core.py
index 44904d4..207c13b 100644
--- a/babel/core.py
+++ b/babel/core.py
@@ -7,29 +7,57 @@
     :copyright: (c) 2013-2023 by the Babel Team.
     :license: BSD, see LICENSE for more details.
 """
+
 from __future__ import annotations
+
 import os
 import pickle
 from collections.abc import Iterable, Mapping
 from typing import TYPE_CHECKING, Any
+
 from babel import localedata
 from babel.plural import PluralRule
-__all__ = ['UnknownLocaleError', 'Locale', 'default_locale',
-    'negotiate_locale', 'parse_locale']
+
+__all__ = ['UnknownLocaleError', 'Locale', 'default_locale', 'negotiate_locale',
+           'parse_locale']
+
 if TYPE_CHECKING:
     from typing_extensions import Literal, TypeAlias
-    _GLOBAL_KEY: TypeAlias = Literal['all_currencies', 'currency_fractions',
-        'language_aliases', 'likely_subtags', 'meta_zones',
-        'parent_exceptions', 'script_aliases', 'territory_aliases',
-        'territory_currencies', 'territory_languages', 'territory_zones',
-        'variant_aliases', 'windows_zone_mapping', 'zone_aliases',
-        'zone_territories']
+
+    _GLOBAL_KEY: TypeAlias = Literal[
+        "all_currencies",
+        "currency_fractions",
+        "language_aliases",
+        "likely_subtags",
+        "meta_zones",
+        "parent_exceptions",
+        "script_aliases",
+        "territory_aliases",
+        "territory_currencies",
+        "territory_languages",
+        "territory_zones",
+        "variant_aliases",
+        "windows_zone_mapping",
+        "zone_aliases",
+        "zone_territories",
+    ]
+
     _global_data: Mapping[_GLOBAL_KEY, Mapping[str, Any]] | None
+
 _global_data = None
 _default_plural_rule = PluralRule({})


-def get_global(key: _GLOBAL_KEY) ->Mapping[str, Any]:
+def _raise_no_data_error():
+    raise RuntimeError('The babel data files are not available. '
+                       'This usually happens because you are using '
+                       'a source checkout from Babel and you did '
+                       'not build the data files.  Just make sure '
+                       'to run "python setup.py import_cldr" before '
+                       'installing the library.')
+
+
+def get_global(key: _GLOBAL_KEY) -> Mapping[str, Any]:
     """Return the dictionary for the given key in the global data.

     The global data is stored in the ``babel/global.dat`` file and contains
@@ -63,18 +91,28 @@ def get_global(key: _GLOBAL_KEY) ->Mapping[str, Any]:

     :param key: the data key
     """
-    pass
-
-
-LOCALE_ALIASES = {'ar': 'ar_SY', 'bg': 'bg_BG', 'bs': 'bs_BA', 'ca':
-    'ca_ES', 'cs': 'cs_CZ', 'da': 'da_DK', 'de': 'de_DE', 'el': 'el_GR',
-    'en': 'en_US', 'es': 'es_ES', 'et': 'et_EE', 'fa': 'fa_IR', 'fi':
-    'fi_FI', 'fr': 'fr_FR', 'gl': 'gl_ES', 'he': 'he_IL', 'hu': 'hu_HU',
-    'id': 'id_ID', 'is': 'is_IS', 'it': 'it_IT', 'ja': 'ja_JP', 'km':
-    'km_KH', 'ko': 'ko_KR', 'lt': 'lt_LT', 'lv': 'lv_LV', 'mk': 'mk_MK',
-    'nl': 'nl_NL', 'nn': 'nn_NO', 'no': 'nb_NO', 'pl': 'pl_PL', 'pt':
-    'pt_PT', 'ro': 'ro_RO', 'ru': 'ru_RU', 'sk': 'sk_SK', 'sl': 'sl_SI',
-    'sv': 'sv_SE', 'th': 'th_TH', 'tr': 'tr_TR', 'uk': 'uk_UA'}
+    global _global_data
+    if _global_data is None:
+        dirname = os.path.join(os.path.dirname(__file__))
+        filename = os.path.join(dirname, 'global.dat')
+        if not os.path.isfile(filename):
+            _raise_no_data_error()
+        with open(filename, 'rb') as fileobj:
+            _global_data = pickle.load(fileobj)
+            assert _global_data is not None
+    return _global_data.get(key, {})
+
+
+LOCALE_ALIASES = {
+    'ar': 'ar_SY', 'bg': 'bg_BG', 'bs': 'bs_BA', 'ca': 'ca_ES', 'cs': 'cs_CZ',
+    'da': 'da_DK', 'de': 'de_DE', 'el': 'el_GR', 'en': 'en_US', 'es': 'es_ES',
+    'et': 'et_EE', 'fa': 'fa_IR', 'fi': 'fi_FI', 'fr': 'fr_FR', 'gl': 'gl_ES',
+    'he': 'he_IL', 'hu': 'hu_HU', 'id': 'id_ID', 'is': 'is_IS', 'it': 'it_IT',
+    'ja': 'ja_JP', 'km': 'km_KH', 'ko': 'ko_KR', 'lt': 'lt_LT', 'lv': 'lv_LV',
+    'mk': 'mk_MK', 'nl': 'nl_NL', 'nn': 'nn_NO', 'no': 'nb_NO', 'pl': 'pl_PL',
+    'pt': 'pt_PT', 'ro': 'ro_RO', 'ru': 'ru_RU', 'sk': 'sk_SK', 'sl': 'sl_SI',
+    'sv': 'sv_SE', 'th': 'th_TH', 'tr': 'tr_TR', 'uk': 'uk_UA',
+}


 class UnknownLocaleError(Exception):
@@ -82,12 +120,14 @@ class UnknownLocaleError(Exception):
     is available.
     """

-    def __init__(self, identifier: str) ->None:
+    def __init__(self, identifier: str) -> None:
         """Create the exception.

         :param identifier: the identifier string of the unsupported locale
         """
-        Exception.__init__(self, f'unknown locale {identifier!r}')
+        Exception.__init__(self, f"unknown locale {identifier!r}")
+
+        #: The identifier of the locale that could not be found.
         self.identifier = identifier


@@ -123,9 +163,14 @@ class Locale:
     For more information see :rfc:`3066`.
     """

-    def __init__(self, language: str, territory: (str | None)=None, script:
-        (str | None)=None, variant: (str | None)=None, modifier: (str |
-        None)=None) ->None:
+    def __init__(
+        self,
+        language: str,
+        territory: str | None = None,
+        script: str | None = None,
+        variant: str | None = None,
+        modifier: str | None = None,
+    ) -> None:
         """Initialize the locale object from the given identifier components.

         >>> locale = Locale('en', 'US')
@@ -142,20 +187,25 @@ class Locale:
         :raise `UnknownLocaleError`: if no locale data is available for the
                                      requested locale
         """
+        #: the language code
         self.language = language
+        #: the territory (country or region) code
         self.territory = territory
+        #: the script code
         self.script = script
+        #: the variant code
         self.variant = variant
+        #: the modifier
         self.modifier = modifier
         self.__data: localedata.LocaleDataDict | None = None
+
         identifier = str(self)
         identifier_without_modifier = identifier.partition('@')[0]
         if not localedata.exists(identifier_without_modifier):
             raise UnknownLocaleError(identifier)

     @classmethod
-    def default(cls, category: (str | None)=None, aliases: Mapping[str, str
-        ]=LOCALE_ALIASES) ->Locale:
+    def default(cls, category: str | None = None, aliases: Mapping[str, str] = LOCALE_ALIASES) -> Locale:
         """Return the system default locale for the specified category.

         >>> for name in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LC_MESSAGES']:
@@ -174,12 +224,19 @@ class Locale:
         :param category: one of the ``LC_XXX`` environment variable names
         :param aliases: a dictionary of aliases for locale identifiers
         """
-        pass
+        # XXX: use likely subtag expansion here instead of the
+        # aliases dictionary.
+        locale_string = default_locale(category, aliases=aliases)
+        return cls.parse(locale_string)

     @classmethod
-    def negotiate(cls, preferred: Iterable[str], available: Iterable[str],
-        sep: str='_', aliases: Mapping[str, str]=LOCALE_ALIASES) ->(Locale |
-        None):
+    def negotiate(
+        cls,
+        preferred: Iterable[str],
+        available: Iterable[str],
+        sep: str = '_',
+        aliases: Mapping[str, str] = LOCALE_ALIASES,
+    ) -> Locale | None:
         """Find the best match between available and requested locale strings.

         >>> Locale.negotiate(['de_DE', 'en_US'], ['de_DE', 'de_AT'])
@@ -199,11 +256,19 @@ class Locale:
         :param available: the list of locale identifiers available
         :param aliases: a dictionary of aliases for locale identifiers
         """
-        pass
+        identifier = negotiate_locale(preferred, available, sep=sep,
+                                      aliases=aliases)
+        if identifier:
+            return Locale.parse(identifier, sep=sep)
+        return None

     @classmethod
-    def parse(cls, identifier: (str | Locale | None), sep: str='_',
-        resolve_likely_subtags: bool=True) ->Locale:
+    def parse(
+        cls,
+        identifier: str | Locale | None,
+        sep: str = '_',
+        resolve_likely_subtags: bool = True,
+    ) -> Locale:
         """Create a `Locale` instance for the given locale identifier.

         >>> l = Locale.parse('de-DE', sep='-')
@@ -257,39 +322,124 @@ class Locale:
                                      requested locale
         :raise `TypeError`: if the identifier is not a string or a `Locale`
         """
-        pass
-
-    def __eq__(self, other: object) ->bool:
+        if isinstance(identifier, Locale):
+            return identifier
+        elif not isinstance(identifier, str):
+            raise TypeError(f"Unexpected value for identifier: {identifier!r}")
+
+        parts = parse_locale(identifier, sep=sep)
+        input_id = get_locale_identifier(parts)
+
+        def _try_load(parts):
+            try:
+                return cls(*parts)
+            except UnknownLocaleError:
+                return None
+
+        def _try_load_reducing(parts):
+            # Success on first hit, return it.
+            locale = _try_load(parts)
+            if locale is not None:
+                return locale
+
+            # Now try without script and variant
+            locale = _try_load(parts[:2])
+            if locale is not None:
+                return locale
+
+        locale = _try_load(parts)
+        if locale is not None:
+            return locale
+        if not resolve_likely_subtags:
+            raise UnknownLocaleError(input_id)
+
+        # From here onwards is some very bad likely subtag resolving.  This
+        # whole logic is not entirely correct but good enough (tm) for the
+        # time being.  This has been added so that zh_TW does not cause
+        # errors for people when they upgrade.  Later we should properly
+        # implement ICU like fuzzy locale objects and provide a way to
+        # maximize and minimize locale tags.
+
+        if len(parts) == 5:
+            language, territory, script, variant, modifier = parts
+        else:
+            language, territory, script, variant = parts
+            modifier = None
+        language = get_global('language_aliases').get(language, language)
+        territory = get_global('territory_aliases').get(territory or '', (territory,))[0]
+        script = get_global('script_aliases').get(script or '', script)
+        variant = get_global('variant_aliases').get(variant or '', variant)
+
+        if territory == 'ZZ':
+            territory = None
+        if script == 'Zzzz':
+            script = None
+
+        parts = language, territory, script, variant, modifier
+
+        # First match: try the whole identifier
+        new_id = get_locale_identifier(parts)
+        likely_subtag = get_global('likely_subtags').get(new_id)
+        if likely_subtag is not None:
+            locale = _try_load_reducing(parse_locale(likely_subtag))
+            if locale is not None:
+                return locale
+
+        # If we did not find anything so far, try again with a
+        # simplified identifier that is just the language
+        likely_subtag = get_global('likely_subtags').get(language)
+        if likely_subtag is not None:
+            parts2 = parse_locale(likely_subtag)
+            if len(parts2) == 5:
+                language2, _, script2, variant2, modifier2 = parts2
+            else:
+                language2, _, script2, variant2 = parts2
+                modifier2 = None
+            locale = _try_load_reducing((language2, territory, script2, variant2, modifier2))
+            if locale is not None:
+                return locale
+
+        raise UnknownLocaleError(input_id)
+
+    def __eq__(self, other: object) -> bool:
         for key in ('language', 'territory', 'script', 'variant', 'modifier'):
             if not hasattr(other, key):
                 return False
-        return self.language == getattr(other, 'language'
-            ) and self.territory == getattr(other, 'territory'
-            ) and self.script == getattr(other, 'script'
-            ) and self.variant == getattr(other, 'variant'
-            ) and self.modifier == getattr(other, 'modifier')
+        return (
+            self.language == getattr(other, 'language') and  # noqa: B009
+            self.territory == getattr(other, 'territory') and  # noqa: B009
+            self.script == getattr(other, 'script') and  # noqa: B009
+            self.variant == getattr(other, 'variant') and  # noqa: B009
+            self.modifier == getattr(other, 'modifier')  # noqa: B009
+        )

-    def __ne__(self, other: object) ->bool:
+    def __ne__(self, other: object) -> bool:
         return not self.__eq__(other)

-    def __hash__(self) ->int:
-        return hash((self.language, self.territory, self.script, self.
-            variant, self.modifier))
+    def __hash__(self) -> int:
+        return hash((self.language, self.territory, self.script,
+                     self.variant, self.modifier))

-    def __repr__(self) ->str:
+    def __repr__(self) -> str:
         parameters = ['']
         for key in ('territory', 'script', 'variant', 'modifier'):
             value = getattr(self, key)
             if value is not None:
-                parameters.append(f'{key}={value!r}')
+                parameters.append(f"{key}={value!r}")
         return f"Locale({self.language!r}{', '.join(parameters)})"

-    def __str__(self) ->str:
-        return get_locale_identifier((self.language, self.territory, self.
-            script, self.variant, self.modifier))
+    def __str__(self) -> str:
+        return get_locale_identifier((self.language, self.territory,
+                                      self.script, self.variant,
+                                      self.modifier))
+
+    @property
+    def _data(self) -> localedata.LocaleDataDict:
+        if self.__data is None:
+            self.__data = localedata.LocaleDataDict(localedata.load(str(self)))
+        return self.__data

-    def get_display_name(self, locale: (Locale | str | None)=None) ->(str |
-        None):
+    def get_display_name(self, locale: Locale | str | None = None) -> str | None:
         """Return the display name of the locale using the given locale.

         The display name will include the language, territory, script, and
@@ -305,9 +455,27 @@ class Locale:

         :param locale: the locale to use
         """
-        pass
-    display_name = property(get_display_name, doc=
-        """        The localized display name of the locale.
+        if locale is None:
+            locale = self
+        locale = Locale.parse(locale)
+        retval = locale.languages.get(self.language)
+        if retval and (self.territory or self.script or self.variant):
+            details = []
+            if self.script:
+                details.append(locale.scripts.get(self.script))
+            if self.territory:
+                details.append(locale.territories.get(self.territory))
+            if self.variant:
+                details.append(locale.variants.get(self.variant))
+            if self.modifier:
+                details.append(self.modifier)
+            detail_string = ', '.join(atom for atom in details if atom)
+            if detail_string:
+                retval += f" ({detail_string})"
+        return retval
+
+    display_name = property(get_display_name, doc="""\
+        The localized display name of the locale.

         >>> Locale('en').display_name
         u'English'
@@ -317,11 +485,9 @@ class Locale:
         u'svenska'

         :type: `unicode`
-        """
-        )
+        """)

-    def get_language_name(self, locale: (Locale | str | None)=None) ->(str |
-        None):
+    def get_language_name(self, locale: Locale | str | None = None) -> str | None:
         """Return the language of this locale in the given locale.

         >>> Locale('zh', 'CN', script='Hans').get_language_name('de')
@@ -331,41 +497,48 @@ class Locale:

         :param locale: the locale to use
         """
-        pass
-    language_name = property(get_language_name, doc=
-        """        The localized language name of the locale.
+        if locale is None:
+            locale = self
+        locale = Locale.parse(locale)
+        return locale.languages.get(self.language)
+
+    language_name = property(get_language_name, doc="""\
+        The localized language name of the locale.

         >>> Locale('en', 'US').language_name
         u'English'
-    """
-        )
+    """)

-    def get_territory_name(self, locale: (Locale | str | None)=None) ->(str |
-        None):
+    def get_territory_name(self, locale: Locale | str | None = None) -> str | None:
         """Return the territory name in the given locale."""
-        pass
-    territory_name = property(get_territory_name, doc=
-        """        The localized territory name of the locale if available.
+        if locale is None:
+            locale = self
+        locale = Locale.parse(locale)
+        return locale.territories.get(self.territory or '')
+
+    territory_name = property(get_territory_name, doc="""\
+        The localized territory name of the locale if available.

         >>> Locale('de', 'DE').territory_name
         u'Deutschland'
-    """
-        )
+    """)

-    def get_script_name(self, locale: (Locale | str | None)=None) ->(str | None
-        ):
+    def get_script_name(self, locale: Locale | str | None = None) -> str | None:
         """Return the script name in the given locale."""
-        pass
-    script_name = property(get_script_name, doc=
-        """        The localized script name of the locale if available.
+        if locale is None:
+            locale = self
+        locale = Locale.parse(locale)
+        return locale.scripts.get(self.script or '')
+
+    script_name = property(get_script_name, doc="""\
+        The localized script name of the locale if available.

         >>> Locale('sr', 'ME', script='Latn').script_name
         u'latinica'
-    """
-        )
+    """)

     @property
-    def english_name(self) ->(str | None):
+    def english_name(self) -> str | None:
         """The english display name of the locale.

         >>> Locale('de').english_name
@@ -374,10 +547,12 @@ class Locale:
         u'German (Germany)'

         :type: `unicode`"""
-        pass
+        return self.get_display_name(Locale('en'))
+
+    # { General Locale Display Names

     @property
-    def languages(self) ->localedata.LocaleDataDict:
+    def languages(self) -> localedata.LocaleDataDict:
         """Mapping of language codes to translated language names.

         >>> Locale('de', 'DE').languages['ja']
@@ -386,10 +561,10 @@ class Locale:
         See `ISO 639 <http://www.loc.gov/standards/iso639-2/>`_ for
         more information.
         """
-        pass
+        return self._data['languages']

     @property
-    def scripts(self) ->localedata.LocaleDataDict:
+    def scripts(self) -> localedata.LocaleDataDict:
         """Mapping of script codes to translated script names.

         >>> Locale('en', 'US').scripts['Hira']
@@ -398,10 +573,10 @@ class Locale:
         See `ISO 15924 <http://www.evertype.com/standards/iso15924/>`_
         for more information.
         """
-        pass
+        return self._data['scripts']

     @property
-    def territories(self) ->localedata.LocaleDataDict:
+    def territories(self) -> localedata.LocaleDataDict:
         """Mapping of script codes to translated script names.

         >>> Locale('es', 'CO').territories['DE']
@@ -410,19 +585,21 @@ class Locale:
         See `ISO 3166 <http://www.iso.org/iso/en/prods-services/iso3166ma/>`_
         for more information.
         """
-        pass
+        return self._data['territories']

     @property
-    def variants(self) ->localedata.LocaleDataDict:
+    def variants(self) -> localedata.LocaleDataDict:
         """Mapping of script codes to translated script names.

         >>> Locale('de', 'DE').variants['1901']
         u'Alte deutsche Rechtschreibung'
         """
-        pass
+        return self._data['variants']
+
+    # { Number Formatting

     @property
-    def currencies(self) ->localedata.LocaleDataDict:
+    def currencies(self) -> localedata.LocaleDataDict:
         """Mapping of currency codes to translated currency names.  This
         only returns the generic form of the currency name, not the count
         specific one.  If an actual number is requested use the
@@ -433,10 +610,10 @@ class Locale:
         >>> Locale('de', 'DE').currencies['COP']
         u'Kolumbianischer Peso'
         """
-        pass
+        return self._data['currency_names']

     @property
-    def currency_symbols(self) ->localedata.LocaleDataDict:
+    def currency_symbols(self) -> localedata.LocaleDataDict:
         """Mapping of currency codes to symbols.

         >>> Locale('en', 'US').currency_symbols['USD']
@@ -444,10 +621,10 @@ class Locale:
         >>> Locale('es', 'CO').currency_symbols['USD']
         u'US$'
         """
-        pass
+        return self._data['currency_symbols']

     @property
-    def number_symbols(self) ->localedata.LocaleDataDict:
+    def number_symbols(self) -> localedata.LocaleDataDict:
         """Symbols used in number formatting by number system.

         .. note:: The format of the value returned may change between
@@ -460,10 +637,10 @@ class Locale:
         >>> Locale('fa', 'IR').number_symbols["latn"]['decimal']
         u'.'
         """
-        pass
+        return self._data['number_symbols']

     @property
-    def other_numbering_systems(self) ->localedata.LocaleDataDict:
+    def other_numbering_systems(self) -> localedata.LocaleDataDict:
         """
         Mapping of other numbering systems available for the locale.
         See: https://www.unicode.org/reports/tr35/tr35-numbers.html#otherNumberingSystems
@@ -474,18 +651,18 @@ class Locale:
         .. note:: The format of the value returned may change between
                   Babel versions.
         """
-        pass
+        return self._data['numbering_systems']

     @property
-    def default_numbering_system(self) ->str:
+    def default_numbering_system(self) -> str:
         """The default numbering system used by the locale.
         >>> Locale('el', 'GR').default_numbering_system
         u'latn'
         """
-        pass
+        return self._data['default_numbering_system']

     @property
-    def decimal_formats(self) ->localedata.LocaleDataDict:
+    def decimal_formats(self) -> localedata.LocaleDataDict:
         """Locale patterns for decimal number formatting.

         .. note:: The format of the value returned may change between
@@ -494,10 +671,10 @@ class Locale:
         >>> Locale('en', 'US').decimal_formats[None]
         <NumberPattern u'#,##0.###'>
         """
-        pass
+        return self._data['decimal_formats']

     @property
-    def compact_decimal_formats(self) ->localedata.LocaleDataDict:
+    def compact_decimal_formats(self) -> localedata.LocaleDataDict:
         """Locale patterns for compact decimal number formatting.

         .. note:: The format of the value returned may change between
@@ -506,10 +683,10 @@ class Locale:
         >>> Locale('en', 'US').compact_decimal_formats["short"]["one"]["1000"]
         <NumberPattern u'0K'>
         """
-        pass
+        return self._data['compact_decimal_formats']

     @property
-    def currency_formats(self) ->localedata.LocaleDataDict:
+    def currency_formats(self) -> localedata.LocaleDataDict:
         """Locale patterns for currency number formatting.

         .. note:: The format of the value returned may change between
@@ -520,10 +697,10 @@ class Locale:
         >>> Locale('en', 'US').currency_formats['accounting']
         <NumberPattern u'\\xa4#,##0.00;(\\xa4#,##0.00)'>
         """
-        pass
+        return self._data['currency_formats']

     @property
-    def compact_currency_formats(self) ->localedata.LocaleDataDict:
+    def compact_currency_formats(self) -> localedata.LocaleDataDict:
         """Locale patterns for compact currency number formatting.

         .. note:: The format of the value returned may change between
@@ -532,10 +709,10 @@ class Locale:
         >>> Locale('en', 'US').compact_currency_formats["short"]["one"]["1000"]
         <NumberPattern u'¤0K'>
         """
-        pass
+        return self._data['compact_currency_formats']

     @property
-    def percent_formats(self) ->localedata.LocaleDataDict:
+    def percent_formats(self) -> localedata.LocaleDataDict:
         """Locale patterns for percent number formatting.

         .. note:: The format of the value returned may change between
@@ -544,10 +721,10 @@ class Locale:
         >>> Locale('en', 'US').percent_formats[None]
         <NumberPattern u'#,##0%'>
         """
-        pass
+        return self._data['percent_formats']

     @property
-    def scientific_formats(self) ->localedata.LocaleDataDict:
+    def scientific_formats(self) -> localedata.LocaleDataDict:
         """Locale patterns for scientific number formatting.

         .. note:: The format of the value returned may change between
@@ -556,60 +733,65 @@ class Locale:
         >>> Locale('en', 'US').scientific_formats[None]
         <NumberPattern u'#E0'>
         """
-        pass
+        return self._data['scientific_formats']
+
+    # { Calendar Information and Date Formatting

     @property
-    def periods(self) ->localedata.LocaleDataDict:
+    def periods(self) -> localedata.LocaleDataDict:
         """Locale display names for day periods (AM/PM).

         >>> Locale('en', 'US').periods['am']
         u'AM'
         """
-        pass
+        try:
+            return self._data['day_periods']['stand-alone']['wide']
+        except KeyError:
+            return localedata.LocaleDataDict({})  # pragma: no cover

     @property
-    def day_periods(self) ->localedata.LocaleDataDict:
+    def day_periods(self) -> localedata.LocaleDataDict:
         """Locale display names for various day periods (not necessarily only AM/PM).

         These are not meant to be used without the relevant `day_period_rules`.
         """
-        pass
+        return self._data['day_periods']

     @property
-    def day_period_rules(self) ->localedata.LocaleDataDict:
+    def day_period_rules(self) -> localedata.LocaleDataDict:
         """Day period rules for the locale.  Used by `get_period_id`.
         """
-        pass
+        return self._data.get('day_period_rules', localedata.LocaleDataDict({}))

     @property
-    def days(self) ->localedata.LocaleDataDict:
+    def days(self) -> localedata.LocaleDataDict:
         """Locale display names for weekdays.

         >>> Locale('de', 'DE').days['format']['wide'][3]
         u'Donnerstag'
         """
-        pass
+        return self._data['days']

     @property
-    def months(self) ->localedata.LocaleDataDict:
+    def months(self) -> localedata.LocaleDataDict:
         """Locale display names for months.

         >>> Locale('de', 'DE').months['format']['wide'][10]
         u'Oktober'
         """
-        pass
+        return self._data['months']

     @property
-    def quarters(self) ->localedata.LocaleDataDict:
+    def quarters(self) -> localedata.LocaleDataDict:
         """Locale display names for quarters.

         >>> Locale('de', 'DE').quarters['format']['wide'][1]
         u'1. Quartal'
         """
-        pass
+        return self._data['quarters']

     @property
-    def eras(self) ->localedata.LocaleDataDict:
+    def eras(self) -> localedata.LocaleDataDict:
         """Locale display names for eras.

         .. note:: The format of the value returned may change between
@@ -620,10 +802,10 @@ class Locale:
         >>> Locale('en', 'US').eras['abbreviated'][0]
         u'BC'
         """
-        pass
+        return self._data['eras']

     @property
-    def time_zones(self) ->localedata.LocaleDataDict:
+    def time_zones(self) -> localedata.LocaleDataDict:
         """Locale display names for time zones.

         .. note:: The format of the value returned may change between
@@ -632,12 +814,12 @@ class Locale:
         >>> Locale('en', 'US').time_zones['Europe/London']['long']['daylight']
         u'British Summer Time'
         >>> Locale('en', 'US').time_zones['America/St_Johns']['city']
-        u'St. John’s'
+        u'St. John\u2019s'
         """
-        pass
+        return self._data['time_zones']

     @property
-    def meta_zones(self) ->localedata.LocaleDataDict:
+    def meta_zones(self) -> localedata.LocaleDataDict:
         """Locale display names for meta time zones.

         Meta time zones are basically groups of different Olson time zones that
@@ -651,10 +833,10 @@ class Locale:

         .. versionadded:: 0.9
         """
-        pass
+        return self._data['meta_zones']

     @property
-    def zone_formats(self) ->localedata.LocaleDataDict:
+    def zone_formats(self) -> localedata.LocaleDataDict:
         """Patterns related to the formatting of time zones.

         .. note:: The format of the value returned may change between
@@ -667,10 +849,10 @@ class Locale:

         .. versionadded:: 0.9
         """
-        pass
+        return self._data['zone_formats']

     @property
-    def first_week_day(self) ->int:
+    def first_week_day(self) -> int:
         """The first day of a week, with 0 being Monday.

         >>> Locale('de', 'DE').first_week_day
@@ -678,38 +860,38 @@ class Locale:
         >>> Locale('en', 'US').first_week_day
         6
         """
-        pass
+        return self._data['week_data']['first_day']

     @property
-    def weekend_start(self) ->int:
+    def weekend_start(self) -> int:
         """The day the weekend starts, with 0 being Monday.

         >>> Locale('de', 'DE').weekend_start
         5
         """
-        pass
+        return self._data['week_data']['weekend_start']

     @property
-    def weekend_end(self) ->int:
+    def weekend_end(self) -> int:
         """The day the weekend ends, with 0 being Monday.

         >>> Locale('de', 'DE').weekend_end
         6
         """
-        pass
+        return self._data['week_data']['weekend_end']

     @property
-    def min_week_days(self) ->int:
+    def min_week_days(self) -> int:
         """The minimum number of days in a week so that the week is counted as
         the first week of a year or month.

         >>> Locale('de', 'DE').min_week_days
         4
         """
-        pass
+        return self._data['week_data']['min_days']

     @property
-    def date_formats(self) ->localedata.LocaleDataDict:
+    def date_formats(self) -> localedata.LocaleDataDict:
         """Locale patterns for date formatting.

         .. note:: The format of the value returned may change between
@@ -720,24 +902,24 @@ class Locale:
         >>> Locale('fr', 'FR').date_formats['long']
         <DateTimePattern u'd MMMM y'>
         """
-        pass
+        return self._data['date_formats']

     @property
-    def time_formats(self) ->localedata.LocaleDataDict:
+    def time_formats(self) -> localedata.LocaleDataDict:
         """Locale patterns for time formatting.

         .. note:: The format of the value returned may change between
                   Babel versions.

         >>> Locale('en', 'US').time_formats['short']
-        <DateTimePattern u'h:mm a'>
+        <DateTimePattern u'h:mm\u202fa'>
         >>> Locale('fr', 'FR').time_formats['long']
         <DateTimePattern u'HH:mm:ss z'>
         """
-        pass
+        return self._data['time_formats']

     @property
-    def datetime_formats(self) ->localedata.LocaleDataDict:
+    def datetime_formats(self) -> localedata.LocaleDataDict:
         """Locale patterns for datetime formatting.

         .. note:: The format of the value returned may change between
@@ -748,10 +930,10 @@ class Locale:
         >>> Locale('th').datetime_formats['medium']
         u'{1} {0}'
         """
-        pass
+        return self._data['datetime_formats']

     @property
-    def datetime_skeletons(self) ->localedata.LocaleDataDict:
+    def datetime_skeletons(self) -> localedata.LocaleDataDict:
         """Locale patterns for formatting parts of a datetime.

         >>> Locale('en').datetime_skeletons['MEd']
@@ -761,10 +943,10 @@ class Locale:
         >>> Locale('fr').datetime_skeletons['H']
         <DateTimePattern u"HH 'h'">
         """
-        pass
+        return self._data['datetime_skeletons']

     @property
-    def interval_formats(self) ->localedata.LocaleDataDict:
+    def interval_formats(self) -> localedata.LocaleDataDict:
         """Locale patterns for interval formatting.

         .. note:: The format of the value returned may change between
@@ -774,7 +956,7 @@ class Locale:
         smallest changing component:

         >>> Locale('fi_FI').interval_formats['MEd']['d']
-        [u'E d. – ', u'E d.M.']
+        [u'E d.\u2009\u2013\u2009', u'E d.M.']

         .. seealso::

@@ -783,10 +965,10 @@ class Locale:

         :rtype: dict[str, dict[str, list[str]]]
         """
-        pass
+        return self._data['interval_formats']

     @property
-    def plural_form(self) ->PluralRule:
+    def plural_form(self) -> PluralRule:
         """Plural rules for the locale.

         >>> Locale('en').plural_form(1)
@@ -798,10 +980,10 @@ class Locale:
         >>> Locale('ru').plural_form(100)
         'many'
         """
-        pass
+        return self._data.get('plural_form', _default_plural_rule)

     @property
-    def list_patterns(self) ->localedata.LocaleDataDict:
+    def list_patterns(self) -> localedata.LocaleDataDict:
         """Patterns for generating lists

         .. note:: The format of the value returned may change between
@@ -814,10 +996,10 @@ class Locale:
         >>> Locale('en_GB').list_patterns['standard']['end']
         u'{0} and {1}'
         """
-        pass
+        return self._data['list_patterns']

     @property
-    def ordinal_form(self) ->PluralRule:
+    def ordinal_form(self) -> PluralRule:
         """Plural rules for the locale.

         >>> Locale('en').ordinal_form(1)
@@ -831,10 +1013,10 @@ class Locale:
         >>> Locale('ru').ordinal_form(100)
         'other'
         """
-        pass
+        return self._data.get('ordinal_form', _default_plural_rule)

     @property
-    def measurement_systems(self) ->localedata.LocaleDataDict:
+    def measurement_systems(self) -> localedata.LocaleDataDict:
         """Localized names for various measurement systems.

         >>> Locale('fr', 'FR').measurement_systems['US']
@@ -843,10 +1025,10 @@ class Locale:
         u'US'

         """
-        pass
+        return self._data['measurement_systems']

     @property
-    def character_order(self) ->str:
+    def character_order(self) -> str:
         """The text direction for the language.

         >>> Locale('de', 'DE').character_order
@@ -854,10 +1036,10 @@ class Locale:
         >>> Locale('ar', 'SA').character_order
         'right-to-left'
         """
-        pass
+        return self._data['character_order']

     @property
-    def text_direction(self) ->str:
+    def text_direction(self) -> str:
         """The text direction for the language in CSS short-hand form.

         >>> Locale('de', 'DE').text_direction
@@ -865,10 +1047,10 @@ class Locale:
         >>> Locale('ar', 'SA').text_direction
         'rtl'
         """
-        pass
+        return ''.join(word[0] for word in self.character_order.split('-'))

     @property
-    def unit_display_names(self) ->localedata.LocaleDataDict:
+    def unit_display_names(self) -> localedata.LocaleDataDict:
         """Display names for units of measurement.

         .. seealso::
@@ -879,11 +1061,10 @@ class Locale:
                   Babel versions.

         """
-        pass
+        return self._data['unit_display_names']


-def default_locale(category: (str | None)=None, aliases: Mapping[str, str]=
-    LOCALE_ALIASES) ->(str | None):
+def default_locale(category: str | None = None, aliases: Mapping[str, str] = LOCALE_ALIASES) -> str | None:
     """Returns the system default locale for a given category, based on
     environment variables.

@@ -910,11 +1091,26 @@ def default_locale(category: (str | None)=None, aliases: Mapping[str, str]=
     :param category: one of the ``LC_XXX`` environment variable names
     :param aliases: a dictionary of aliases for locale identifiers
     """
-    pass
-
-
-def negotiate_locale(preferred: Iterable[str], available: Iterable[str],
-    sep: str='_', aliases: Mapping[str, str]=LOCALE_ALIASES) ->(str | None):
+    varnames = (category, 'LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG')
+    for name in filter(None, varnames):
+        locale = os.getenv(name)
+        if locale:
+            if name == 'LANGUAGE' and ':' in locale:
+                # the LANGUAGE variable may contain a colon-separated list of
+                # language codes; we just pick the language on the list
+                locale = locale.split(':')[0]
+            if locale.split('.')[0] in ('C', 'POSIX'):
+                locale = 'en_US_POSIX'
+            elif aliases and locale in aliases:
+                locale = aliases[locale]
+            try:
+                return get_locale_identifier(parse_locale(locale))
+            except ValueError:
+                pass
+    return None
+
+
+def negotiate_locale(preferred: Iterable[str], available: Iterable[str], sep: str = '_', aliases: Mapping[str, str] = LOCALE_ALIASES) -> str | None:
     """Find the best match between available and requested locale strings.

     >>> negotiate_locale(['de_DE', 'en_US'], ['de_DE', 'de_AT'])
@@ -960,12 +1156,27 @@ def negotiate_locale(preferred: Iterable[str], available: Iterable[str],
                 strings
     :param aliases: a dictionary of aliases for locale identifiers
     """
-    pass
-
-
-def parse_locale(identifier: str, sep: str='_') ->(tuple[str, str | None, 
-    str | None, str | None] | tuple[str, str | None, str | None, str | None,
-    str | None]):
+    available = [a.lower() for a in available if a]
+    for locale in preferred:
+        ll = locale.lower()
+        if ll in available:
+            return locale
+        if aliases:
+            alias = aliases.get(ll)
+            if alias:
+                alias = alias.replace('_', sep)
+                if alias.lower() in available:
+                    return alias
+        parts = locale.split(sep)
+        if len(parts) > 1 and parts[0].lower() in available:
+            return parts[0]
+    return None
+
+
+def parse_locale(
+    identifier: str,
+    sep: str = '_',
+) -> tuple[str, str | None, str | None, str | None] | tuple[str, str | None, str | None, str | None, str | None]:
     """Parse a locale identifier into a tuple of the form ``(language,
     territory, script, variant, modifier)``.

@@ -1020,13 +1231,50 @@ def parse_locale(identifier: str, sep: str='_') ->(tuple[str, str | None,
     :raise `ValueError`: if the string does not appear to be a valid locale
                          identifier
     """
-    pass
-
-
-def get_locale_identifier(tup: (tuple[str] | tuple[str, str | None] | tuple
-    [str, str | None, str | None] | tuple[str, str | None, str | None, str |
-    None] | tuple[str, str | None, str | None, str | None, str | None]),
-    sep: str='_') ->str:
+    identifier, _, modifier = identifier.partition('@')
+    if '.' in identifier:
+        # this is probably the charset/encoding, which we don't care about
+        identifier = identifier.split('.', 1)[0]
+
+    parts = identifier.split(sep)
+    lang = parts.pop(0).lower()
+    if not lang.isalpha():
+        raise ValueError(f"expected only letters, got {lang!r}")
+
+    script = territory = variant = None
+    if parts and len(parts[0]) == 4 and parts[0].isalpha():
+        script = parts.pop(0).title()
+
+    if parts:
+        if len(parts[0]) == 2 and parts[0].isalpha():
+            territory = parts.pop(0).upper()
+        elif len(parts[0]) == 3 and parts[0].isdigit():
+            territory = parts.pop(0)
+
+    if parts and (
+        len(parts[0]) == 4 and parts[0][0].isdigit() or
+        len(parts[0]) >= 5 and parts[0][0].isalpha()
+    ):
+        variant = parts.pop().upper()
+
+    if parts:
+        raise ValueError(f"{identifier!r} is not a valid locale identifier")
+
+    # TODO(3.0): always return a 5-tuple
+    if modifier:
+        return lang, territory, script, variant, modifier
+    else:
+        return lang, territory, script, variant
+
+
+def get_locale_identifier(
+    tup: tuple[str]
+    | tuple[str, str | None]
+    | tuple[str, str | None, str | None]
+    | tuple[str, str | None, str | None, str | None]
+    | tuple[str, str | None, str | None, str | None, str | None],
+    sep: str = "_",
+) -> str:
     """The reverse of :func:`parse_locale`.  It creates a locale identifier out
     of a ``(language, territory, script, variant, modifier)`` tuple.  Items can be set to
     ``None`` and trailing ``None``\\s can also be left out of the tuple.
@@ -1042,4 +1290,7 @@ def get_locale_identifier(tup: (tuple[str] | tuple[str, str | None] | tuple
     :param tup: the tuple as returned by :func:`parse_locale`.
     :param sep: the separator for the identifier.
     """
-    pass
+    tup = tuple(tup[:5])  # type: ignore  # length should be no more than 5
+    lang, territory, script, variant, modifier = tup + (None,) * (5 - len(tup))
+    ret = sep.join(filter(None, (lang, script, territory, variant)))
+    return f'{ret}@{modifier}' if modifier else ret
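
The babel/core.py changes above restore the locale parsing and negotiation helpers. As a quick illustrative sketch (not part of the patch itself), the snippet below exercises those public helpers; the expected outputs mirror the doctests quoted in the diff.

```python
# Illustrative usage of the babel.core helpers implemented in the patch above.
# Expected values follow the doctests shown in the diff; this is not part of the patch.
from babel.core import Locale, negotiate_locale, parse_locale, get_locale_identifier

# parse_locale() splits an identifier into (language, territory, script, variant[, modifier]).
parts = parse_locale('zh_Hans_CN')
print(parts)                          # ('zh', 'CN', 'Hans', None)

# get_locale_identifier() is the inverse operation.
print(get_locale_identifier(parts))   # 'zh_Hans_CN'

# Locale.parse() accepts a custom separator and resolves likely subtags by default.
locale = Locale.parse('de-DE', sep='-')
print(locale.language, locale.territory)   # de DE

# negotiate_locale() finds the best match between preferred and available identifiers.
print(negotiate_locale(['de_DE', 'en_US'], ['de_DE', 'de_AT']))   # de_DE
```
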
diff --git a/babel/dates.py b/babel/dates.py
index 894d925..40d9509 100644
--- a/babel/dates.py
+++ b/babel/dates.py
@@ -14,37 +14,67 @@
     :copyright: (c) 2013-2023 by the Babel Team.
     :license: BSD, see LICENSE for more details.
 """
+
 from __future__ import annotations
+
 import re
 import warnings
 from functools import lru_cache
 from typing import TYPE_CHECKING, SupportsInt
+
 try:
     import pytz
 except ModuleNotFoundError:
     pytz = None
     import zoneinfo
+
 import datetime
 from collections.abc import Iterable
+
 from babel import localtime
 from babel.core import Locale, default_locale, get_global
 from babel.localedata import LocaleDataDict
+
 if TYPE_CHECKING:
     from typing_extensions import Literal, TypeAlias
     _Instant: TypeAlias = datetime.date | datetime.time | float | None
-    _PredefinedTimeFormat: TypeAlias = Literal['full', 'long', 'medium',
-        'short']
+    _PredefinedTimeFormat: TypeAlias = Literal['full', 'long', 'medium', 'short']
     _Context: TypeAlias = Literal['format', 'stand-alone']
-    _DtOrTzinfo: TypeAlias = (datetime.datetime | datetime.tzinfo | str |
-        int | datetime.time | None)
-NO_INHERITANCE_MARKER = '∅∅∅'
+    _DtOrTzinfo: TypeAlias = datetime.datetime | datetime.tzinfo | str | int | datetime.time | None
+
+# "If a given short metazone form is known NOT to be understood in a given
+#  locale and the parent locale has this value such that it would normally
+#  be inherited, the inheritance of this value can be explicitly disabled by
+#  use of the 'no inheritance marker' as the value, which is 3 simultaneous [sic]
+#  empty set characters ( U+2205 )."
+#  - https://www.unicode.org/reports/tr35/tr35-dates.html#Metazone_Names
+
+NO_INHERITANCE_MARKER = '\u2205\u2205\u2205'
+
 UTC = datetime.timezone.utc
 LOCALTZ = localtime.LOCALTZ
+
 LC_TIME = default_locale('LC_TIME')


-def _get_dt_and_tzinfo(dt_or_tzinfo: _DtOrTzinfo) ->tuple[datetime.datetime |
-    None, datetime.tzinfo]:
+def _localize(tz: datetime.tzinfo, dt: datetime.datetime) -> datetime.datetime:
+    # Support localizing with both pytz and zoneinfo tzinfos
+    # nothing to do
+    if dt.tzinfo is tz:
+        return dt
+
+    if hasattr(tz, 'localize'):  # pytz
+        return tz.localize(dt)
+
+    if dt.tzinfo is None:
+        # convert naive to localized
+        return dt.replace(tzinfo=tz)
+
+    # convert timezones
+    return dt.astimezone(tz)
+
+
+def _get_dt_and_tzinfo(dt_or_tzinfo: _DtOrTzinfo) -> tuple[datetime.datetime | None, datetime.tzinfo]:
     """
     Parse a `dt_or_tzinfo` value into a datetime and a tzinfo.

@@ -52,19 +82,40 @@ def _get_dt_and_tzinfo(dt_or_tzinfo: _DtOrTzinfo) ->tuple[datetime.datetime |

     :rtype: tuple[datetime, tzinfo]
     """
-    pass
-
-
-def _get_tz_name(dt_or_tzinfo: _DtOrTzinfo) ->str:
+    if dt_or_tzinfo is None:
+        dt = datetime.datetime.now()
+        tzinfo = LOCALTZ
+    elif isinstance(dt_or_tzinfo, str):
+        dt = None
+        tzinfo = get_timezone(dt_or_tzinfo)
+    elif isinstance(dt_or_tzinfo, int):
+        dt = None
+        tzinfo = UTC
+    elif isinstance(dt_or_tzinfo, (datetime.datetime, datetime.time)):
+        dt = _get_datetime(dt_or_tzinfo)
+        tzinfo = dt.tzinfo if dt.tzinfo is not None else UTC
+    else:
+        dt = None
+        tzinfo = dt_or_tzinfo
+    return dt, tzinfo
+
+
+def _get_tz_name(dt_or_tzinfo: _DtOrTzinfo) -> str:
     """
     Get the timezone name out of a time, datetime, or tzinfo object.

     :rtype: str
     """
-    pass
+    dt, tzinfo = _get_dt_and_tzinfo(dt_or_tzinfo)
+    if hasattr(tzinfo, 'zone'):  # pytz object
+        return tzinfo.zone
+    elif hasattr(tzinfo, 'key') and tzinfo.key is not None:  # ZoneInfo object
+        return tzinfo.key
+    else:
+        return tzinfo.tzname(dt or datetime.datetime.now(UTC))


-def _get_datetime(instant: _Instant) ->datetime.datetime:
+def _get_datetime(instant: _Instant) -> datetime.datetime:
     """
     Get a datetime out of an "instant" (date, time, datetime, number).

@@ -95,11 +146,19 @@ def _get_datetime(instant: _Instant) ->datetime.datetime:
     :return: a datetime
     :rtype: datetime
     """
-    pass
-
-
-def _ensure_datetime_tzinfo(dt: datetime.datetime, tzinfo: (datetime.tzinfo |
-    None)=None) ->datetime.datetime:
+    if instant is None:
+        return datetime.datetime.now(UTC).replace(tzinfo=None)
+    elif isinstance(instant, (int, float)):
+        return datetime.datetime.fromtimestamp(instant, UTC).replace(tzinfo=None)
+    elif isinstance(instant, datetime.time):
+        return datetime.datetime.combine(datetime.date.today(), instant)
+    elif isinstance(instant, datetime.date) and not isinstance(instant, datetime.datetime):
+        return datetime.datetime.combine(instant, datetime.time())
+    # TODO (3.x): Add an assertion/type check for this fallthrough branch:
+    return instant
+
+
+def _ensure_datetime_tzinfo(dt: datetime.datetime, tzinfo: datetime.tzinfo | None = None) -> datetime.datetime:
     """
     Ensure the datetime passed has an attached tzinfo.

@@ -120,11 +179,19 @@ def _ensure_datetime_tzinfo(dt: datetime.datetime, tzinfo: (datetime.tzinfo |
     :return: datetime with tzinfo
     :rtype: datetime
     """
-    pass
-
-
-def _get_time(time: (datetime.time | datetime.datetime | None), tzinfo: (
-    datetime.tzinfo | None)=None) ->datetime.time:
+    if dt.tzinfo is None:
+        dt = dt.replace(tzinfo=UTC)
+    if tzinfo is not None:
+        dt = dt.astimezone(get_timezone(tzinfo))
+        if hasattr(tzinfo, 'normalize'):  # pytz
+            dt = tzinfo.normalize(dt)
+    return dt
+
+
+def _get_time(
+    time: datetime.time | datetime.datetime | None,
+    tzinfo: datetime.tzinfo | None = None,
+) -> datetime.time:
     """
     Get a timezoned time from a given instant.

@@ -133,10 +200,26 @@ def _get_time(time: (datetime.time | datetime.datetime | None), tzinfo: (
     :param time: time, datetime or None
     :rtype: time
     """
-    pass
-
-
-def get_timezone(zone: (str | datetime.tzinfo | None)=None) ->datetime.tzinfo:
+    if time is None:
+        time = datetime.datetime.now(UTC)
+    elif isinstance(time, (int, float)):
+        time = datetime.datetime.fromtimestamp(time, UTC)
+
+    if time.tzinfo is None:
+        time = time.replace(tzinfo=UTC)
+
+    if isinstance(time, datetime.datetime):
+        if tzinfo is not None:
+            time = time.astimezone(tzinfo)
+            if hasattr(tzinfo, 'normalize'):  # pytz
+                time = tzinfo.normalize(time)
+        time = time.timetz()
+    elif tzinfo is not None:
+        time = time.replace(tzinfo=tzinfo)
+    return time
+
+
+def get_timezone(zone: str | datetime.tzinfo | None = None) -> datetime.tzinfo:
     """Looks up a timezone by name and returns it.  The timezone object
     returned comes from ``pytz`` or ``zoneinfo``, whichever is available.
     It corresponds to the `tzinfo` interface and can be used with all of
@@ -148,12 +231,28 @@ def get_timezone(zone: (str | datetime.tzinfo | None)=None) ->datetime.tzinfo:
     :param zone: the name of the timezone to look up.  If a timezone object
                  itself is passed in, it's returned unchanged.
     """
-    pass
-
-
-def get_period_names(width: Literal['abbreviated', 'narrow', 'wide']='wide',
-    context: _Context='stand-alone', locale: (Locale | str | None)=LC_TIME
-    ) ->LocaleDataDict:
+    if zone is None:
+        return LOCALTZ
+    if not isinstance(zone, str):
+        return zone
+
+    if pytz:
+        try:
+            return pytz.timezone(zone)
+        except pytz.UnknownTimeZoneError as e:
+            exc = e
+    else:
+        assert zoneinfo
+        try:
+            return zoneinfo.ZoneInfo(zone)
+        except zoneinfo.ZoneInfoNotFoundError as e:
+            exc = e
+
+    raise LookupError(f"Unknown timezone {zone}") from exc
+
+
+def get_period_names(width: Literal['abbreviated', 'narrow', 'wide'] = 'wide',
+                     context: _Context = 'stand-alone', locale: Locale | str | None = LC_TIME) -> LocaleDataDict:
     """Return the names for day periods (AM/PM) used by the locale.

     >>> get_period_names(locale='en_US')['am']
@@ -163,12 +262,11 @@ def get_period_names(width: Literal['abbreviated', 'narrow', 'wide']='wide',
     :param context: the context, either "format" or "stand-alone"
     :param locale: the `Locale` object, or a locale string
     """
-    pass
+    return Locale.parse(locale).day_periods[context][width]


-def get_day_names(width: Literal['abbreviated', 'narrow', 'short', 'wide']=
-    'wide', context: _Context='format', locale: (Locale | str | None)=LC_TIME
-    ) ->LocaleDataDict:
+def get_day_names(width: Literal['abbreviated', 'narrow', 'short', 'wide'] = 'wide',
+                  context: _Context = 'format', locale: Locale | str | None = LC_TIME) -> LocaleDataDict:
     """Return the day names used by the locale for the specified format.

     >>> get_day_names('wide', locale='en_US')[1]
@@ -184,12 +282,11 @@ def get_day_names(width: Literal['abbreviated', 'narrow', 'short', 'wide']=
     :param context: the context, either "format" or "stand-alone"
     :param locale: the `Locale` object, or a locale string
     """
-    pass
+    return Locale.parse(locale).days[context][width]


-def get_month_names(width: Literal['abbreviated', 'narrow', 'wide']='wide',
-    context: _Context='format', locale: (Locale | str | None)=LC_TIME
-    ) ->LocaleDataDict:
+def get_month_names(width: Literal['abbreviated', 'narrow', 'wide'] = 'wide',
+                    context: _Context = 'format', locale: Locale | str | None = LC_TIME) -> LocaleDataDict:
     """Return the month names used by the locale for the specified format.

     >>> get_month_names('wide', locale='en_US')[1]
@@ -203,12 +300,11 @@ def get_month_names(width: Literal['abbreviated', 'narrow', 'wide']='wide',
     :param context: the context, either "format" or "stand-alone"
     :param locale: the `Locale` object, or a locale string
     """
-    pass
+    return Locale.parse(locale).months[context][width]


-def get_quarter_names(width: Literal['abbreviated', 'narrow', 'wide']=
-    'wide', context: _Context='format', locale: (Locale | str | None)=LC_TIME
-    ) ->LocaleDataDict:
+def get_quarter_names(width: Literal['abbreviated', 'narrow', 'wide'] = 'wide',
+                      context: _Context = 'format', locale: Locale | str | None = LC_TIME) -> LocaleDataDict:
     """Return the quarter names used by the locale for the specified format.

     >>> get_quarter_names('wide', locale='en_US')[1]
@@ -222,11 +318,11 @@ def get_quarter_names(width: Literal['abbreviated', 'narrow', 'wide']=
     :param context: the context, either "format" or "stand-alone"
     :param locale: the `Locale` object, or a locale string
     """
-    pass
+    return Locale.parse(locale).quarters[context][width]


-def get_era_names(width: Literal['abbreviated', 'narrow', 'wide']='wide',
-    locale: (Locale | str | None)=LC_TIME) ->LocaleDataDict:
+def get_era_names(width: Literal['abbreviated', 'narrow', 'wide'] = 'wide',
+                  locale: Locale | str | None = LC_TIME) -> LocaleDataDict:
     """Return the era names used by the locale for the specified format.

     >>> get_era_names('wide', locale='en_US')[1]
@@ -237,11 +333,10 @@ def get_era_names(width: Literal['abbreviated', 'narrow', 'wide']='wide',
     :param width: the width to use, either "wide", "abbreviated", or "narrow"
     :param locale: the `Locale` object, or a locale string
     """
-    pass
+    return Locale.parse(locale).eras[width]


-def get_date_format(format: _PredefinedTimeFormat='medium', locale: (Locale |
-    str | None)=LC_TIME) ->DateTimePattern:
+def get_date_format(format: _PredefinedTimeFormat = 'medium', locale: Locale | str | None = LC_TIME) -> DateTimePattern:
     """Return the date formatting patterns used by the locale for the specified
     format.

@@ -254,11 +349,10 @@ def get_date_format(format: _PredefinedTimeFormat='medium', locale: (Locale |
                    "short"
     :param locale: the `Locale` object, or a locale string
     """
-    pass
+    return Locale.parse(locale).date_formats[format]


-def get_datetime_format(format: _PredefinedTimeFormat='medium', locale: (
-    Locale | str | None)=LC_TIME) ->DateTimePattern:
+def get_datetime_format(format: _PredefinedTimeFormat = 'medium', locale: Locale | str | None = LC_TIME) -> DateTimePattern:
     """Return the datetime formatting patterns used by the locale for the
     specified format.

@@ -269,16 +363,18 @@ def get_datetime_format(format: _PredefinedTimeFormat='medium', locale: (
                    "short"
     :param locale: the `Locale` object, or a locale string
     """
-    pass
+    patterns = Locale.parse(locale).datetime_formats
+    if format not in patterns:
+        format = None
+    return patterns[format]


-def get_time_format(format: _PredefinedTimeFormat='medium', locale: (Locale |
-    str | None)=LC_TIME) ->DateTimePattern:
+def get_time_format(format: _PredefinedTimeFormat = 'medium', locale: Locale | str | None = LC_TIME) -> DateTimePattern:
     """Return the time formatting patterns used by the locale for the specified
     format.

     >>> get_time_format(locale='en_US')
-    <DateTimePattern u'h:mm:ss a'>
+    <DateTimePattern u'h:mm:ss\u202fa'>
     >>> get_time_format('full', locale='de_DE')
     <DateTimePattern u'HH:mm:ss zzzz'>

@@ -286,12 +382,15 @@ def get_time_format(format: _PredefinedTimeFormat='medium', locale: (Locale |
                    "short"
     :param locale: the `Locale` object, or a locale string
     """
-    pass
+    return Locale.parse(locale).time_formats[format]


-def get_timezone_gmt(datetime: _Instant=None, width: Literal['long',
-    'short', 'iso8601', 'iso8601_short']='long', locale: (Locale | str |
-    None)=LC_TIME, return_z: bool=False) ->str:
+def get_timezone_gmt(
+    datetime: _Instant = None,
+    width: Literal['long', 'short', 'iso8601', 'iso8601_short'] = 'long',
+    locale: Locale | str | None = LC_TIME,
+    return_z: bool = False,
+) -> str:
     """Return the timezone associated with the given `datetime` object formatted
     as string indicating the offset from GMT.

@@ -327,11 +426,30 @@ def get_timezone_gmt(datetime: _Instant=None, width: Literal['long',
     :param return_z: True or False; Function returns indicator "Z"
                      when local time offset is 0
     """
-    pass
-
-
-def get_timezone_location(dt_or_tzinfo: _DtOrTzinfo=None, locale: (Locale |
-    str | None)=LC_TIME, return_city: bool=False) ->str:
+    datetime = _ensure_datetime_tzinfo(_get_datetime(datetime))
+    locale = Locale.parse(locale)
+
+    offset = datetime.tzinfo.utcoffset(datetime)
+    seconds = offset.days * 24 * 60 * 60 + offset.seconds
+    hours, seconds = divmod(seconds, 3600)
+    if return_z and hours == 0 and seconds == 0:
+        return 'Z'
+    elif seconds == 0 and width == 'iso8601_short':
+        return '%+03d' % hours
+    elif width == 'short' or width == 'iso8601_short':
+        pattern = '%+03d%02d'
+    elif width == 'iso8601':
+        pattern = '%+03d:%02d'
+    else:
+        pattern = locale.zone_formats['gmt'] % '%+03d:%02d'
+    return pattern % (hours, seconds // 60)
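# NOTE (editorial, not part of the patch): a minimal sketch of the offset
# arithmetic above, using only the stdlib and an assumed fixed +05:30 zone.
from datetime import datetime, timedelta, timezone

_dt = datetime(2007, 4, 1, 15, 30, tzinfo=timezone(timedelta(hours=5, minutes=30)))
_offset = _dt.tzinfo.utcoffset(_dt)
_seconds = _offset.days * 24 * 60 * 60 + _offset.seconds
_hours, _seconds = divmod(_seconds, 3600)
assert '%+03d:%02d' % (_hours, _seconds // 60) == '+05:30'  # the 'iso8601' shape
assert '%+03d%02d' % (_hours, _seconds // 60) == '+0530'    # the 'short' shape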
+
+
+def get_timezone_location(
+    dt_or_tzinfo: _DtOrTzinfo = None,
+    locale: Locale | str | None = LC_TIME,
+    return_city: bool = False,
+) -> str:
     """Return a representation of the given timezone using "location format".

     The result depends on both the local display name of the country and the
@@ -366,14 +484,56 @@ def get_timezone_location(dt_or_tzinfo: _DtOrTzinfo=None, locale: (Locale |
     :return: the localized timezone name using location format

     """
-    pass
-
-
-def get_timezone_name(dt_or_tzinfo: _DtOrTzinfo=None, width: Literal['long',
-    'short']='long', uncommon: bool=False, locale: (Locale | str | None)=
-    LC_TIME, zone_variant: (Literal['generic', 'daylight', 'standard'] |
-    None)=None, return_zone: bool=False) ->str:
-    """Return the localized display name for the given timezone. The timezone
+    locale = Locale.parse(locale)
+
+    zone = _get_tz_name(dt_or_tzinfo)
+
+    # Get the canonical time-zone code
+    zone = get_global('zone_aliases').get(zone, zone)
+
+    info = locale.time_zones.get(zone, {})
+
+    # Otherwise, if there is only one timezone for the country, return the
+    # localized country name
+    region_format = locale.zone_formats['region']
+    territory = get_global('zone_territories').get(zone)
+    if territory not in locale.territories:
+        territory = 'ZZ'  # invalid/unknown
+    territory_name = locale.territories[territory]
+    if not return_city and territory and len(get_global('territory_zones').get(territory, [])) == 1:
+        return region_format % territory_name
+
+    # Otherwise, include the city in the output
+    fallback_format = locale.zone_formats['fallback']
+    if 'city' in info:
+        city_name = info['city']
+    else:
+        metazone = get_global('meta_zones').get(zone)
+        metazone_info = locale.meta_zones.get(metazone, {})
+        if 'city' in metazone_info:
+            city_name = metazone_info['city']
+        elif '/' in zone:
+            city_name = zone.split('/', 1)[1].replace('_', ' ')
+        else:
+            city_name = zone.replace('_', ' ')
+
+    if return_city:
+        return city_name
+    return region_format % (fallback_format % {
+        '0': city_name,
+        '1': territory_name,
+    })
+
+
+def get_timezone_name(
+    dt_or_tzinfo: _DtOrTzinfo = None,
+    width: Literal['long', 'short'] = 'long',
+    uncommon: bool = False,
+    locale: Locale | str | None = LC_TIME,
+    zone_variant: Literal['generic', 'daylight', 'standard'] | None = None,
+    return_zone: bool = False,
+) -> str:
+    r"""Return the localized display name for the given timezone. The timezone
     may be specified using a ``datetime`` or `tzinfo` object.

     >>> from datetime import time
@@ -402,9 +562,9 @@ def get_timezone_name(dt_or_tzinfo: _DtOrTzinfo=None, width: Literal['long',

     >>> tz = get_timezone('Europe/Berlin')
     >>> get_timezone_name(tz, locale='de_DE')
-    u'Mitteleurop\\xe4ische Zeit'
+    u'Mitteleurop\xe4ische Zeit'
     >>> get_timezone_name(tz, locale='pt_BR')
-    u'Hor\\xe1rio da Europa Central'
+    u'Hor\xe1rio da Europa Central'

     On the other hand, if the country uses multiple timezones, the city is also
     included in the representation:
@@ -443,12 +603,55 @@ def get_timezone_name(dt_or_tzinfo: _DtOrTzinfo=None, width: Literal['long',
     :param return_zone: True or False. If true then function
                         returns long time zone ID
     """
-    pass
+    dt, tzinfo = _get_dt_and_tzinfo(dt_or_tzinfo)
+    locale = Locale.parse(locale)

+    zone = _get_tz_name(dt_or_tzinfo)

-def format_date(date: (datetime.date | None)=None, format: (
-    _PredefinedTimeFormat | str)='medium', locale: (Locale | str | None)=
-    LC_TIME) ->str:
+    if zone_variant is None:
+        if dt is None:
+            zone_variant = 'generic'
+        else:
+            dst = tzinfo.dst(dt)
+            zone_variant = "daylight" if dst else "standard"
+    else:
+        if zone_variant not in ('generic', 'standard', 'daylight'):
+            raise ValueError('Invalid zone variation')
+
+    # Get the canonical time-zone code
+    zone = get_global('zone_aliases').get(zone, zone)
+    if return_zone:
+        return zone
+    info = locale.time_zones.get(zone, {})
+    # Try explicitly translated zone names first
+    if width in info and zone_variant in info[width]:
+        return info[width][zone_variant]
+
+    metazone = get_global('meta_zones').get(zone)
+    if metazone:
+        metazone_info = locale.meta_zones.get(metazone, {})
+        if width in metazone_info:
+            name = metazone_info[width].get(zone_variant)
+            if width == 'short' and name == NO_INHERITANCE_MARKER:
+                # If the short form is marked no-inheritance,
+                # try to fall back to the long name instead.
+                name = metazone_info.get('long', {}).get(zone_variant)
+            if name:
+                return name
+
+    # If we have a concrete datetime, we assume that the result can't be
+    # independent of daylight savings time, so we return the GMT offset
+    if dt is not None:
+        return get_timezone_gmt(dt, width=width, locale=locale)
+
+    return get_timezone_location(dt_or_tzinfo, locale=locale)
+
+
+def format_date(
+    date: datetime.date | None = None,
+    format: _PredefinedTimeFormat | str = 'medium',
+    locale: Locale | str | None = LC_TIME,
+) -> str:
     """Return a date formatted according to the given pattern.

     >>> from datetime import date
@@ -470,18 +673,30 @@ def format_date(date: (datetime.date | None)=None, format: (
                    date/time pattern
     :param locale: a `Locale` object or a locale identifier
     """
-    pass
-
-
-def format_datetime(datetime: _Instant=None, format: (_PredefinedTimeFormat |
-    str)='medium', tzinfo: (datetime.tzinfo | None)=None, locale: (Locale |
-    str | None)=LC_TIME) ->str:
-    """Return a date formatted according to the given pattern.
+    if date is None:
+        date = datetime.date.today()
+    elif isinstance(date, datetime.datetime):
+        date = date.date()
+
+    locale = Locale.parse(locale)
+    if format in ('full', 'long', 'medium', 'short'):
+        format = get_date_format(format, locale=locale)
+    pattern = parse_pattern(format)
+    return pattern.apply(date, locale)
+
+
+def format_datetime(
+    datetime: _Instant = None,
+    format: _PredefinedTimeFormat | str = 'medium',
+    tzinfo: datetime.tzinfo | None = None,
+    locale: Locale | str | None = LC_TIME,
+) -> str:
+    r"""Return a date formatted according to the given pattern.

     >>> from datetime import datetime
     >>> dt = datetime(2007, 4, 1, 15, 30)
     >>> format_datetime(dt, locale='en_US')
-    u'Apr 1, 2007, 3:30:00\\u202fPM'
+    u'Apr 1, 2007, 3:30:00\u202fPM'

     For any pattern requiring the display of the timezone:

@@ -499,18 +714,30 @@ def format_datetime(datetime: _Instant=None, format: (_PredefinedTimeFormat |
     :param tzinfo: the timezone to apply to the time for display
     :param locale: a `Locale` object or a locale identifier
     """
-    pass
-
-
-def format_time(time: (datetime.time | datetime.datetime | float | None)=
-    None, format: (_PredefinedTimeFormat | str)='medium', tzinfo: (datetime
-    .tzinfo | None)=None, locale: (Locale | str | None)=LC_TIME) ->str:
-    """Return a time formatted according to the given pattern.
+    datetime = _ensure_datetime_tzinfo(_get_datetime(datetime), tzinfo)
+
+    locale = Locale.parse(locale)
+    if format in ('full', 'long', 'medium', 'short'):
+        return get_datetime_format(format, locale=locale) \
+            .replace("'", "") \
+            .replace('{0}', format_time(datetime, format, tzinfo=None,
+                                        locale=locale)) \
+            .replace('{1}', format_date(datetime, format, locale=locale))
+    else:
+        return parse_pattern(format).apply(datetime, locale)
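# NOTE (editorial, not part of the patch): for the named widths the datetime
# pattern is just a template that combines the formatted date ({1}) and time
# ({0}); the template shape below is an assumption for illustration only.
_template = '{1}, {0}'
_composed = _template.replace('{0}', '3:30:00 PM').replace('{1}', 'Apr 1, 2007')
assert _composed == 'Apr 1, 2007, 3:30:00 PM'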
+
+
+def format_time(
+    time: datetime.time | datetime.datetime | float | None = None,
+    format: _PredefinedTimeFormat | str = 'medium',
+    tzinfo: datetime.tzinfo | None = None, locale: Locale | str | None = LC_TIME,
+) -> str:
+    r"""Return a time formatted according to the given pattern.

     >>> from datetime import datetime, time
     >>> t = time(15, 30)
     >>> format_time(t, locale='en_US')
-    u'3:30:00\\u202fPM'
+    u'3:30:00\u202fPM'
     >>> format_time(t, format='short', locale='de_DE')
     u'15:30'

@@ -548,10 +775,10 @@ def format_time(time: (datetime.time | datetime.datetime | float | None)=
     >>> t = time(15, 30)
     >>> format_time(t, format='full', tzinfo=get_timezone('Europe/Paris'),
     ...             locale='fr_FR')  # doctest: +SKIP
-    u'15:30:00 heure normale d\\u2019Europe centrale'
+    u'15:30:00 heure normale d\u2019Europe centrale'
     >>> format_time(t, format='full', tzinfo=get_timezone('US/Eastern'),
     ...             locale='en_US')  # doctest: +SKIP
-    u'3:30:00\\u202fPM Eastern Standard Time'
+    u'3:30:00\u202fPM Eastern Standard Time'

     :param time: the ``time`` or ``datetime`` object; if `None`, the current
                  time in UTC is used
@@ -560,13 +787,27 @@ def format_time(time: (datetime.time | datetime.datetime | float | None)=
     :param tzinfo: the time-zone to apply to the time for display
     :param locale: a `Locale` object or a locale identifier
     """
-    pass

+    # get a reference date in case we need to find the right timezone variant
+    # in the pattern
+    ref_date = time.date() if isinstance(time, datetime.datetime) else None
+
+    time = _get_time(time, tzinfo)
+
+    locale = Locale.parse(locale)
+    if format in ('full', 'long', 'medium', 'short'):
+        format = get_time_format(format, locale=locale)
+    return parse_pattern(format).apply(time, locale, reference_date=ref_date)

-def format_skeleton(skeleton: str, datetime: _Instant=None, tzinfo: (
-    datetime.tzinfo | None)=None, fuzzy: bool=True, locale: (Locale | str |
-    None)=LC_TIME) ->str:
-    """Return a time and/or date formatted according to the given pattern.
+
+def format_skeleton(
+    skeleton: str,
+    datetime: _Instant = None,
+    tzinfo: datetime.tzinfo | None = None,
+    fuzzy: bool = True,
+    locale: Locale | str | None = LC_TIME,
+) -> str:
+    r"""Return a time and/or date formatted according to the given pattern.

     The skeletons are defined in the CLDR data and provide more flexibility
     than the simple short/long/medium formats, but are a bit harder to use.
@@ -597,19 +838,32 @@ def format_skeleton(skeleton: str, datetime: _Instant=None, tzinfo: (
                   close enough to it.
     :param locale: a `Locale` object or a locale identifier
     """
-    pass
-
-
-TIMEDELTA_UNITS: tuple[tuple[str, int], ...] = (('year', 3600 * 24 * 365),
-    ('month', 3600 * 24 * 30), ('week', 3600 * 24 * 7), ('day', 3600 * 24),
-    ('hour', 3600), ('minute', 60), ('second', 1))
-
-
-def format_timedelta(delta: (datetime.timedelta | int), granularity:
-    Literal['year', 'month', 'week', 'day', 'hour', 'minute', 'second']=
-    'second', threshold: float=0.85, add_direction: bool=False, format:
-    Literal['narrow', 'short', 'medium', 'long']='long', locale: (Locale |
-    str | None)=LC_TIME) ->str:
+    locale = Locale.parse(locale)
+    if fuzzy and skeleton not in locale.datetime_skeletons:
+        skeleton = match_skeleton(skeleton, locale.datetime_skeletons)
+    format = locale.datetime_skeletons[skeleton]
+    return format_datetime(datetime, format, tzinfo, locale)
+
+
+TIMEDELTA_UNITS: tuple[tuple[str, int], ...] = (
+    ('year', 3600 * 24 * 365),
+    ('month', 3600 * 24 * 30),
+    ('week', 3600 * 24 * 7),
+    ('day', 3600 * 24),
+    ('hour', 3600),
+    ('minute', 60),
+    ('second', 1),
+)
+
+
+def format_timedelta(
+    delta: datetime.timedelta | int,
+    granularity: Literal['year', 'month', 'week', 'day', 'hour', 'minute', 'second'] = 'second',
+    threshold: float = .85,
+    add_direction: bool = False,
+    format: Literal['narrow', 'short', 'medium', 'long'] = 'long',
+    locale: Locale | str | None = LC_TIME,
+) -> str:
     """Return a time delta according to the rules of the given locale.

     >>> from datetime import timedelta
@@ -665,27 +919,104 @@ def format_timedelta(delta: (datetime.timedelta | int), granularity:
                    maintain compatibility)
     :param locale: a `Locale` object or a locale identifier
     """
-    pass
-
-
-def format_interval(start: _Instant, end: _Instant, skeleton: (str | None)=
-    None, tzinfo: (datetime.tzinfo | None)=None, fuzzy: bool=True, locale:
-    (Locale | str | None)=LC_TIME) ->str:
+    if format not in ('narrow', 'short', 'medium', 'long'):
+        raise TypeError('Format must be one of "narrow", "short" or "long"')
+    if format == 'medium':
+        warnings.warn(
+            '"medium" value for format param of format_timedelta'
+            ' is deprecated. Use "long" instead',
+            category=DeprecationWarning,
+            stacklevel=2,
+        )
+        format = 'long'
+    if isinstance(delta, datetime.timedelta):
+        seconds = int((delta.days * 86400) + delta.seconds)
+    else:
+        seconds = delta
+    locale = Locale.parse(locale)
+
+    def _iter_patterns(a_unit):
+        if add_direction:
+            unit_rel_patterns = locale._data['date_fields'][a_unit]
+            if seconds >= 0:
+                yield unit_rel_patterns['future']
+            else:
+                yield unit_rel_patterns['past']
+        a_unit = f"duration-{a_unit}"
+        yield locale._data['unit_patterns'].get(a_unit, {}).get(format)
+
+    for unit, secs_per_unit in TIMEDELTA_UNITS:
+        value = abs(seconds) / secs_per_unit
+        if value >= threshold or unit == granularity:
+            if unit == granularity and value > 0:
+                value = max(1, value)
+            value = int(round(value))
+            plural_form = locale.plural_form(value)
+            pattern = None
+            for patterns in _iter_patterns(unit):
+                if patterns is not None:
+                    pattern = patterns.get(plural_form) or patterns.get('other')
+                    break
+            # This really should not happen
+            if pattern is None:
+                return ''
+            return pattern.replace('{0}', str(value))
+
+    return ''
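# NOTE (editorial, not part of the patch): a plain-Python sketch of the unit
# selection above -- the first unit whose magnitude reaches the threshold (or
# that matches the requested granularity) wins, and the value is then rounded,
# which is why a 90 minute delta comes out as roughly "2 hours".
_threshold = 0.85
_seconds = 90 * 60
for _unit, _secs_per_unit in (('hour', 3600), ('minute', 60), ('second', 1)):
    _value = _seconds / _secs_per_unit
    if _value >= _threshold:
        break
assert (_unit, int(round(_value))) == ('hour', 2)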
+
+
+def _format_fallback_interval(
+    start: _Instant,
+    end: _Instant,
+    skeleton: str | None,
+    tzinfo: datetime.tzinfo | None,
+    locale: Locale | str | None = LC_TIME,
+) -> str:
+    if skeleton in locale.datetime_skeletons:  # Use the given skeleton
+        format = lambda dt: format_skeleton(skeleton, dt, tzinfo, locale=locale)
+    elif all((isinstance(d, datetime.date) and not isinstance(d, datetime.datetime)) for d in (start, end)):  # Both are just dates
+        format = lambda dt: format_date(dt, locale=locale)
+    elif all((isinstance(d, datetime.time) and not isinstance(d, datetime.date)) for d in (start, end)):  # Both are times
+        format = lambda dt: format_time(dt, tzinfo=tzinfo, locale=locale)
+    else:
+        format = lambda dt: format_datetime(dt, tzinfo=tzinfo, locale=locale)
+
+    formatted_start = format(start)
+    formatted_end = format(end)
+
+    if formatted_start == formatted_end:
+        return format(start)
+
+    return (
+        locale.interval_formats.get(None, "{0}-{1}").
+        replace("{0}", formatted_start).
+        replace("{1}", formatted_end)
+    )
+
+
+def format_interval(
+    start: _Instant,
+    end: _Instant,
+    skeleton: str | None = None,
+    tzinfo: datetime.tzinfo | None = None,
+    fuzzy: bool = True,
+    locale: Locale | str | None = LC_TIME,
+) -> str:
     """
     Format an interval between two instants according to the locale's rules.

     >>> from datetime import date, time
     >>> format_interval(date(2016, 1, 15), date(2016, 1, 17), "yMd", locale="fi")
-    u'15.–17.1.2016'
+    u'15.\u201317.1.2016'

     >>> format_interval(time(12, 12), time(16, 16), "Hm", locale="en_GB")
-    '12:12–16:16'
+    '12:12\u201316:16'

     >>> format_interval(time(5, 12), time(16, 16), "hm", locale="en_US")
-    '5:12 AM – 4:16 PM'
+    '5:12\u202fAM\u2009–\u20094:16\u202fPM'

     >>> format_interval(time(16, 18), time(16, 24), "Hm", locale="it")
-    '16:18–16:24'
+    '16:18\u201316:24'

     If the start instant equals the end instant, the interval is formatted like the instant.

@@ -695,13 +1026,13 @@ def format_interval(start: _Instant, end: _Instant, skeleton: (str | None)=
     Unknown skeletons fall back to "default" formatting.

     >>> format_interval(date(2015, 1, 1), date(2017, 1, 1), "wzq", locale="ja")
-    '2015/01/01~2017/01/01'
+    '2015/01/01\uff5e2017/01/01'

     >>> format_interval(time(16, 18), time(16, 24), "xxx", locale="ja")
-    '16:18:00~16:24:00'
+    '16:18:00\uff5e16:24:00'

     >>> format_interval(date(2016, 1, 15), date(2016, 1, 17), "xxx", locale="de")
-    '15.01.2016 – 17.01.2016'
+    '15.01.2016\u2009–\u200917.01.2016'

     :param start: First instant (datetime/date/time)
     :param end: Second instant (datetime/date/time)
@@ -712,12 +1043,68 @@ def format_interval(start: _Instant, end: _Instant, skeleton: (str | None)=
     :param locale: A locale object or identifier.
     :return: Formatted interval
     """
-    pass
+    locale = Locale.parse(locale)
+
+    # NB: The quote comments below are from the algorithm description in
+    #     https://www.unicode.org/reports/tr35/tr35-dates.html#intervalFormats
+
+    # > Look for the intervalFormatItem element that matches the "skeleton",
+    # > starting in the current locale and then following the locale fallback
+    # > chain up to, but not including root.
+
+    interval_formats = locale.interval_formats
+
+    if skeleton not in interval_formats or not skeleton:
+        # > If no match was found from the previous step, check what the closest
+        # > match is in the fallback locale chain, as in availableFormats. That
+        # > is, this allows for adjusting the string value field's width,
+        # > including adjusting between "MMM" and "MMMM", and using different
+        # > variants of the same field, such as 'v' and 'z'.
+        if skeleton and fuzzy:
+            skeleton = match_skeleton(skeleton, interval_formats)
+        else:
+            skeleton = None
+        if not skeleton:  # Still no match whatsoever?
+            # > Otherwise, format the start and end datetime using the fallback pattern.
+            return _format_fallback_interval(start, end, skeleton, tzinfo, locale)
+
+    skel_formats = interval_formats[skeleton]
+
+    if start == end:
+        return format_skeleton(skeleton, start, tzinfo, fuzzy=fuzzy, locale=locale)
+
+    start = _ensure_datetime_tzinfo(_get_datetime(start), tzinfo=tzinfo)
+    end = _ensure_datetime_tzinfo(_get_datetime(end), tzinfo=tzinfo)

+    start_fmt = DateTimeFormat(start, locale=locale)
+    end_fmt = DateTimeFormat(end, locale=locale)

-def get_period_id(time: _Instant, tzinfo: (datetime.tzinfo | None)=None,
-    type: (Literal['selection'] | None)=None, locale: (Locale | str | None)
-    =LC_TIME) ->str:
+    # > If a match is found from previous steps, compute the calendar field
+    # > with the greatest difference between start and end datetime. If there
+    # > is no difference among any of the fields in the pattern, format as a
+    # > single date using availableFormats, and return.
+
+    for field in PATTERN_CHAR_ORDER:  # These are in largest-to-smallest order
+        if field in skel_formats and start_fmt.extract(field) != end_fmt.extract(field):
+            # > If there is a match, use the pieces of the corresponding pattern to
+            # > format the start and end datetime, as above.
+            return "".join(
+                parse_pattern(pattern).apply(instant, locale)
+                for pattern, instant
+                in zip(skel_formats[field], (start, end))
+            )
+
+    # > Otherwise, format the start and end datetime using the fallback pattern.
+
+    return _format_fallback_interval(start, end, skeleton, tzinfo, locale)
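# NOTE (editorial, not part of the patch): a plain-Python sketch of the
# "greatest difference" step -- walk the fields from largest to smallest and
# split on the first one that differs between the two instants.
from datetime import date

_start, _end = date(2016, 1, 15), date(2016, 1, 17)
_fields = (('y', lambda d: d.year), ('M', lambda d: d.month), ('d', lambda d: d.day))
_greatest = next(field for field, _get in _fields if _get(_start) != _get(_end))
assert _greatest == 'd'  # so the 'd' entry of the skeleton's interval patterns is used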
+
+
+def get_period_id(
+    time: _Instant,
+    tzinfo: datetime.tzinfo | None = None,
+    type: Literal['selection'] | None = None,
+    locale: Locale | str | None = LC_TIME,
+) -> str:
     """
     Get the day period ID for a given time.

@@ -741,15 +1128,62 @@ def get_period_id(time: _Instant, tzinfo: (datetime.tzinfo | None)=None,
     :param locale: the `Locale` object, or a locale string
     :return: period ID. Something is always returned -- even if it's just "am" or "pm".
     """
-    pass
+    time = _get_time(time, tzinfo)
+    seconds_past_midnight = int(time.hour * 60 * 60 + time.minute * 60 + time.second)
+    locale = Locale.parse(locale)
+
+    # The LDML rules state that the rules may not overlap, so iterating in arbitrary
+    # order should be alright, though `at` periods should be preferred.
+    rulesets = locale.day_period_rules.get(type, {}).items()
+
+    for rule_id, rules in rulesets:
+        for rule in rules:
+            if "at" in rule and rule["at"] == seconds_past_midnight:
+                return rule_id
+
+    for rule_id, rules in rulesets:
+        for rule in rules:
+            if "from" in rule and "before" in rule:
+                if rule["from"] < rule["before"]:
+                    if rule["from"] <= seconds_past_midnight < rule["before"]:
+                        return rule_id
+                else:
+                    # e.g. from="21:00" before="06:00"
+                    if rule["from"] <= seconds_past_midnight < 86400 or \
+                            0 <= seconds_past_midnight < rule["before"]:
+                        return rule_id
+
+            start_ok = end_ok = False
+
+            if "from" in rule and seconds_past_midnight >= rule["from"]:
+                start_ok = True
+            if "to" in rule and seconds_past_midnight <= rule["to"]:
+                # This rule type does not exist in the present CLDR data;
+                # excuse the lack of test coverage.
+                end_ok = True
+            if "before" in rule and seconds_past_midnight < rule["before"]:
+                end_ok = True
+            if "after" in rule:
+                raise NotImplementedError("'after' is deprecated as of CLDR 29.")
+
+            if start_ok and end_ok:
+                return rule_id
+
+    if seconds_past_midnight < 43200:
+        return "am"
+    else:
+        return "pm"


 class ParseError(ValueError):
     pass


-def parse_date(string: str, locale: (Locale | str | None)=LC_TIME, format:
-    _PredefinedTimeFormat='medium') ->datetime.date:
+def parse_date(
+    string: str,
+    locale: Locale | str | None = LC_TIME,
+    format: _PredefinedTimeFormat = 'medium',
+) -> datetime.date:
     """Parse a date from a string.

     This function first tries to interpret the string as ISO-8601
@@ -769,11 +1203,47 @@ def parse_date(string: str, locale: (Locale | str | None)=LC_TIME, format:
     :param locale: a `Locale` object or a locale identifier
     :param format: the format to use (see ``get_date_format``)
     """
-    pass
-
-
-def parse_time(string: str, locale: (Locale | str | None)=LC_TIME, format:
-    _PredefinedTimeFormat='medium') ->datetime.time:
+    numbers = re.findall(r'(\d+)', string)
+    if not numbers:
+        raise ParseError("No numbers were found in input")
+
+    # we try ISO-8601-like formats first, i.e. the extended YYYY-MM-DD
+    # or the basic YYYYMMDD form
+    iso_alike = re.match(r'^(\d{4})-?([01]\d)-?([0-3]\d)$',
+                         string, flags=re.ASCII)  # allow only ASCII digits
+    if iso_alike:
+        try:
+            return datetime.date(*map(int, iso_alike.groups()))
+        except ValueError:
+            pass  # a locale format might fit better, so let's continue
+
+    format_str = get_date_format(format=format, locale=locale).pattern.lower()
+    year_idx = format_str.index('y')
+    month_idx = format_str.index('m')
+    if month_idx < 0:
+        month_idx = format_str.index('l')
+    day_idx = format_str.index('d')
+
+    indexes = sorted([(year_idx, 'Y'), (month_idx, 'M'), (day_idx, 'D')])
+    indexes = {item[1]: idx for idx, item in enumerate(indexes)}
+
+    # FIXME: this currently only supports numbers, but should also support month
+    #        names, both in the requested locale, and english
+
+    year = numbers[indexes['Y']]
+    year = 2000 + int(year) if len(year) == 2 else int(year)
+    month = int(numbers[indexes['M']])
+    day = int(numbers[indexes['D']])
+    if month > 12:
+        month, day = day, month
+    return datetime.date(year, month, day)
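# NOTE (editorial, not part of the patch): the index trick above relies only on
# the *relative order* of the y/m/d fields in the locale pattern; a sketch with
# an assumed US-style pattern and the input '4/1/2007'.
_format_str = 'MMM d, y'.lower()
_order = sorted([(_format_str.index('y'), 'Y'),
                 (_format_str.index('m'), 'M'),
                 (_format_str.index('d'), 'D')])
_indexes = {field: i for i, (_, field) in enumerate(_order)}
_numbers = ['4', '1', '2007']
assert (_numbers[_indexes['Y']], _numbers[_indexes['M']], _numbers[_indexes['D']]) == ('2007', '4', '1')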
+
+
+def parse_time(
+    string: str,
+    locale: Locale | str | None = LC_TIME,
+    format: _PredefinedTimeFormat = 'medium',
+) -> datetime.time:
     """Parse a time from a string.

     This function uses the time format for the locale as a hint to determine
@@ -788,7 +1258,37 @@ def parse_time(string: str, locale: (Locale | str | None)=LC_TIME, format:
     :return: the parsed time
     :rtype: `time`
     """
-    pass
+    numbers = re.findall(r'(\d+)', string)
+    if not numbers:
+        raise ParseError("No numbers were found in input")
+
+    # TODO: try ISO format first?
+    format_str = get_time_format(format=format, locale=locale).pattern.lower()
+    hour_idx = format_str.index('h')
+    if hour_idx < 0:
+        hour_idx = format_str.index('k')
+    min_idx = format_str.index('m')
+    sec_idx = format_str.index('s')
+
+    indexes = sorted([(hour_idx, 'H'), (min_idx, 'M'), (sec_idx, 'S')])
+    indexes = {item[1]: idx for idx, item in enumerate(indexes)}
+
+    # TODO: support time zones
+
+    # Check if the format specifies a period to be used;
+    # if it does, look for 'pm' to figure out an offset.
+    hour_offset = 0
+    if 'a' in format_str and 'pm' in string.lower():
+        hour_offset = 12
+
+    # Parse up to three numbers from the string.
+    minute = second = 0
+    hour = int(numbers[indexes['H']]) + hour_offset
+    if len(numbers) > 1:
+        minute = int(numbers[indexes['M']])
+        if len(numbers) > 2:
+            second = int(numbers[indexes['S']])
+    return datetime.time(hour, minute, second)


 class DateTimePattern:
@@ -797,33 +1297,43 @@ class DateTimePattern:
         self.pattern = pattern
         self.format = format

-    def __repr__(self) ->str:
-        return f'<{type(self).__name__} {self.pattern!r}>'
+    def __repr__(self) -> str:
+        return f"<{type(self).__name__} {self.pattern!r}>"

-    def __str__(self) ->str:
+    def __str__(self) -> str:
         pat = self.pattern
         return pat

-    def __mod__(self, other: DateTimeFormat) ->str:
+    def __mod__(self, other: DateTimeFormat) -> str:
         if not isinstance(other, DateTimeFormat):
             return NotImplemented
         return self.format % other

+    def apply(
+        self,
+        datetime: datetime.date | datetime.time,
+        locale: Locale | str | None,
+        reference_date: datetime.date | None = None,
+    ) -> str:
+        return self % DateTimeFormat(datetime, locale, reference_date)
+

 class DateTimeFormat:

-    def __init__(self, value: (datetime.date | datetime.time), locale: (
-        Locale | str), reference_date: (datetime.date | None)=None) ->None:
-        assert isinstance(value, (datetime.date, datetime.datetime,
-            datetime.time))
-        if isinstance(value, (datetime.datetime, datetime.time)
-            ) and value.tzinfo is None:
+    def __init__(
+        self,
+        value: datetime.date | datetime.time,
+        locale: Locale | str,
+        reference_date: datetime.date | None = None,
+    ) -> None:
+        assert isinstance(value, (datetime.date, datetime.datetime, datetime.time))
+        if isinstance(value, (datetime.datetime, datetime.time)) and value.tzinfo is None:
             value = value.replace(tzinfo=UTC)
         self.value = value
         self.locale = Locale.parse(locale)
         self.reference_date = reference_date

-    def __getitem__(self, name: str) ->str:
+    def __getitem__(self, name: str) -> str:
         char = name[0]
         num = len(name)
         if char == 'G':
@@ -871,9 +1381,73 @@ class DateTimeFormat:
         elif char in ('z', 'Z', 'v', 'V', 'x', 'X', 'O'):
             return self.format_timezone(char, num)
         else:
-            raise KeyError(f'Unsupported date/time field {char!r}')
-
-    def format_weekday(self, char: str='E', num: int=4) ->str:
+            raise KeyError(f"Unsupported date/time field {char!r}")
+
+    def extract(self, char: str) -> int:
+        char = str(char)[0]
+        if char == 'y':
+            return self.value.year
+        elif char == 'M':
+            return self.value.month
+        elif char == 'd':
+            return self.value.day
+        elif char == 'H':
+            return self.value.hour
+        elif char == 'h':
+            return self.value.hour % 12 or 12
+        elif char == 'm':
+            return self.value.minute
+        elif char == 'a':
+            return int(self.value.hour >= 12)  # 0 for am, 1 for pm
+        else:
+            raise NotImplementedError(f"Not implemented: extracting {char!r} from {self.value!r}")
+
+    def format_era(self, char: str, num: int) -> str:
+        width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[max(3, num)]
+        era = int(self.value.year >= 0)
+        return get_era_names(width, self.locale)[era]
+
+    def format_year(self, char: str, num: int) -> str:
+        value = self.value.year
+        if char.isupper():
+            value = self.value.isocalendar()[0]
+        year = self.format(value, num)
+        if num == 2:
+            year = year[-2:]
+        return year
+
+    def format_quarter(self, char: str, num: int) -> str:
+        quarter = (self.value.month - 1) // 3 + 1
+        if num <= 2:
+            return '%0*d' % (num, quarter)
+        width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num]
+        context = {'Q': 'format', 'q': 'stand-alone'}[char]
+        return get_quarter_names(width, context, self.locale)[quarter]
+
+    def format_month(self, char: str, num: int) -> str:
+        if num <= 2:
+            return '%0*d' % (num, self.value.month)
+        width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num]
+        context = {'M': 'format', 'L': 'stand-alone'}[char]
+        return get_month_names(width, context, self.locale)[self.value.month]
+
+    def format_week(self, char: str, num: int) -> str:
+        if char.islower():  # week of year
+            day_of_year = self.get_day_of_year()
+            week = self.get_week_number(day_of_year)
+            if week == 0:
+                date = self.value - datetime.timedelta(days=day_of_year)
+                week = self.get_week_number(self.get_day_of_year(date),
+                                            date.weekday())
+            return self.format(week, num)
+        else:  # week of month
+            week = self.get_week_number(self.value.day)
+            if week == 0:
+                date = self.value - datetime.timedelta(days=self.value.day)
+                week = self.get_week_number(date.day, date.weekday())
+            return str(week)
+
+    def format_weekday(self, char: str = 'E', num: int = 4) -> str:
         """
         Return weekday from parsed datetime according to format pattern.

@@ -901,9 +1475,23 @@ class DateTimeFormat:
         :param num: count of format character

         """
-        pass
-
-    def format_period(self, char: str, num: int) ->str:
+        if num < 3:
+            if char.islower():
+                value = 7 - self.locale.first_week_day + self.value.weekday()
+                return self.format(value % 7 + 1, num)
+            num = 3
+        weekday = self.value.weekday()
+        width = {3: 'abbreviated', 4: 'wide', 5: 'narrow', 6: 'short'}[num]
+        context = "stand-alone" if char == "c" else "format"
+        return get_day_names(width, context, self.locale)[weekday]
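# NOTE (editorial, not part of the patch): for the numeric widths the weekday is
# renumbered relative to the locale's first week day; assuming Babel's 0=Monday
# convention and a Sunday-first locale (first_week_day == 6), a Monday becomes 2.
_first_week_day, _weekday = 6, 0
assert (7 - _first_week_day + _weekday) % 7 + 1 == 2  # Sunday would map to 1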
+
+    def format_day_of_year(self, num: int) -> str:
+        return self.format(self.get_day_of_year(), num)
+
+    def format_day_of_week_in_month(self) -> str:
+        return str((self.value.day - 1) // 7 + 1)
+
+    def format_period(self, char: str, num: int) -> str:
         """
         Return period from parsed datetime according to format pattern.

@@ -932,17 +1520,94 @@ class DateTimeFormat:
         :param num: count of format character

         """
-        pass
-
-    def format_frac_seconds(self, num: int) ->str:
+        widths = [{3: 'abbreviated', 4: 'wide', 5: 'narrow'}[max(3, num)],
+                  'wide', 'narrow', 'abbreviated']
+        if char == 'a':
+            period = 'pm' if self.value.hour >= 12 else 'am'
+            context = 'format'
+        else:
+            period = get_period_id(self.value, locale=self.locale)
+            context = 'format' if char == 'B' else 'stand-alone'
+        for width in widths:
+            period_names = get_period_names(context=context, width=width, locale=self.locale)
+            if period in period_names:
+                return period_names[period]
+        raise ValueError(f"Could not format period {period} in {self.locale}")
+
+    def format_frac_seconds(self, num: int) -> str:
         """ Return fractional seconds.

-        Rounds the time's microseconds to the precision given by the number         of digits passed in.
+        Rounds the time's microseconds to the precision given by the number \
+        of digits passed in.
         """
-        pass
-
-    def get_week_number(self, day_of_period: int, day_of_week: (int | None)
-        =None) ->int:
+        value = self.value.microsecond / 1000000
+        return self.format(round(value, num) * 10**num, num)
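# NOTE (editorial, not part of the patch): the fractional-second formula above,
# worked for half a second rendered at a single digit ('S').
_value = 500000 / 1000000                 # microseconds as a fraction of a second
assert '%0*d' % (1, round(_value, 1) * 10 ** 1) == '5'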
+
+    def format_milliseconds_in_day(self, num):
+        msecs = self.value.microsecond // 1000 + self.value.second * 1000 + \
+            self.value.minute * 60000 + self.value.hour * 3600000
+        return self.format(msecs, num)
+
+    def format_timezone(self, char: str, num: int) -> str:
+        width = {3: 'short', 4: 'long', 5: 'iso8601'}[max(3, num)]
+
+        # It could be that we only receive a time to format, but also have a
+        # reference date which is important to distinguish between timezone
+        # variants (summer/standard time)
+        value = self.value
+        if self.reference_date:
+            value = datetime.datetime.combine(self.reference_date, self.value)
+
+        if char == 'z':
+            return get_timezone_name(value, width, locale=self.locale)
+        elif char == 'Z':
+            if num == 5:
+                return get_timezone_gmt(value, width, locale=self.locale, return_z=True)
+            return get_timezone_gmt(value, width, locale=self.locale)
+        elif char == 'O':
+            if num == 4:
+                return get_timezone_gmt(value, width, locale=self.locale)
+        # TODO: add support for 'O' with num == 1
+        elif char == 'v':
+            return get_timezone_name(value.tzinfo, width,
+                                     locale=self.locale)
+        elif char == 'V':
+            if num == 1:
+                return get_timezone_name(value.tzinfo, width,
+                                         uncommon=True, locale=self.locale)
+            elif num == 2:
+                return get_timezone_name(value.tzinfo, locale=self.locale, return_zone=True)
+            elif num == 3:
+                return get_timezone_location(value.tzinfo, locale=self.locale, return_city=True)
+            return get_timezone_location(value.tzinfo, locale=self.locale)
+        # Additional branch supporting the 'X' and 'x' timezone format characters
+        elif char == 'X':
+            if num == 1:
+                return get_timezone_gmt(value, width='iso8601_short', locale=self.locale,
+                                        return_z=True)
+            elif num in (2, 4):
+                return get_timezone_gmt(value, width='short', locale=self.locale,
+                                        return_z=True)
+            elif num in (3, 5):
+                return get_timezone_gmt(value, width='iso8601', locale=self.locale,
+                                        return_z=True)
+        elif char == 'x':
+            if num == 1:
+                return get_timezone_gmt(value, width='iso8601_short', locale=self.locale)
+            elif num in (2, 4):
+                return get_timezone_gmt(value, width='short', locale=self.locale)
+            elif num in (3, 5):
+                return get_timezone_gmt(value, width='iso8601', locale=self.locale)
+
+    def format(self, value: SupportsInt, length: int) -> str:
+        return '%0*d' % (length, value)
+
+    def get_day_of_year(self, date: datetime.date | None = None) -> int:
+        if date is None:
+            date = self.value
+        return (date - date.replace(month=1, day=1)).days + 1
+
+    def get_week_number(self, day_of_period: int, day_of_week: int | None = None) -> int:
         """Return the number of the week of a day within a period. This may be
         the week number in a year or the week number in a month.

@@ -961,22 +1626,53 @@ class DateTimeFormat:
         :param day_of_week: the week day; if omitted, the week day of the
                             current date is assumed
         """
-        pass
-
-
-PATTERN_CHARS: dict[str, list[int] | None] = {'G': [1, 2, 3, 4, 5], 'y':
-    None, 'Y': None, 'u': None, 'Q': [1, 2, 3, 4, 5], 'q': [1, 2, 3, 4, 5],
-    'M': [1, 2, 3, 4, 5], 'L': [1, 2, 3, 4, 5], 'w': [1, 2], 'W': [1], 'd':
-    [1, 2], 'D': [1, 2, 3], 'F': [1], 'g': None, 'E': [1, 2, 3, 4, 5, 6],
-    'e': [1, 2, 3, 4, 5, 6], 'c': [1, 3, 4, 5, 6], 'a': [1, 2, 3, 4, 5],
-    'b': [1, 2, 3, 4, 5], 'B': [1, 2, 3, 4, 5], 'h': [1, 2], 'H': [1, 2],
-    'K': [1, 2], 'k': [1, 2], 'm': [1, 2], 's': [1, 2], 'S': None, 'A':
-    None, 'z': [1, 2, 3, 4], 'Z': [1, 2, 3, 4, 5], 'O': [1, 4], 'v': [1, 4],
-    'V': [1, 2, 3, 4], 'x': [1, 2, 3, 4, 5], 'X': [1, 2, 3, 4, 5]}
-PATTERN_CHAR_ORDER = 'GyYuUQqMLlwWdDFgEecabBChHKkjJmsSAzZOvVXx'
-
-
-def parse_pattern(pattern: (str | DateTimePattern)) ->DateTimePattern:
+        if day_of_week is None:
+            day_of_week = self.value.weekday()
+        first_day = (day_of_week - self.locale.first_week_day -
+                     day_of_period + 1) % 7
+        if first_day < 0:
+            first_day += 7
+        week_number = (day_of_period + first_day - 1) // 7
+
+        if 7 - first_day >= self.locale.min_week_days:
+            week_number += 1
+
+        if self.locale.first_week_day == 0:
+            # Correct the weeknumber in case of iso-calendar usage (first_week_day=0).
+            # If the weeknumber exceeds the maximum number of weeks for the given year
+            # we must count from zero. For example, the above calculation gives week 53
+            # for 2018-12-31. By the ISO calendar definition, 2018 has a maximum of 52
+            # weeks, thus the weeknumber must be 53-52=1.
+            max_weeks = datetime.date(year=self.value.year, day=28, month=12).isocalendar()[1]
+            if week_number > max_weeks:
+                week_number -= max_weeks
+
+        return week_number
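# NOTE (editorial, not part of the patch): a worked instance of the ISO
# correction described in the comment above, using only the stdlib.
from datetime import date

assert date(2018, 12, 28).isocalendar()[1] == 52  # 2018 has a maximum of 52 ISO weeks
assert date(2018, 12, 31).isocalendar()[1] == 1   # so 53 - 52 = 1: Dec 31 falls in week 1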
+
+
+PATTERN_CHARS: dict[str, list[int] | None] = {
+    'G': [1, 2, 3, 4, 5],                                               # era
+    'y': None, 'Y': None, 'u': None,                                    # year
+    'Q': [1, 2, 3, 4, 5], 'q': [1, 2, 3, 4, 5],                         # quarter
+    'M': [1, 2, 3, 4, 5], 'L': [1, 2, 3, 4, 5],                         # month
+    'w': [1, 2], 'W': [1],                                              # week
+    'd': [1, 2], 'D': [1, 2, 3], 'F': [1], 'g': None,                   # day
+    'E': [1, 2, 3, 4, 5, 6], 'e': [1, 2, 3, 4, 5, 6], 'c': [1, 3, 4, 5, 6],  # week day
+    'a': [1, 2, 3, 4, 5], 'b': [1, 2, 3, 4, 5], 'B': [1, 2, 3, 4, 5],   # period
+    'h': [1, 2], 'H': [1, 2], 'K': [1, 2], 'k': [1, 2],                 # hour
+    'm': [1, 2],                                                        # minute
+    's': [1, 2], 'S': None, 'A': None,                                  # second
+    'z': [1, 2, 3, 4], 'Z': [1, 2, 3, 4, 5], 'O': [1, 4], 'v': [1, 4],  # zone
+    'V': [1, 2, 3, 4], 'x': [1, 2, 3, 4, 5], 'X': [1, 2, 3, 4, 5],      # zone
+}
+
+#: The pattern characters declared in the Date Field Symbol Table
+#: (https://www.unicode.org/reports/tr35/tr35-dates.html#Date_Field_Symbol_Table)
+#: in order of decreasing magnitude.
+PATTERN_CHAR_ORDER = "GyYuUQqMLlwWdDFgEecabBChHKkjJmsSAzZOvVXx"
+
+
+def parse_pattern(pattern: str | DateTimePattern) -> DateTimePattern:
     """Parse date, time, and datetime format patterns.

     >>> parse_pattern("MMMMd").format
@@ -997,10 +1693,30 @@ def parse_pattern(pattern: (str | DateTimePattern)) ->DateTimePattern:

     :param pattern: the formatting pattern to parse
     """
-    pass
+    if isinstance(pattern, DateTimePattern):
+        return pattern
+    return _cached_parse_pattern(pattern)
+
+
+@lru_cache(maxsize=1024)
+def _cached_parse_pattern(pattern: str) -> DateTimePattern:
+    result = []
+
+    for tok_type, tok_value in tokenize_pattern(pattern):
+        if tok_type == "chars":
+            result.append(tok_value.replace('%', '%%'))
+        elif tok_type == "field":
+            fieldchar, fieldnum = tok_value
+            limit = PATTERN_CHARS[fieldchar]
+            if limit and fieldnum not in limit:
+                raise ValueError(f"Invalid length for field: {fieldchar * fieldnum!r}")
+            result.append('%%(%s)s' % (fieldchar * fieldnum))
+        else:
+            raise NotImplementedError(f"Unknown token type: {tok_type}")
+    return DateTimePattern(pattern, ''.join(result))


-def tokenize_pattern(pattern: str) ->list[tuple[str, str | tuple[str, int]]]:
+def tokenize_pattern(pattern: str) -> list[tuple[str, str | tuple[str, int]]]:
     """
     Tokenize date format patterns.

@@ -1016,11 +1732,60 @@ def tokenize_pattern(pattern: str) ->list[tuple[str, str | tuple[str, int]]]:
     :type pattern: str
     :rtype: list[tuple]
     """
-    pass
+    result = []
+    quotebuf = None
+    charbuf = []
+    fieldchar = ['']
+    fieldnum = [0]
+
+    def append_chars():
+        result.append(('chars', ''.join(charbuf).replace('\0', "'")))
+        del charbuf[:]
+
+    def append_field():
+        result.append(('field', (fieldchar[0], fieldnum[0])))
+        fieldchar[0] = ''
+        fieldnum[0] = 0
+
+    for char in pattern.replace("''", '\0'):
+        if quotebuf is None:
+            if char == "'":  # quote started
+                if fieldchar[0]:
+                    append_field()
+                elif charbuf:
+                    append_chars()
+                quotebuf = []
+            elif char in PATTERN_CHARS:
+                if charbuf:
+                    append_chars()
+                if char == fieldchar[0]:
+                    fieldnum[0] += 1
+                else:
+                    if fieldchar[0]:
+                        append_field()
+                    fieldchar[0] = char
+                    fieldnum[0] = 1
+            else:
+                if fieldchar[0]:
+                    append_field()
+                charbuf.append(char)
+
+        elif quotebuf is not None:
+            if char == "'":  # end of quote
+                charbuf.extend(quotebuf)
+                quotebuf = None
+            else:  # inside quote
+                quotebuf.append(char)

+    if fieldchar[0]:
+        append_field()
+    elif charbuf:
+        append_chars()

-def untokenize_pattern(tokens: Iterable[tuple[str, str | tuple[str, int]]]
-    ) ->str:
+    return result
+
+
+def untokenize_pattern(tokens: Iterable[tuple[str, str | tuple[str, int]]]) -> str:
     """
     Turn a date format pattern token stream back into a string.

@@ -1029,37 +1794,57 @@ def untokenize_pattern(tokens: Iterable[tuple[str, str | tuple[str, int]]]
     :type tokens: Iterable[tuple]
     :rtype: str
     """
-    pass
+    output = []
+    for tok_type, tok_value in tokens:
+        if tok_type == "field":
+            output.append(tok_value[0] * tok_value[1])
+        elif tok_type == "chars":
+            if not any(ch in PATTERN_CHARS for ch in tok_value):  # No need to quote
+                output.append(tok_value)
+            else:
+                output.append("'%s'" % tok_value.replace("'", "''"))
+    return "".join(output)


-def split_interval_pattern(pattern: str) ->list[str]:
+def split_interval_pattern(pattern: str) -> list[str]:
     """
     Split an interval-describing datetime pattern into multiple pieces.

     > The pattern is then designed to be broken up into two pieces by determining the first repeating field.
     - https://www.unicode.org/reports/tr35/tr35-dates.html#intervalFormats

-    >>> split_interval_pattern(u'E d.M. – E d.M.')
-    [u'E d.M. – ', 'E d.M.']
+    >>> split_interval_pattern(u'E d.M. \u2013 E d.M.')
+    [u'E d.M. \u2013 ', 'E d.M.']
     >>> split_interval_pattern("Y 'text' Y 'more text'")
     ["Y 'text '", "Y 'more text'"]
-    >>> split_interval_pattern(u"E, MMM d – E")
-    [u'E, MMM d – ', u'E']
+    >>> split_interval_pattern(u"E, MMM d \u2013 E")
+    [u'E, MMM d \u2013 ', u'E']
     >>> split_interval_pattern("MMM d")
     ['MMM d']
     >>> split_interval_pattern("y G")
     ['y G']
-    >>> split_interval_pattern(u"MMM d – d")
-    [u'MMM d – ', u'd']
+    >>> split_interval_pattern(u"MMM d \u2013 d")
+    [u'MMM d \u2013 ', u'd']

     :param pattern: Interval pattern string
     :return: list of "subpatterns"
     """
-    pass
+
+    seen_fields = set()
+    parts = [[]]
+
+    for tok_type, tok_value in tokenize_pattern(pattern):
+        if tok_type == "field":
+            if tok_value[0] in seen_fields:  # Repeated field
+                parts.append([])
+                seen_fields.clear()
+            seen_fields.add(tok_value[0])
+        parts[-1].append((tok_type, tok_value))
+
+    return [untokenize_pattern(tokens) for tokens in parts]


-def match_skeleton(skeleton: str, options: Iterable[str],
-    allow_different_fields: bool=False) ->(str | None):
+def match_skeleton(skeleton: str, options: Iterable[str], allow_different_fields: bool = False) -> str | None:
     """
     Find the closest match for the given datetime skeleton among the options given.

@@ -1083,4 +1868,47 @@ def match_skeleton(skeleton: str, options: Iterable[str],
     :return: The closest skeleton match, or if no match was found, None.
     :rtype: str|None
     """
-    pass
+
+    # TODO: maybe implement pattern expansion?
+
+    # Based on the implementation in
+    # http://source.icu-project.org/repos/icu/icu4j/trunk/main/classes/core/src/com/ibm/icu/text/DateIntervalInfo.java
+
+    # Filter out falsy values and sort for stability; when `interval_formats` is passed in, there may be a None key.
+    options = sorted(option for option in options if option)
+
+    if 'z' in skeleton and not any('z' in option for option in options):
+        skeleton = skeleton.replace('z', 'v')
+
+    get_input_field_width = dict(t[1] for t in tokenize_pattern(skeleton) if t[0] == "field").get
+    best_skeleton = None
+    best_distance = None
+    for option in options:
+        get_opt_field_width = dict(t[1] for t in tokenize_pattern(option) if t[0] == "field").get
+        distance = 0
+        for field in PATTERN_CHARS:
+            input_width = get_input_field_width(field, 0)
+            opt_width = get_opt_field_width(field, 0)
+            if input_width == opt_width:
+                continue
+            if opt_width == 0 or input_width == 0:
+                if not allow_different_fields:  # This one is not okay
+                    option = None
+                    break
+                distance += 0x1000  # Magic weight constant for "entirely different fields"
+            elif field == 'M' and ((input_width > 2 and opt_width <= 2) or (input_width <= 2 and opt_width > 2)):
+                distance += 0x100  # Magic weight for "text turns into a number"
+            else:
+                distance += abs(input_width - opt_width)
+
+        if not option:  # We lost the option along the way (probably due to "allow_different_fields")
+            continue
+
+        if not best_skeleton or distance < best_distance:
+            best_skeleton = option
+            best_distance = distance
+
+        if distance == 0:  # Found a perfect match!
+            break
+
+    return best_skeleton
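# NOTE (editorial, not part of the patch): a hedged sketch of the weighting --
# a plain width difference is cheap, a text/number month switch costs 0x100, and
# a missing field costs 0x1000 (and is only tolerated with allow_different_fields).
from babel.dates import match_skeleton

assert match_skeleton('yMMMd', ('yMd', 'yMMMMd')) == 'yMMMMd'
assert match_skeleton('yMMMd', ('QyMMMd',)) is None
assert match_skeleton('yMMMd', ('QyMMMd',), allow_different_fields=True) == 'QyMMMd'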
diff --git a/babel/languages.py b/babel/languages.py
index 11dd1ae..564f555 100644
--- a/babel/languages.py
+++ b/babel/languages.py
@@ -1,9 +1,9 @@
 from __future__ import annotations
+
 from babel.core import get_global


-def get_official_languages(territory: str, regional: bool=False, de_facto:
-    bool=False) ->tuple[str, ...]:
+def get_official_languages(territory: str, regional: bool = False, de_facto: bool = False) -> tuple[str, ...]:
     """
     Get the official language(s) for the given territory.

@@ -25,11 +25,25 @@ def get_official_languages(territory: str, regional: bool=False, de_facto:
     :return: Tuple of language codes
     :rtype: tuple[str]
     """
-    pass
+
+    territory = str(territory).upper()
+    allowed_stati = {"official"}
+    if regional:
+        allowed_stati.add("official_regional")
+    if de_facto:
+        allowed_stati.add("de_facto_official")
+
+    languages = get_global("territory_languages").get(territory, {})
+    pairs = [
+        (info['population_percent'], language)
+        for language, info in languages.items()
+        if info.get('official_status') in allowed_stati
+    ]
+    pairs.sort(reverse=True)
+    return tuple(lang for _, lang in pairs)


-def get_territory_language_info(territory: str) ->dict[str, dict[str, float |
-    str | None]]:
+def get_territory_language_info(territory: str) -> dict[str, dict[str, float | str | None]]:
     """
     Get a dictionary of language information for a territory.

@@ -54,4 +68,5 @@ def get_territory_language_info(territory: str) ->dict[str, dict[str, float |
     :return: Language information dictionary
     :rtype: dict[str, dict]
     """
-    pass
+    territory = str(territory).upper()
+    return get_global("territory_languages").get(territory, {}).copy()
diff --git a/babel/lists.py b/babel/lists.py
index cb84e96..a8471fd 100644
--- a/babel/lists.py
+++ b/babel/lists.py
@@ -14,24 +14,28 @@
     :license: BSD, see LICENSE for more details.
 """
 from __future__ import annotations
+
 from collections.abc import Sequence
 from typing import TYPE_CHECKING
+
 from babel.core import Locale, default_locale
+
 if TYPE_CHECKING:
     from typing_extensions import Literal
+
 DEFAULT_LOCALE = default_locale()


-def format_list(lst: Sequence[str], style: Literal['standard',
-    'standard-short', 'or', 'or-short', 'unit', 'unit-short', 'unit-narrow'
-    ]='standard', locale: (Locale | str | None)=DEFAULT_LOCALE) ->str:
+def format_list(lst: Sequence[str],
+                style: Literal['standard', 'standard-short', 'or', 'or-short', 'unit', 'unit-short', 'unit-narrow'] = 'standard',
+                locale: Locale | str | None = DEFAULT_LOCALE) -> str:
     """
     Format the items in `lst` as a list.

     >>> format_list(['apples', 'oranges', 'pears'], locale='en')
     u'apples, oranges, and pears'
     >>> format_list(['apples', 'oranges', 'pears'], locale='zh')
-    u'apples、oranges和pears'
+    u'apples\u3001oranges\u548cpears'
     >>> format_list(['omena', 'peruna', 'aplari'], style='or', locale='fi')
     u'omena, peruna tai aplari'

@@ -66,4 +70,25 @@ def format_list(lst: Sequence[str], style: Literal['standard',
     :param style: the style to format the list with. See above for description.
     :param locale: the locale
     """
-    pass
+    locale = Locale.parse(locale)
+    if not lst:
+        return ''
+    if len(lst) == 1:
+        return lst[0]
+
+    if style not in locale.list_patterns:
+        raise ValueError(
+            f'Locale {locale} does not support list formatting style {style!r} '
+            f'(supported are {sorted(locale.list_patterns)})',
+        )
+    patterns = locale.list_patterns[style]
+
+    if len(lst) == 2:
+        return patterns['2'].format(*lst)
+
+    result = patterns['start'].format(lst[0], lst[1])
+    for elem in lst[2:-1]:
+        result = patterns['middle'].format(result, elem)
+    result = patterns['end'].format(result, lst[-1])
+
+    return result
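# NOTE (editorial, not part of the patch): a plain sketch of how the
# start/middle/end patterns are folded over a list; the patterns below are an
# assumed en-style set, the real ones come from CLDR.
_patterns = {'start': '{0}, {1}', 'middle': '{0}, {1}', 'end': '{0}, and {1}'}
_items = ['apples', 'oranges', 'pears', 'plums']
_result = _patterns['start'].format(_items[0], _items[1])
for _elem in _items[2:-1]:
    _result = _patterns['middle'].format(_result, _elem)
_result = _patterns['end'].format(_result, _items[-1])
assert _result == 'apples, oranges, pears, and plums'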
diff --git a/babel/localedata.py b/babel/localedata.py
index c54dad6..f1e8a12 100644
--- a/babel/localedata.py
+++ b/babel/localedata.py
@@ -10,7 +10,9 @@
     :copyright: (c) 2013-2023 by the Babel Team.
     :license: BSD, see LICENSE for more details.
 """
+
 from __future__ import annotations
+
 import os
 import pickle
 import re
@@ -21,41 +23,60 @@ from collections.abc import Iterator, Mapping, MutableMapping
 from functools import lru_cache
 from itertools import chain
 from typing import Any
+
 _cache: dict[str, Any] = {}
 _cache_lock = threading.RLock()
 _dirname = os.path.join(os.path.dirname(__file__), 'locale-data')
-_windows_reserved_name_re = re.compile('^(con|prn|aux|nul|com[0-9]|lpt[0-9])$',
-    re.I)
+_windows_reserved_name_re = re.compile("^(con|prn|aux|nul|com[0-9]|lpt[0-9])$", re.I)


-def normalize_locale(name: str) ->(str | None):
+def normalize_locale(name: str) -> str | None:
     """Normalize a locale ID by stripping spaces and apply proper casing.

     Returns the normalized locale ID string or `None` if the ID is not
     recognized.
     """
-    pass
+    if not name or not isinstance(name, str):
+        return None
+    name = name.strip().lower()
+    for locale_id in chain.from_iterable([_cache, locale_identifiers()]):
+        if name == locale_id.lower():
+            return locale_id


-def resolve_locale_filename(name: (os.PathLike[str] | str)) ->str:
+def resolve_locale_filename(name: os.PathLike[str] | str) -> str:
     """
     Resolve a locale identifier to a `.dat` path on disk.
     """
-    pass

+    # Clean up any possible relative paths.
+    name = os.path.basename(name)
+
+    # Ensure we're not left with one of the Windows reserved names.
+    if sys.platform == "win32" and _windows_reserved_name_re.match(os.path.splitext(name)[0]):
+        raise ValueError(f"Name {name} is invalid on Windows")
+
+    # Build the path.
+    return os.path.join(_dirname, f"{name}.dat")

-def exists(name: str) ->bool:
+
+def exists(name: str) -> bool:
     """Check whether locale data is available for the given locale.

     Returns `True` if it exists, `False` otherwise.

     :param name: the locale identifier string
     """
-    pass
+    if not name or not isinstance(name, str):
+        return False
+    if name in _cache:
+        return True
+    file_found = os.path.exists(resolve_locale_filename(name))
+    return True if file_found else bool(normalize_locale(name))


 @lru_cache(maxsize=None)
-def locale_identifiers() ->list[str]:
+def locale_identifiers() -> list[str]:
     """Return a list of all locale identifiers for which locale data is
     available.

@@ -66,11 +87,15 @@ def locale_identifiers() ->list[str]:

     :return: a list of locale identifiers (strings)
     """
-    pass
+    return [
+        stem
+        for stem, extension in
+        (os.path.splitext(filename) for filename in os.listdir(_dirname))
+        if extension == '.dat' and stem != 'root'
+    ]


-def load(name: (os.PathLike[str] | str), merge_inherited: bool=True) ->dict[
-    str, Any]:
+def load(name: os.PathLike[str] | str, merge_inherited: bool = True) -> dict[str, Any]:
     """Load the locale data for the given locale.

     The locale data is a dictionary that contains much of the data defined by
@@ -95,10 +120,34 @@ def load(name: (os.PathLike[str] | str), merge_inherited: bool=True) ->dict[
     :raise `IOError`: if no locale data file is found for the given locale
                       identifier, or one of the locales it inherits from
     """
-    pass
-
-
-def merge(dict1: MutableMapping[Any, Any], dict2: Mapping[Any, Any]) ->None:
+    name = os.path.basename(name)
+    _cache_lock.acquire()
+    try:
+        data = _cache.get(name)
+        if not data:
+            # Load inherited data
+            if name == 'root' or not merge_inherited:
+                data = {}
+            else:
+                from babel.core import get_global
+                parent = get_global('parent_exceptions').get(name)
+                if not parent:
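+                    # Derive the parent by dropping the last component,
+                    # e.g. 'de_DE_1901' -> 'de_DE' -> 'de' -> 'root'.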
+                    parts = name.split('_')
+                    parent = "root" if len(parts) == 1 else "_".join(parts[:-1])
+                data = load(parent).copy()
+            filename = resolve_locale_filename(name)
+            with open(filename, 'rb') as fileobj:
+                if name != 'root' and merge_inherited:
+                    merge(data, pickle.load(fileobj))
+                else:
+                    data = pickle.load(fileobj)
+            _cache[name] = data
+        return data
+    finally:
+        _cache_lock.release()
+
+
+def merge(dict1: MutableMapping[Any, Any], dict2: Mapping[Any, Any]) -> None:
     """Merge the data from `dict2` into the `dict1` dictionary, making copies
     of nested dictionaries.

@@ -110,7 +159,25 @@ def merge(dict1: MutableMapping[Any, Any], dict2: Mapping[Any, Any]) ->None:
     :param dict1: the dictionary to merge into
     :param dict2: the dictionary containing the data that should be merged
     """
-    pass
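+    # For example, merge({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3}) leaves
+    # dict1 as {'a': {'x': 1, 'y': 2}, 'b': 3}; nested dicts are copied,
+    # never shared, and `None` values in dict2 are ignored.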
+    for key, val2 in dict2.items():
+        if val2 is not None:
+            val1 = dict1.get(key)
+            if isinstance(val2, dict):
+                if val1 is None:
+                    val1 = {}
+                if isinstance(val1, Alias):
+                    val1 = (val1, val2)
+                elif isinstance(val1, tuple):
+                    alias, others = val1
+                    others = others.copy()
+                    merge(others, val2)
+                    val1 = (alias, others)
+                else:
+                    val1 = val1.copy()
+                    merge(val1, val2)
+            else:
+                val1 = val2
+            dict1[key] = val1


 class Alias:
@@ -120,14 +187,13 @@ class Alias:
     as specified by the `keys`.
     """

-    def __init__(self, keys: tuple[str, ...]) ->None:
+    def __init__(self, keys: tuple[str, ...]) -> None:
         self.keys = tuple(keys)

-    def __repr__(self) ->str:
-        return f'<{type(self).__name__} {self.keys!r}>'
+    def __repr__(self) -> str:
+        return f"<{type(self).__name__} {self.keys!r}>"

-    def resolve(self, data: Mapping[str | int | None, Any]) ->Mapping[str |
-        int | None, Any]:
+    def resolve(self, data: Mapping[str | int | None, Any]) -> Mapping[str | int | None, Any]:
         """Resolve the alias based on the given data.

         This is done recursively, so if one alias resolves to a second alias,
@@ -136,7 +202,15 @@ class Alias:
         :param data: the locale data
         :type data: `dict`
         """
-        pass
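+        # Walk the key path from the root of the data mapping; the target may
+        # itself be an alias (or an (alias, overrides) tuple) that needs to be
+        # resolved against the same base data again.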
+        base = data
+        for key in self.keys:
+            data = data[key]
+        if isinstance(data, Alias):
+            data = data.resolve(base)
+        elif isinstance(data, tuple):
+            alias, others = data
+            data = alias.resolve(base)
+        return data


 class LocaleDataDict(abc.MutableMapping):
@@ -144,35 +218,37 @@ class LocaleDataDict(abc.MutableMapping):
     values.
     """

-    def __init__(self, data: MutableMapping[str | int | None, Any], base: (
-        Mapping[str | int | None, Any] | None)=None):
+    def __init__(self, data: MutableMapping[str | int | None, Any], base: Mapping[str | int | None, Any] | None = None):
         self._data = data
         if base is None:
             base = data
         self.base = base

-    def __len__(self) ->int:
+    def __len__(self) -> int:
         return len(self._data)

-    def __iter__(self) ->Iterator[str | int | None]:
+    def __iter__(self) -> Iterator[str | int | None]:
         return iter(self._data)

-    def __getitem__(self, key: (str | int | None)) ->Any:
+    def __getitem__(self, key: str | int | None) -> Any:
         orig = val = self._data[key]
-        if isinstance(val, Alias):
+        if isinstance(val, Alias):  # resolve an alias
             val = val.resolve(self.base)
-        if isinstance(val, tuple):
+        if isinstance(val, tuple):  # Merge a partial dict with an alias
             alias, others = val
             val = alias.resolve(self.base).copy()
             merge(val, others)
-        if isinstance(val, dict):
+        if isinstance(val, dict):  # Return a nested alias-resolving dict
             val = LocaleDataDict(val, base=self.base)
         if val is not orig:
             self._data[key] = val
         return val

-    def __setitem__(self, key: (str | int | None), value: Any) ->None:
+    def __setitem__(self, key: str | int | None, value: Any) -> None:
         self._data[key] = value

-    def __delitem__(self, key: (str | int | None)) ->None:
+    def __delitem__(self, key: str | int | None) -> None:
         del self._data[key]
+
+    def copy(self) -> LocaleDataDict:
+        return LocaleDataDict(self._data.copy(), base=self.base)
diff --git a/babel/localtime/_fallback.py b/babel/localtime/_fallback.py
index b8b4fdf..14979a5 100644
--- a/babel/localtime/_fallback.py
+++ b/babel/localtime/_fallback.py
@@ -7,14 +7,38 @@
     :copyright: (c) 2013-2023 by the Babel Team.
     :license: BSD, see LICENSE for more details.
 """
+
 import datetime
 import time
+
 STDOFFSET = datetime.timedelta(seconds=-time.timezone)
-DSTOFFSET = datetime.timedelta(seconds=-time.altzone
-    ) if time.daylight else STDOFFSET
+DSTOFFSET = datetime.timedelta(seconds=-time.altzone) if time.daylight else STDOFFSET
+
 DSTDIFF = DSTOFFSET - STDOFFSET
 ZERO = datetime.timedelta(0)


 class _FallbackLocalTimezone(datetime.tzinfo):
-    pass
+
+    def utcoffset(self, dt: datetime.datetime) -> datetime.timedelta:
+        if self._isdst(dt):
+            return DSTOFFSET
+        else:
+            return STDOFFSET
+
+    def dst(self, dt: datetime.datetime) -> datetime.timedelta:
+        if self._isdst(dt):
+            return DSTDIFF
+        else:
+            return ZERO
+
+    def tzname(self, dt: datetime.datetime) -> str:
+        return time.tzname[self._isdst(dt)]
+
+    def _isdst(self, dt: datetime.datetime) -> bool:
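+        # Round-trip the naive datetime through mktime()/localtime() so that
+        # the platform C library decides whether DST is in effect.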
+        tt = (dt.year, dt.month, dt.day,
+              dt.hour, dt.minute, dt.second,
+              dt.weekday(), 0, -1)
+        stamp = time.mktime(tt)
+        tt = time.localtime(stamp)
+        return tt.tm_isdst > 0
diff --git a/babel/localtime/_helpers.py b/babel/localtime/_helpers.py
index 821636c..f27b315 100644
--- a/babel/localtime/_helpers.py
+++ b/babel/localtime/_helpers.py
@@ -11,4 +11,33 @@ def _get_tzinfo(tzenv: str):
     :param tzenv: timezone in the form of Continent/City
     :return: tzinfo object or None if not found
     """
-    pass
+    if pytz:
+        try:
+            return pytz.timezone(tzenv)
+        except pytz.UnknownTimeZoneError:
+            pass
+    else:
+        try:
+            return zoneinfo.ZoneInfo(tzenv)
+        except zoneinfo.ZoneInfoNotFoundError:
+            pass
+
+    return None
+
+
+def _get_tzinfo_or_raise(tzenv: str):
+    tzinfo = _get_tzinfo(tzenv)
+    if tzinfo is None:
+        raise LookupError(
+            f"Can not find timezone {tzenv}. \n"
+            "Timezone names are generally in the form `Continent/City`.",
+        )
+    return tzinfo
+
+
+def _get_tzinfo_from_file(tzfilename: str):
+    with open(tzfilename, 'rb') as tzfile:
+        if pytz:
+            return pytz.tzfile.build_tzinfo('local', tzfile)
+        else:
+            return zoneinfo.ZoneInfo.from_file(tzfile)
diff --git a/babel/localtime/_unix.py b/babel/localtime/_unix.py
index dd709a3..eb81beb 100644
--- a/babel/localtime/_unix.py
+++ b/babel/localtime/_unix.py
@@ -1,10 +1,27 @@
 import datetime
 import os
 import re
-from babel.localtime._helpers import _get_tzinfo, _get_tzinfo_from_file, _get_tzinfo_or_raise

+from babel.localtime._helpers import (
+    _get_tzinfo,
+    _get_tzinfo_from_file,
+    _get_tzinfo_or_raise,
+)

-def _get_localzone(_root: str='/') ->datetime.tzinfo:
+
+def _tz_from_env(tzenv: str) -> datetime.tzinfo:
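+    # TZ values are often written with a leading ':' (e.g. ':Europe/Berlin');
+    # strip it before interpreting the rest.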
+    if tzenv[0] == ':':
+        tzenv = tzenv[1:]
+
+    # TZ specifies a file
+    if os.path.exists(tzenv):
+        return _get_tzinfo_from_file(tzenv)
+
+    # TZ specifies a zoneinfo zone.
+    return _get_tzinfo_or_raise(tzenv)
+
+
+def _get_localzone(_root: str = '/') -> datetime.tzinfo:
     """Tries to find the local timezone configuration.
     This method prefers finding the timezone name and passing that to
     zoneinfo or pytz, over passing in the localtime file, as in the latter
@@ -13,4 +30,69 @@ def _get_localzone(_root: str='/') ->datetime.tzinfo:
     beneath the _root directory. This is primarily used by the tests.
     In normal usage you call the function without parameters.
     """
-    pass
+
+    tzenv = os.environ.get('TZ')
+    if tzenv:
+        return _tz_from_env(tzenv)
+
+    # This is actually a pretty reliable way to test for the local time
+    # zone on operating systems like OS X.  On OS X especially this is the
+    # only one that actually works.
+    try:
+        link_dst = os.readlink('/etc/localtime')
+    except OSError:
+        pass
+    else:
+        pos = link_dst.find('/zoneinfo/')
+        if pos >= 0:
+            zone_name = link_dst[pos + 10:]
+            tzinfo = _get_tzinfo(zone_name)
+            if tzinfo is not None:
+                return tzinfo
+
+    # Now look for distribution specific configuration files
+    # that contain the timezone name.
+    tzpath = os.path.join(_root, 'etc/timezone')
+    if os.path.exists(tzpath):
+        with open(tzpath, 'rb') as tzfile:
+            data = tzfile.read()
+
+            # Issue #3 in tzlocal was that /etc/timezone was a zoneinfo file.
+            # That's a misconfiguration, but we need to handle it gracefully:
+            if data[:5] != b'TZif2':
+                etctz = data.strip().decode()
+                # Get rid of host definitions and comments:
+                if ' ' in etctz:
+                    etctz, dummy = etctz.split(' ', 1)
+                if '#' in etctz:
+                    etctz, dummy = etctz.split('#', 1)
+
+                return _get_tzinfo_or_raise(etctz.replace(' ', '_'))
+
+    # CentOS has a ZONE setting in /etc/sysconfig/clock,
+    # OpenSUSE has a TIMEZONE setting in /etc/sysconfig/clock and
+    # Gentoo has a TIMEZONE setting in /etc/conf.d/clock
+    # We look through these files for a timezone:
+    timezone_re = re.compile(r'\s*(TIME)?ZONE\s*=\s*"(?P<etctz>.+)"')
+
+    for filename in ('etc/sysconfig/clock', 'etc/conf.d/clock'):
+        tzpath = os.path.join(_root, filename)
+        if not os.path.exists(tzpath):
+            continue
+        with open(tzpath) as tzfile:
+            for line in tzfile:
+                match = timezone_re.match(line)
+                if match is not None:
+                    # We found a timezone
+                    etctz = match.group("etctz")
+                    return _get_tzinfo_or_raise(etctz.replace(' ', '_'))
+
+    # No explicit setting existed. Use localtime
+    for filename in ('etc/localtime', 'usr/local/etc/localtime'):
+        tzpath = os.path.join(_root, filename)
+
+        if not os.path.exists(tzpath):
+            continue
+        return _get_tzinfo_from_file(tzpath)
+
+    raise LookupError('Can not find any timezone configuration')
diff --git a/babel/localtime/_win32.py b/babel/localtime/_win32.py
index 323c08d..1a52567 100644
--- a/babel/localtime/_win32.py
+++ b/babel/localtime/_win32.py
@@ -1,19 +1,98 @@
 from __future__ import annotations
+
 try:
     import winreg
 except ImportError:
     winreg = None
+
 import datetime
 from typing import Any, Dict, cast
+
 from babel.core import get_global
 from babel.localtime._helpers import _get_tzinfo_or_raise
+
+# When building the cldr data on windows this module gets imported.
+# Because at that point there is no global.dat yet this call will
+# fail.  We want to catch the error in that case and just assume
+# the mapping is empty.
 try:
-    tz_names: dict[str, str] = cast(Dict[str, str], get_global(
-        'windows_zone_mapping'))
+    tz_names: dict[str, str] = cast(Dict[str, str], get_global('windows_zone_mapping'))
 except RuntimeError:
     tz_names = {}


-def valuestodict(key) ->dict[str, Any]:
+def valuestodict(key) -> dict[str, Any]:
     """Convert a registry key's values to a dictionary."""
-    pass
+    dict = {}
+    size = winreg.QueryInfoKey(key)[1]
+    for i in range(size):
+        data = winreg.EnumValue(key, i)
+        dict[data[0]] = data[1]
+    return dict
+
+
+def get_localzone_name() -> str:
+    # Windows is special. It has unique time zone names (in several
+    # meanings of the word) available, but unfortunately, they can be
+    # translated to the language of the operating system, so we need to
+    # do a backwards lookup, going through all time zones to see which
+    # one matches.
+    handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
+
+    TZLOCALKEYNAME = r'SYSTEM\CurrentControlSet\Control\TimeZoneInformation'
+    localtz = winreg.OpenKey(handle, TZLOCALKEYNAME)
+    keyvalues = valuestodict(localtz)
+    localtz.Close()
+    if 'TimeZoneKeyName' in keyvalues:
+        # Windows 7 (and Vista?)
+
+        # For some reason this returns a string with loads of NUL bytes at
+        # least on some systems. I don't know if this is a bug somewhere, I
+        # just work around it.
+        tzkeyname = keyvalues['TimeZoneKeyName'].split('\x00', 1)[0]
+    else:
+        # Windows 2000 or XP
+
+        # This is the localized name:
+        tzwin = keyvalues['StandardName']
+
+        # Open the list of timezones to look up the real name:
+        TZKEYNAME = r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones'
+        tzkey = winreg.OpenKey(handle, TZKEYNAME)
+
+        # Now, match this value to Time Zone information
+        tzkeyname = None
+        for i in range(winreg.QueryInfoKey(tzkey)[0]):
+            subkey = winreg.EnumKey(tzkey, i)
+            sub = winreg.OpenKey(tzkey, subkey)
+            data = valuestodict(sub)
+            sub.Close()
+            if data.get('Std', None) == tzwin:
+                tzkeyname = subkey
+                break
+
+        tzkey.Close()
+        handle.Close()
+
+    if tzkeyname is None:
+        raise LookupError('Can not find Windows timezone configuration')
+
+    timezone = tz_names.get(tzkeyname)
+    if timezone is None:
+        # Nope, that didn't work. Try adding 'Standard Time',
+        # it seems to work a lot of times:
+        timezone = tz_names.get(f"{tzkeyname} Standard Time")
+
+    # Return what we have.
+    if timezone is None:
+        raise LookupError(f"Can not find timezone {tzkeyname}")
+
+    return timezone
+
+
+def _get_localzone() -> datetime.tzinfo:
+    if winreg is None:
+        raise LookupError(
+            'Runtime support not available')
+
+    return _get_tzinfo_or_raise(get_localzone_name())
diff --git a/babel/messages/catalog.py b/babel/messages/catalog.py
index 27256d3..41adfae 100644
--- a/babel/messages/catalog.py
+++ b/babel/messages/catalog.py
@@ -8,6 +8,7 @@
     :license: BSD, see LICENSE for more details.
 """
 from __future__ import annotations
+
 import datetime
 import re
 from collections import OrderedDict
@@ -17,16 +18,19 @@ from difflib import SequenceMatcher
 from email import message_from_string
 from heapq import nlargest
 from typing import TYPE_CHECKING
+
 from babel import __version__ as VERSION
 from babel.core import Locale, UnknownLocaleError
 from babel.dates import format_datetime
 from babel.messages.plurals import get_plural
 from babel.util import LOCALTZ, FixedOffsetTimezone, _cmp, distinct
+
 if TYPE_CHECKING:
     from typing_extensions import TypeAlias
+
     _MessageID: TypeAlias = str | tuple[str, ...] | list[str]
-__all__ = ['Message', 'Catalog', 'TranslationError']

+__all__ = ['Message', 'Catalog', 'TranslationError']

 def get_close_matches(word, possibilities, n=3, cutoff=0.6):
     """A modified version of ``difflib.get_close_matches``.
@@ -34,31 +38,83 @@ def get_close_matches(word, possibilities, n=3, cutoff=0.6):
     It just passes ``autojunk=False`` to the ``SequenceMatcher``, to work
     around https://github.com/python/cpython/issues/90825.
     """
-    pass
-
-
-PYTHON_FORMAT = re.compile(
-    """
-    \\%
-        (?:\\(([\\w]*)\\))?
+    if not n > 0:  # pragma: no cover
+        raise ValueError(f"n must be > 0: {n!r}")
+    if not 0.0 <= cutoff <= 1.0:  # pragma: no cover
+        raise ValueError(f"cutoff must be in [0.0, 1.0]: {cutoff!r}")
+    result = []
+    s = SequenceMatcher(autojunk=False)  # only line changed from difflib.py
+    s.set_seq2(word)
+    for x in possibilities:
+        s.set_seq1(x)
+        if s.real_quick_ratio() >= cutoff and \
+           s.quick_ratio() >= cutoff and \
+           s.ratio() >= cutoff:
+            result.append((s.ratio(), x))
+
+    # Move the best scorers to head of list
+    result = nlargest(n, result)
+    # Strip scores for the best n matches
+    return [x for score, x in result]
+
+
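+# Matches printf-style placeholders such as '%s', '%(name)s' or '%.2f',
+# capturing the mapping key, the conversion modifiers and the type character.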
+PYTHON_FORMAT = re.compile(r'''
+    \%
+        (?:\(([\w]*)\))?
         (
-            [-#0\\ +]?(?:\\*|[\\d]+)?
-            (?:\\.(?:\\*|[\\d]+))?
+            [-#0\ +]?(?:\*|[\d]+)?
+            (?:\.(?:\*|[\d]+))?
             [hlL]?
         )
         ([diouxXeEfFgGcrs%])
-"""
-    , re.VERBOSE)
+''', re.VERBOSE)
+
+
+def _parse_datetime_header(value: str) -> datetime.datetime:
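+    # Parses PO header timestamps such as '2006-06-28 23:24+0200'; when the
+    # UTC offset is missing the result is a naive datetime.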
+    match = re.match(r'^(?P<datetime>.*?)(?P<tzoffset>[+-]\d{4})?$', value)
+
+    dt = datetime.datetime.strptime(match.group('datetime'), '%Y-%m-%d %H:%M')
+
+    # Separate the offset into a sign component, hours, and minutes
+    tzoffset = match.group('tzoffset')
+    if tzoffset is not None:
+        plus_minus_s, rest = tzoffset[0], tzoffset[1:]
+        hours_offset_s, mins_offset_s = rest[:2], rest[2:]
+
+        # Make them all integers
+        plus_minus = int(f"{plus_minus_s}1")
+        hours_offset = int(hours_offset_s)
+        mins_offset = int(mins_offset_s)
+
+        # Calculate net offset
+        net_mins_offset = hours_offset * 60
+        net_mins_offset += mins_offset
+        net_mins_offset *= plus_minus
+
+        # Create an offset object
+        tzoffset = FixedOffsetTimezone(net_mins_offset)
+
+        # Store the offset in a datetime object
+        dt = dt.replace(tzinfo=tzoffset)
+
+    return dt


 class Message:
     """Representation of a single message in a catalog."""

-    def __init__(self, id: _MessageID, string: (_MessageID | None)='',
-        locations: Iterable[tuple[str, int]]=(), flags: Iterable[str]=(),
-        auto_comments: Iterable[str]=(), user_comments: Iterable[str]=(),
-        previous_id: _MessageID=(), lineno: (int | None)=None, context: (
-        str | None)=None) ->None:
+    def __init__(
+        self,
+        id: _MessageID,
+        string: _MessageID | None = '',
+        locations: Iterable[tuple[str, int]] = (),
+        flags: Iterable[str] = (),
+        auto_comments: Iterable[str] = (),
+        user_comments: Iterable[str] = (),
+        previous_id: _MessageID = (),
+        lineno: int | None = None,
+        context: str | None = None,
+    ) -> None:
         """Create the message object.

         :param id: the message ID, or a ``(singular, plural)`` tuple for
@@ -77,7 +133,7 @@ class Message:
         """
         self.id = id
         if not string and self.pluralizable:
-            string = '', ''
+            string = ('', '')
         self.string = string
         self.locations = list(distinct(locations))
         self.flags = set(flags)
@@ -94,45 +150,49 @@ class Message:
         self.lineno = lineno
         self.context = context

-    def __repr__(self) ->str:
-        return (
-            f'<{type(self).__name__} {self.id!r} (flags: {list(self.flags)!r})>'
-            )
+    def __repr__(self) -> str:
+        return f"<{type(self).__name__} {self.id!r} (flags: {list(self.flags)!r})>"

-    def __cmp__(self, other: object) ->int:
+    def __cmp__(self, other: object) -> int:
         """Compare Messages, taking into account plural ids"""
-
         def values_to_compare(obj):
             if isinstance(obj, Message) and obj.pluralizable:
                 return obj.id[0], obj.context or ''
             return obj.id, obj.context or ''
         return _cmp(values_to_compare(self), values_to_compare(other))

-    def __gt__(self, other: object) ->bool:
+    def __gt__(self, other: object) -> bool:
         return self.__cmp__(other) > 0

-    def __lt__(self, other: object) ->bool:
+    def __lt__(self, other: object) -> bool:
         return self.__cmp__(other) < 0

-    def __ge__(self, other: object) ->bool:
+    def __ge__(self, other: object) -> bool:
         return self.__cmp__(other) >= 0

-    def __le__(self, other: object) ->bool:
+    def __le__(self, other: object) -> bool:
         return self.__cmp__(other) <= 0

-    def __eq__(self, other: object) ->bool:
+    def __eq__(self, other: object) -> bool:
         return self.__cmp__(other) == 0

-    def __ne__(self, other: object) ->bool:
+    def __ne__(self, other: object) -> bool:
         return self.__cmp__(other) != 0

-    def is_identical(self, other: Message) ->bool:
+    def is_identical(self, other: Message) -> bool:
         """Checks whether messages are identical, taking into account all
         properties.
         """
-        pass
+        assert isinstance(other, Message)
+        return self.__dict__ == other.__dict__
+
+    def clone(self) -> Message:
+        return Message(*map(copy, (self.id, self.string, self.locations,
+                                   self.flags, self.auto_comments,
+                                   self.user_comments, self.previous_id,
+                                   self.lineno, self.context)))

-    def check(self, catalog: (Catalog | None)=None) ->list[TranslationError]:
+    def check(self, catalog: Catalog | None = None) -> list[TranslationError]:
         """Run various validation checks on the message.  Some validations
         are only performed if the catalog is provided.  This method returns
         a sequence of `TranslationError` objects.
@@ -142,10 +202,17 @@ class Message:
         :see: `Catalog.check` for a way to perform checks for all messages
               in a catalog.
         """
-        pass
+        from babel.messages.checkers import checkers
+        errors: list[TranslationError] = []
+        for checker in checkers:
+            try:
+                checker(catalog, self)
+            except TranslationError as e:
+                errors.append(e)
+        return errors

     @property
-    def fuzzy(self) ->bool:
+    def fuzzy(self) -> bool:
         """Whether the translation is fuzzy.

         >>> Message('foo').fuzzy
@@ -157,10 +224,10 @@ class Message:
         <Message 'foo' (flags: ['fuzzy'])>

         :type:  `bool`"""
-        pass
+        return 'fuzzy' in self.flags

     @property
-    def pluralizable(self) ->bool:
+    def pluralizable(self) -> bool:
         """Whether the message is plurizable.

         >>> Message('foo').pluralizable
@@ -169,10 +236,10 @@ class Message:
         True

         :type:  `bool`"""
-        pass
+        return isinstance(self.id, (list, tuple))

     @property
-    def python_format(self) ->bool:
+    def python_format(self) -> bool:
         """Whether the message contains Python-style parameters.

         >>> Message('foo %(name)s bar').python_format
@@ -181,7 +248,10 @@ class Message:
         True

         :type:  `bool`"""
-        pass
+        ids = self.id
+        if not isinstance(ids, (list, tuple)):
+            ids = [ids]
+        return any(PYTHON_FORMAT.search(id) for id in ids)


 class TranslationError(Exception):
@@ -189,24 +259,41 @@ class TranslationError(Exception):
     translations are encountered."""


-DEFAULT_HEADER = """# Translations template for PROJECT.
+DEFAULT_HEADER = """\
+# Translations template for PROJECT.
 # Copyright (C) YEAR ORGANIZATION
 # This file is distributed under the same license as the PROJECT project.
 # FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
 #"""


+def parse_separated_header(value: str) -> dict[str, str]:
+    # Adapted from https://peps.python.org/pep-0594/#cgi
+    from email.message import Message
+    m = Message()
+    m['content-type'] = value
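+    # e.g. 'text/plain; charset=utf-8' -> {'text/plain': '', 'charset': 'utf-8'}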
+    return dict(m.get_params())
+
+
 class Catalog:
     """Representation of a message catalog."""

-    def __init__(self, locale: (str | Locale | None)=None, domain: (str |
-        None)=None, header_comment: (str | None)=DEFAULT_HEADER, project: (
-        str | None)=None, version: (str | None)=None, copyright_holder: (
-        str | None)=None, msgid_bugs_address: (str | None)=None,
-        creation_date: (datetime.datetime | str | None)=None, revision_date:
-        (datetime.datetime | datetime.time | float | str | None)=None,
-        last_translator: (str | None)=None, language_team: (str | None)=
-        None, charset: (str | None)=None, fuzzy: bool=True) ->None:
+    def __init__(
+        self,
+        locale: str | Locale | None = None,
+        domain: str | None = None,
+        header_comment: str | None = DEFAULT_HEADER,
+        project: str | None = None,
+        version: str | None = None,
+        copyright_holder: str | None = None,
+        msgid_bugs_address: str | None = None,
+        creation_date: datetime.datetime | str | None = None,
+        revision_date: datetime.datetime | datetime.time | float | str | None = None,
+        last_translator: str | None = None,
+        language_team: str | None = None,
+        charset: str | None = None,
+        fuzzy: bool = True,
+    ) -> None:
         """Initialize the catalog object.

         :param locale: the locale identifier or `Locale` object, or `None`
@@ -230,39 +317,86 @@ class Catalog:
         self.domain = domain
         self.locale = locale
         self._header_comment = header_comment
-        self._messages: OrderedDict[str | tuple[str, str], Message
-            ] = OrderedDict()
+        self._messages: OrderedDict[str | tuple[str, str], Message] = OrderedDict()
+
         self.project = project or 'PROJECT'
         self.version = version or 'VERSION'
         self.copyright_holder = copyright_holder or 'ORGANIZATION'
         self.msgid_bugs_address = msgid_bugs_address or 'EMAIL@ADDRESS'
+
         self.last_translator = last_translator or 'FULL NAME <EMAIL@ADDRESS>'
         """Name and email address of the last translator."""
         self.language_team = language_team or 'LANGUAGE <LL@li.org>'
         """Name and email address of the language team."""
+
         self.charset = charset or 'utf-8'
+
         if creation_date is None:
             creation_date = datetime.datetime.now(LOCALTZ)
-        elif isinstance(creation_date, datetime.datetime
-            ) and not creation_date.tzinfo:
+        elif isinstance(creation_date, datetime.datetime) and not creation_date.tzinfo:
             creation_date = creation_date.replace(tzinfo=LOCALTZ)
         self.creation_date = creation_date
         if revision_date is None:
             revision_date = 'YEAR-MO-DA HO:MI+ZONE'
-        elif isinstance(revision_date, datetime.datetime
-            ) and not revision_date.tzinfo:
+        elif isinstance(revision_date, datetime.datetime) and not revision_date.tzinfo:
             revision_date = revision_date.replace(tzinfo=LOCALTZ)
         self.revision_date = revision_date
         self.fuzzy = fuzzy
-        self.obsolete: OrderedDict[str | tuple[str, str], Message
-            ] = OrderedDict()
+
+        # Dictionary of obsolete messages
+        self.obsolete: OrderedDict[str | tuple[str, str], Message] = OrderedDict()
         self._num_plurals = None
         self._plural_expr = None
+
+    def _set_locale(self, locale: Locale | str | None) -> None:
+        if locale is None:
+            self._locale_identifier = None
+            self._locale = None
+            return
+
+        if isinstance(locale, Locale):
+            self._locale_identifier = str(locale)
+            self._locale = locale
+            return
+
+        if isinstance(locale, str):
+            self._locale_identifier = str(locale)
+            try:
+                self._locale = Locale.parse(locale)
+            except UnknownLocaleError:
+                self._locale = None
+            return
+
+        raise TypeError(f"`locale` must be a Locale, a locale identifier string, or None; got {locale!r}")
+
+    def _get_locale(self) -> Locale | None:
+        return self._locale
+
+    def _get_locale_identifier(self) -> str | None:
+        return self._locale_identifier
+
     locale = property(_get_locale, _set_locale)
     locale_identifier = property(_get_locale_identifier)
-    header_comment = property(_get_header_comment, _set_header_comment, doc
-        =
-        """    The header comment for the catalog.
+
+    def _get_header_comment(self) -> str:
+        comment = self._header_comment
+        year = datetime.datetime.now(LOCALTZ).strftime('%Y')
+        if hasattr(self.revision_date, 'strftime'):
+            year = self.revision_date.strftime('%Y')
+        comment = comment.replace('PROJECT', self.project) \
+                         .replace('VERSION', self.version) \
+                         .replace('YEAR', year) \
+                         .replace('ORGANIZATION', self.copyright_holder)
+        locale_name = (self.locale.english_name if self.locale else self.locale_identifier)
+        if locale_name:
+            comment = comment.replace("Translations template", f"{locale_name} translations")
+        return comment
+
+    def _set_header_comment(self, string: str | None) -> None:
+        self._header_comment = string
+
+    header_comment = property(_get_header_comment, _set_header_comment, doc="""\
+    The header comment for the catalog.

     >>> catalog = Catalog(project='Foobar', version='1.0',
     ...                   copyright_holder='Foo Company')
@@ -292,10 +426,79 @@ class Catalog:
     #

     :type: `unicode`
-    """
-        )
-    mime_headers = property(_get_mime_headers, _set_mime_headers, doc=
-        """    The MIME headers of the catalog, used for the special ``msgid ""`` entry.
+    """)
+
+    def _get_mime_headers(self) -> list[tuple[str, str]]:
+        headers: list[tuple[str, str]] = []
+        headers.append(("Project-Id-Version", f"{self.project} {self.version}"))
+        headers.append(('Report-Msgid-Bugs-To', self.msgid_bugs_address))
+        headers.append(('POT-Creation-Date',
+                        format_datetime(self.creation_date, 'yyyy-MM-dd HH:mmZ',
+                                        locale='en')))
+        if isinstance(self.revision_date, (datetime.datetime, datetime.time, int, float)):
+            headers.append(('PO-Revision-Date',
+                            format_datetime(self.revision_date,
+                                            'yyyy-MM-dd HH:mmZ', locale='en')))
+        else:
+            headers.append(('PO-Revision-Date', self.revision_date))
+        headers.append(('Last-Translator', self.last_translator))
+        if self.locale_identifier:
+            headers.append(('Language', str(self.locale_identifier)))
+        if self.locale_identifier and ('LANGUAGE' in self.language_team):
+            headers.append(('Language-Team',
+                            self.language_team.replace('LANGUAGE',
+                                                       str(self.locale_identifier))))
+        else:
+            headers.append(('Language-Team', self.language_team))
+        if self.locale is not None:
+            headers.append(('Plural-Forms', self.plural_forms))
+        headers.append(('MIME-Version', '1.0'))
+        headers.append(("Content-Type", f"text/plain; charset={self.charset}"))
+        headers.append(('Content-Transfer-Encoding', '8bit'))
+        headers.append(("Generated-By", f"Babel {VERSION}\n"))
+        return headers
+
+    def _force_text(self, s: str | bytes, encoding: str = 'utf-8', errors: str = 'strict') -> str:
+        if isinstance(s, str):
+            return s
+        if isinstance(s, bytes):
+            return s.decode(encoding, errors)
+        return str(s)
+
+    def _set_mime_headers(self, headers: Iterable[tuple[str, str]]) -> None:
+        for name, value in headers:
+            name = self._force_text(name.lower(), encoding=self.charset)
+            value = self._force_text(value, encoding=self.charset)
+            if name == 'project-id-version':
+                parts = value.split(' ')
+                self.project = ' '.join(parts[:-1])
+                self.version = parts[-1]
+            elif name == 'report-msgid-bugs-to':
+                self.msgid_bugs_address = value
+            elif name == 'last-translator':
+                self.last_translator = value
+            elif name == 'language':
+                value = value.replace('-', '_')
+                self._set_locale(value)
+            elif name == 'language-team':
+                self.language_team = value
+            elif name == 'content-type':
+                params = parse_separated_header(value)
+                if 'charset' in params:
+                    self.charset = params['charset'].lower()
+            elif name == 'plural-forms':
+                params = parse_separated_header(f" ;{value}")
+                self._num_plurals = int(params.get('nplurals', 2))
+                self._plural_expr = params.get('plural', '(n != 1)')
+            elif name == 'pot-creation-date':
+                self.creation_date = _parse_datetime_header(value)
+            elif name == 'po-revision-date':
+                # Keep the value if it's not the default one
+                if 'YEAR' not in value:
+                    self.revision_date = _parse_datetime_header(value)
+
+    mime_headers = property(_get_mime_headers, _set_mime_headers, doc="""\
+    The MIME headers of the catalog, used for the special ``msgid ""`` entry.

     The behavior of this property changes slightly depending on whether a locale
     is set or not, the latter indicating that the catalog is actually a template
@@ -344,11 +547,10 @@ class Catalog:
     Generated-By: Babel ...

     :type: `list`
-    """
-        )
+    """)

     @property
-    def num_plurals(self) ->int:
+    def num_plurals(self) -> int:
         """The number of plurals used by the catalog or locale.

         >>> Catalog(locale='en').num_plurals
@@ -357,10 +559,15 @@ class Catalog:
         5

         :type: `int`"""
-        pass
+        if self._num_plurals is None:
+            num = 2
+            if self.locale:
+                num = get_plural(self.locale)[0]
+            self._num_plurals = num
+        return self._num_plurals

     @property
-    def plural_expr(self) ->str:
+    def plural_expr(self) -> str:
         """The plural expression used by the catalog or locale.

         >>> Catalog(locale='en').plural_expr
@@ -371,10 +578,15 @@ class Catalog:
         '(n != 1)'

         :type: `str`"""
-        pass
+        if self._plural_expr is None:
+            expr = '(n != 1)'
+            if self.locale:
+                expr = get_plural(self.locale)[1]
+            self._plural_expr = expr
+        return self._plural_expr

     @property
-    def plural_forms(self) ->str:
+    def plural_forms(self) -> str:
         """Return the plural forms declaration for the locale.

         >>> Catalog(locale='en').plural_forms
@@ -383,26 +595,26 @@ class Catalog:
         'nplurals=2; plural=(n > 1);'

         :type: `str`"""
-        pass
+        return f"nplurals={self.num_plurals}; plural={self.plural_expr};"

-    def __contains__(self, id: _MessageID) ->bool:
+    def __contains__(self, id: _MessageID) -> bool:
         """Return whether the catalog has a message with the specified ID."""
         return self._key_for(id) in self._messages

-    def __len__(self) ->int:
+    def __len__(self) -> int:
         """The number of messages in the catalog.

         This does not include the special ``msgid ""`` entry."""
         return len(self._messages)

-    def __iter__(self) ->Iterator[Message]:
+    def __iter__(self) -> Iterator[Message]:
         """Iterates through all the entries in the catalog, in the order they
         were added, yielding a `Message` object for every entry.

         :rtype: ``iterator``"""
         buf = []
         for name, value in self.mime_headers:
-            buf.append(f'{name}: {value}')
+            buf.append(f"{name}: {value}")
         flags = set()
         if self.fuzzy:
             flags |= {'fuzzy'}
@@ -410,24 +622,24 @@ class Catalog:
         for key in self._messages:
             yield self._messages[key]

-    def __repr__(self) ->str:
+    def __repr__(self) -> str:
         locale = ''
         if self.locale:
-            locale = f' {self.locale}'
-        return f'<{type(self).__name__} {self.domain!r}{locale}>'
+            locale = f" {self.locale}"
+        return f"<{type(self).__name__} {self.domain!r}{locale}>"

-    def __delitem__(self, id: _MessageID) ->None:
+    def __delitem__(self, id: _MessageID) -> None:
         """Delete the message with the specified ID."""
         self.delete(id)

-    def __getitem__(self, id: _MessageID) ->Message:
+    def __getitem__(self, id: _MessageID) -> Message:
         """Return the message with the specified ID.

         :param id: the message ID
         """
         return self.get(id)

-    def __setitem__(self, id: _MessageID, message: Message) ->None:
+    def __setitem__(self, id: _MessageID, message: Message) -> None:
         """Add or update the message with the specified ID.

         >>> catalog = Catalog()
@@ -454,32 +666,40 @@ class Catalog:
         current = self._messages.get(key)
         if current:
             if message.pluralizable and not current.pluralizable:
+                # The new message adds pluralization
                 current.id = message.id
                 current.string = message.string
-            current.locations = list(distinct(current.locations + message.
-                locations))
+            current.locations = list(distinct(current.locations +
+                                              message.locations))
             current.auto_comments = list(distinct(current.auto_comments +
-                message.auto_comments))
+                                                  message.auto_comments))
             current.user_comments = list(distinct(current.user_comments +
-                message.user_comments))
+                                                  message.user_comments))
             current.flags |= message.flags
             message = current
         elif id == '':
+            # special treatment for the header message
             self.mime_headers = message_from_string(message.string).items()
-            self.header_comment = '\n'.join([f'# {c}'.rstrip() for c in
-                message.user_comments])
+            self.header_comment = "\n".join([f"# {c}".rstrip() for c in message.user_comments])
             self.fuzzy = message.fuzzy
         else:
             if isinstance(id, (list, tuple)):
-                assert isinstance(message.string, (list, tuple)
-                    ), f'Expected sequence but got {type(message.string)}'
+                assert isinstance(message.string, (list, tuple)), \
+                    f"Expected sequence but got {type(message.string)}"
             self._messages[key] = message

-    def add(self, id: _MessageID, string: (_MessageID | None)=None,
-        locations: Iterable[tuple[str, int]]=(), flags: Iterable[str]=(),
-        auto_comments: Iterable[str]=(), user_comments: Iterable[str]=(),
-        previous_id: _MessageID=(), lineno: (int | None)=None, context: (
-        str | None)=None) ->Message:
+    def add(
+        self,
+        id: _MessageID,
+        string: _MessageID | None = None,
+        locations: Iterable[tuple[str, int]] = (),
+        flags: Iterable[str] = (),
+        auto_comments: Iterable[str] = (),
+        user_comments: Iterable[str] = (),
+        previous_id: _MessageID = (),
+        lineno: int | None = None,
+        context: str | None = None,
+    ) -> Message:
         """Add or update the message with the specified ID.

         >>> catalog = Catalog()
@@ -505,9 +725,13 @@ class Catalog:
                        PO file, if any
         :param context: the message context
         """
-        pass
+        message = Message(id, string, list(locations), flags, auto_comments,
+                          user_comments, previous_id, lineno=lineno,
+                          context=context)
+        self[id] = message
+        return message

-    def check(self) ->Iterable[tuple[Message, list[TranslationError]]]:
+    def check(self) -> Iterable[tuple[Message, list[TranslationError]]]:
         """Run various validation checks on the translations in the catalog.

         For every message which fails validation, this method yields a
@@ -516,28 +740,37 @@ class Catalog:

         :rtype: ``generator`` of ``(message, errors)``
         """
-        pass
+        for message in self._messages.values():
+            errors = message.check(catalog=self)
+            if errors:
+                yield message, errors

-    def get(self, id: _MessageID, context: (str | None)=None) ->(Message | None
-        ):
+    def get(self, id: _MessageID, context: str | None = None) -> Message | None:
         """Return the message with the specified ID and context.

         :param id: the message ID
         :param context: the message context, or ``None`` for no context
         """
-        pass
+        return self._messages.get(self._key_for(id, context))

-    def delete(self, id: _MessageID, context: (str | None)=None) ->None:
+    def delete(self, id: _MessageID, context: str | None = None) -> None:
         """Delete the message with the specified ID and context.

         :param id: the message ID
         :param context: the message context, or ``None`` for no context
         """
-        pass
-
-    def update(self, template: Catalog, no_fuzzy_matching: bool=False,
-        update_header_comment: bool=False, keep_user_comments: bool=True,
-        update_creation_date: bool=True) ->None:
+        key = self._key_for(id, context)
+        if key in self._messages:
+            del self._messages[key]
+
+    def update(
+        self,
+        template: Catalog,
+        no_fuzzy_matching: bool = False,
+        update_header_comment: bool = False,
+        keep_user_comments: bool = True,
+        update_creation_date: bool = True,
+    ) -> None:
         """Update the catalog based on the given template catalog.

         >>> from babel.messages import Catalog
@@ -590,22 +823,126 @@ class Catalog:
         :param template: the reference catalog, usually read from a POT file
         :param no_fuzzy_matching: whether to use fuzzy matching of message IDs
         """
-        pass
-
-    def _to_fuzzy_match_key(self, key: (tuple[str, str] | str)) ->str:
+        messages = self._messages
+        remaining = messages.copy()
+        self._messages = OrderedDict()
+
+        # Prepare for fuzzy matching
+        fuzzy_candidates = {}
+        if not no_fuzzy_matching:
+            for msgid in messages:
+                if msgid and messages[msgid].string:
+                    key = self._key_for(msgid)
+                    ctxt = messages[msgid].context
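+                    # Map the normalized msgid to its exact key and context so
+                    # a close match can be traced back to the original message.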
+                    fuzzy_candidates[self._to_fuzzy_match_key(key)] = (key, ctxt)
+        fuzzy_matches = set()
+
+        def _merge(message: Message, oldkey: tuple[str, str] | str, newkey: tuple[str, str] | str) -> None:
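+            # Carry the existing translation (and optionally its user comments)
+            # over to the message from the template, marking it fuzzy when the
+            # IDs only roughly match or the plural forms do not line up.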
+            message = message.clone()
+            fuzzy = False
+            if oldkey != newkey:
+                fuzzy = True
+                fuzzy_matches.add(oldkey)
+                oldmsg = messages.get(oldkey)
+                assert oldmsg is not None
+                if isinstance(oldmsg.id, str):
+                    message.previous_id = [oldmsg.id]
+                else:
+                    message.previous_id = list(oldmsg.id)
+            else:
+                oldmsg = remaining.pop(oldkey, None)
+                assert oldmsg is not None
+            message.string = oldmsg.string
+
+            if keep_user_comments:
+                message.user_comments = list(distinct(oldmsg.user_comments))
+
+            if isinstance(message.id, (list, tuple)):
+                if not isinstance(message.string, (list, tuple)):
+                    fuzzy = True
+                    message.string = tuple(
+                        [message.string] + ([''] * (len(message.id) - 1)),
+                    )
+                elif len(message.string) != self.num_plurals:
+                    fuzzy = True
+                    message.string = tuple(message.string[:len(oldmsg.string)])
+            elif isinstance(message.string, (list, tuple)):
+                fuzzy = True
+                message.string = message.string[0]
+            message.flags |= oldmsg.flags
+            if fuzzy:
+                message.flags |= {'fuzzy'}
+            self[message.id] = message
+
+        for message in template:
+            if message.id:
+                key = self._key_for(message.id, message.context)
+                if key in messages:
+                    _merge(message, key, key)
+                else:
+                    if not no_fuzzy_matching:
+                        # do some fuzzy matching with difflib
+                        matches = get_close_matches(
+                            self._to_fuzzy_match_key(key),
+                            fuzzy_candidates.keys(),
+                            1,
+                        )
+                        if matches:
+                            modified_key = matches[0]
+                            newkey, newctxt = fuzzy_candidates[modified_key]
+                            if newctxt is not None:
+                                newkey = newkey, newctxt
+                            _merge(message, newkey, key)
+                            continue
+
+                    self[message.id] = message
+
+        for msgid in remaining:
+            if no_fuzzy_matching or msgid not in fuzzy_matches:
+                self.obsolete[msgid] = remaining[msgid]
+
+        if update_header_comment:
+            # Allow the updated catalog's header to be rewritten based on the
+            # template's header
+            self.header_comment = template.header_comment
+
+        # Make updated catalog's POT-Creation-Date equal to the template
+        # used to update the catalog
+        if update_creation_date:
+            self.creation_date = template.creation_date
+
+    def _to_fuzzy_match_key(self, key: tuple[str, str] | str) -> str:
         """Converts a message key to a string suitable for fuzzy matching."""
-        pass
+        if isinstance(key, tuple):
+            matchkey = key[0]  # just the msgid, no context
+        else:
+            matchkey = key
+        return matchkey.lower().strip()

-    def _key_for(self, id: _MessageID, context: (str | None)=None) ->(tuple
-        [str, str] | str):
+    def _key_for(self, id: _MessageID, context: str | None = None) -> tuple[str, str] | str:
         """The key for a message is just the singular ID even for pluralizable
         messages, but is a ``(msgid, msgctxt)`` tuple for context-specific
         messages.
         """
-        pass
-
-    def is_identical(self, other: Catalog) ->bool:
+        key = id
+        if isinstance(key, (list, tuple)):
+            key = id[0]
+        if context is not None:
+            key = (key, context)
+        return key
+
+    def is_identical(self, other: Catalog) -> bool:
         """Checks if catalogs are identical, taking into account messages and
         headers.
         """
-        pass
+        assert isinstance(other, Catalog)
+        for key in self._messages.keys() | other._messages.keys():
+            message_1 = self.get(key)
+            message_2 = other.get(key)
+            if (
+                message_1 is None
+                or message_2 is None
+                or not message_1.is_identical(message_2)
+            ):
+                return False
+        return dict(self.mime_headers) == dict(other.mime_headers)
diff --git a/babel/messages/checkers.py b/babel/messages/checkers.py
index 0161061..e5448e0 100644
--- a/babel/messages/checkers.py
+++ b/babel/messages/checkers.py
@@ -10,23 +10,56 @@
     :license: BSD, see LICENSE for more details.
 """
 from __future__ import annotations
+
 from collections.abc import Callable
+
 from babel.messages.catalog import PYTHON_FORMAT, Catalog, Message, TranslationError
-_string_format_compatibilities = [{'i', 'd', 'u'}, {'x', 'X'}, {'f', 'F',
-    'g', 'G'}]

+#: list of format chars that are compatible to each other
+_string_format_compatibilities = [
+    {'i', 'd', 'u'},
+    {'x', 'X'},
+    {'f', 'F', 'g', 'G'},
+]

-def num_plurals(catalog: (Catalog | None), message: Message) ->None:
+
+def num_plurals(catalog: Catalog | None, message: Message) -> None:
     """Verify the number of plurals in the translation."""
-    pass
+    if not message.pluralizable:
+        if not isinstance(message.string, str):
+            raise TranslationError("Found plural forms for non-pluralizable "
+                                   "message")
+        return
+
+    # skip further tests if no catalog is provided.
+    elif catalog is None:
+        return

+    msgstrs = message.string
+    if not isinstance(msgstrs, (list, tuple)):
+        msgstrs = (msgstrs,)
+    if len(msgstrs) != catalog.num_plurals:
+        raise TranslationError("Wrong number of plural forms (expected %d)" %
+                               catalog.num_plurals)

-def python_format(catalog: (Catalog | None), message: Message) ->None:
+
+def python_format(catalog: Catalog | None, message: Message) -> None:
     """Verify the format string placeholders in the translation."""
-    pass
+    if 'python-format' not in message.flags:
+        return
+    msgids = message.id
+    if not isinstance(msgids, (list, tuple)):
+        msgids = (msgids,)
+    msgstrs = message.string
+    if not isinstance(msgstrs, (list, tuple)):
+        msgstrs = (msgstrs,)
+
+    for msgid, msgstr in zip(msgids, msgstrs):
+        if msgstr:
+            _validate_format(msgid, msgstr)


-def _validate_format(format: str, alternative: str) ->None:
+def _validate_format(format: str, alternative: str) -> None:
     """Test format string `alternative` against `format`.  `format` can be the
     msgid of a message and `alternative` one of the `msgstr`\\s.  The two
     arguments are not interchangeable as `alternative` may contain less
@@ -57,7 +90,84 @@ def _validate_format(format: str, alternative: str) ->None:
                         against format
     :raises TranslationError: on formatting errors
     """
-    pass
+
+    def _parse(string: str) -> list[tuple[str, str]]:
+        result: list[tuple[str, str]] = []
+        for match in PYTHON_FORMAT.finditer(string):
+            name, format, typechar = match.groups()
+            if typechar == '%' and name is None:
+                continue
+            result.append((name, str(typechar)))
+        return result
+
+    def _compatible(a: str, b: str) -> bool:
+        if a == b:
+            return True
+        for set in _string_format_compatibilities:
+            if a in set and b in set:
+                return True
+        return False
+
+    def _check_positional(results: list[tuple[str, str]]) -> bool:
+        positional = None
+        for name, _char in results:
+            if positional is None:
+                positional = name is None
+            else:
+                if (name is None) != positional:
+                    raise TranslationError('format string mixes positional '
+                                           'and named placeholders')
+        return bool(positional)
+
+    a, b = map(_parse, (format, alternative))
+
+    # now check if both strings are positional or named
+    a_positional, b_positional = map(_check_positional, (a, b))
+    if a_positional and not b_positional and not b:
+        raise TranslationError('placeholders are incompatible')
+    elif a_positional != b_positional:
+        raise TranslationError('the format strings are of different kinds')
+
+    # if we are operating on positional strings both must have the
+    # same number of format chars and those must be compatible
+    if a_positional:
+        if len(a) != len(b):
+            raise TranslationError('positional format placeholders are '
+                                   'unbalanced')
+        for idx, ((_, first), (_, second)) in enumerate(zip(a, b)):
+            if not _compatible(first, second):
+                raise TranslationError('incompatible format for placeholder '
+                                       '%d: %r and %r are not compatible' %
+                                       (idx + 1, first, second))
+
+    # otherwise the second string must not have names the first one
+    # doesn't have and the types of those included must be compatible
+    else:
+        type_map = dict(a)
+        for name, typechar in b:
+            if name not in type_map:
+                raise TranslationError(f'unknown named placeholder {name!r}')
+            elif not _compatible(typechar, type_map[name]):
+                raise TranslationError(
+                    f'incompatible format for placeholder {name!r}: '
+                    f'{typechar!r} and {type_map[name]!r} are not compatible',
+                )
+
+
+def _find_checkers() -> list[Callable[[Catalog | None, Message], object]]:
+    checkers: list[Callable[[Catalog | None, Message], object]] = []
+    try:
+        from pkg_resources import working_set
+    except ImportError:
+        pass
+    else:
+        for entry_point in working_set.iter_entry_points('babel.checkers'):
+            checkers.append(entry_point.load())
+    if len(checkers) == 0:
+        # if pkg_resources is not available or no usable egg-info was found
+        # (see #230), just resort to hard-coded checkers
+        return [num_plurals, python_format]
+    return checkers


 checkers: list[Callable[[Catalog | None, Message], object]] = _find_checkers()
diff --git a/babel/messages/extract.py b/babel/messages/extract.py
index 8af8da4..b13f1a9 100644
--- a/babel/messages/extract.py
+++ b/babel/messages/extract.py
@@ -16,72 +16,115 @@
     :license: BSD, see LICENSE for more details.
 """
 from __future__ import annotations
+
 import ast
 import io
 import os
 import sys
 import tokenize
-from collections.abc import Callable, Collection, Generator, Iterable, Mapping, MutableSequence
+from collections.abc import (
+    Callable,
+    Collection,
+    Generator,
+    Iterable,
+    Mapping,
+    MutableSequence,
+)
 from os.path import relpath
 from textwrap import dedent
 from tokenize import COMMENT, NAME, OP, STRING, generate_tokens
 from typing import TYPE_CHECKING, Any
+
 from babel.util import parse_encoding, parse_future_flags, pathmatch
+
 if TYPE_CHECKING:
     from typing import IO, Protocol
+
     from _typeshed import SupportsItems, SupportsRead, SupportsReadline
     from typing_extensions import Final, TypeAlias, TypedDict

-
-    class _PyOptions(TypedDict, total=(False)):
+    class _PyOptions(TypedDict, total=False):
         encoding: str

-
-    class _JSOptions(TypedDict, total=(False)):
+    class _JSOptions(TypedDict, total=False):
         encoding: str
         jsx: bool
         template_string: bool
         parse_template_string: bool

-
     class _FileObj(SupportsRead[bytes], SupportsReadline[bytes], Protocol):
-        pass
-    _SimpleKeyword: TypeAlias = tuple[int | tuple[int, int] | tuple[int,
-        str], ...] | None
+        def seek(self, __offset: int, __whence: int = ...) -> int: ...
+        def tell(self) -> int: ...
+
+    _SimpleKeyword: TypeAlias = tuple[int | tuple[int, int] | tuple[int, str], ...] | None
     _Keyword: TypeAlias = dict[int | None, _SimpleKeyword] | _SimpleKeyword
-    _FileExtractionResult: TypeAlias = tuple[str, int, str | tuple[str, ...
-        ], list[str], str | None]
-    _ExtractionResult: TypeAlias = tuple[int, str | tuple[str, ...], list[
-        str], str | None]
-    _CallableExtractionMethod: TypeAlias = Callable[[_FileObj | IO[bytes],
-        Mapping[str, _Keyword], Collection[str], Mapping[str, Any]],
-        Iterable[_ExtractionResult]]
+
+    # 5-tuple of (filename, lineno, messages, comments, context)
+    _FileExtractionResult: TypeAlias = tuple[str, int, str | tuple[str, ...], list[str], str | None]
+
+    # 4-tuple of (lineno, message, comments, context)
+    _ExtractionResult: TypeAlias = tuple[int, str | tuple[str, ...], list[str], str | None]
+
+    # Required arguments: fileobj, keywords, comment_tags, options
+    # Return value: Iterable of (lineno, message, comments, context)
+    _CallableExtractionMethod: TypeAlias = Callable[
+        [_FileObj | IO[bytes], Mapping[str, _Keyword], Collection[str], Mapping[str, Any]],
+        Iterable[_ExtractionResult],
+    ]
+
     _ExtractionMethod: TypeAlias = _CallableExtractionMethod | str
+
 GROUP_NAME: Final[str] = 'babel.extractors'
-DEFAULT_KEYWORDS: dict[str, _Keyword] = {'_': None, 'gettext': None,
-    'ngettext': (1, 2), 'ugettext': None, 'ungettext': (1, 2), 'dgettext':
-    (2,), 'dngettext': (2, 3), 'N_': None, 'pgettext': ((1, 'c'), 2),
-    'npgettext': ((1, 'c'), 2, 3)}
+
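+# Keyword "specs" understood by `extract()`:
+#   None          -> the only/first argument is the msgid (e.g. gettext)
+#   (1, 2)        -> singular msgid at argument 1, plural msgid at argument 2
+#   (2,)          -> msgid at argument 2 (argument 1 is e.g. the domain)
+#   ((1, 'c'), 2) -> argument 1 is the message context, argument 2 the msgid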
+DEFAULT_KEYWORDS: dict[str, _Keyword] = {
+    '_': None,
+    'gettext': None,
+    'ngettext': (1, 2),
+    'ugettext': None,
+    'ungettext': (1, 2),
+    'dgettext': (2,),
+    'dngettext': (2, 3),
+    'N_': None,
+    'pgettext': ((1, 'c'), 2),
+    'npgettext': ((1, 'c'), 2, 3),
+}
+
 DEFAULT_MAPPING: list[tuple[str, str]] = [('**.py', 'python')]
-FSTRING_START = getattr(tokenize, 'FSTRING_START', None)
-FSTRING_MIDDLE = getattr(tokenize, 'FSTRING_MIDDLE', None)
-FSTRING_END = getattr(tokenize, 'FSTRING_END', None)
+
+# New tokens in Python 3.12, or None on older versions
+FSTRING_START = getattr(tokenize, "FSTRING_START", None)
+FSTRING_MIDDLE = getattr(tokenize, "FSTRING_MIDDLE", None)
+FSTRING_END = getattr(tokenize, "FSTRING_END", None)


 def _strip_comment_tags(comments: MutableSequence[str], tags: Iterable[str]):
     """Helper function for `extract` that strips comment tags from strings
     in a list of comment lines.  This function operates in-place.
     """
-    pass
-
-
-def extract_from_dir(dirname: (str | os.PathLike[str] | None)=None,
-    method_map: Iterable[tuple[str, str]]=DEFAULT_MAPPING, options_map: (
-    SupportsItems[str, dict[str, Any]] | None)=None, keywords: Mapping[str,
-    _Keyword]=DEFAULT_KEYWORDS, comment_tags: Collection[str]=(), callback:
-    (Callable[[str, str, dict[str, Any]], object] | None)=None,
-    strip_comment_tags: bool=False, directory_filter: (Callable[[str], bool
-    ] | None)=None) ->Generator[_FileExtractionResult, None, None]:
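+    # For example, with tags=("NOTE:",) a comment "NOTE: use plural here"
+    # becomes "use plural here"; comments without a tag are left unchanged.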
+    def _strip(line: str):
+        for tag in tags:
+            if line.startswith(tag):
+                return line[len(tag):].strip()
+        return line
+    comments[:] = map(_strip, comments)
+
+
+def default_directory_filter(dirpath: str | os.PathLike[str]) -> bool:
+    subdir = os.path.basename(dirpath)
+    # Legacy default behavior: ignore dot and underscore directories
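+    # e.g. ".git", ".tox" and "__pycache__" directories are not descended into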
+    return not (subdir.startswith('.') or subdir.startswith('_'))
+
+
+def extract_from_dir(
+    dirname: str | os.PathLike[str] | None = None,
+    method_map: Iterable[tuple[str, str]] = DEFAULT_MAPPING,
+    options_map: SupportsItems[str, dict[str, Any]] | None = None,
+    keywords: Mapping[str, _Keyword] = DEFAULT_KEYWORDS,
+    comment_tags: Collection[str] = (),
+    callback: Callable[[str, str, dict[str, Any]], object] | None = None,
+    strip_comment_tags: bool = False,
+    directory_filter: Callable[[str], bool] | None = None,
+) -> Generator[_FileExtractionResult, None, None]:
     """Extract messages from any source files found in the given directory.

     This function generates tuples of the form ``(filename, lineno, message,
@@ -151,15 +194,46 @@ def extract_from_dir(dirname: (str | os.PathLike[str] | None)=None,
                              should return True if the directory is valid.
     :see: `pathmatch`
     """
-    pass
-
-
-def check_and_call_extract_file(filepath: (str | os.PathLike[str]),
-    method_map: Iterable[tuple[str, str]], options_map: SupportsItems[str,
-    dict[str, Any]], callback: (Callable[[str, str, dict[str, Any]], object
-    ] | None), keywords: Mapping[str, _Keyword], comment_tags: Collection[
-    str], strip_comment_tags: bool, dirpath: (str | os.PathLike[str] | None
-    )=None) ->Generator[_FileExtractionResult, None, None]:
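+    # Illustrative call (names are examples only):
+    #     extract_from_dir('myproject', [('**.py', 'python')],
+    #                      comment_tags=('NOTE:',))
+    # yields (filename, lineno, message, comments, context) tuples.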
+    if dirname is None:
+        dirname = os.getcwd()
+    if options_map is None:
+        options_map = {}
+    if directory_filter is None:
+        directory_filter = default_directory_filter
+
+    absname = os.path.abspath(dirname)
+    for root, dirnames, filenames in os.walk(absname):
+        dirnames[:] = [
+            subdir for subdir in dirnames
+            if directory_filter(os.path.join(root, subdir))
+        ]
+        dirnames.sort()
+        filenames.sort()
+        for filename in filenames:
+            filepath = os.path.join(root, filename).replace(os.sep, '/')
+
+            yield from check_and_call_extract_file(
+                filepath,
+                method_map,
+                options_map,
+                callback,
+                keywords,
+                comment_tags,
+                strip_comment_tags,
+                dirpath=absname,
+            )
+
+
+def check_and_call_extract_file(
+    filepath: str | os.PathLike[str],
+    method_map: Iterable[tuple[str, str]],
+    options_map: SupportsItems[str, dict[str, Any]],
+    callback: Callable[[str, str, dict[str, Any]], object] | None,
+    keywords: Mapping[str, _Keyword],
+    comment_tags: Collection[str],
+    strip_comment_tags: bool,
+    dirpath: str | os.PathLike[str] | None = None,
+) -> Generator[_FileExtractionResult, None, None]:
     """Checks if the given file matches an extraction method mapping, and if so, calls extract_from_file.

     Note that the extraction method mappings are based relative to dirpath.
@@ -189,13 +263,39 @@ def check_and_call_extract_file(filepath: (str | os.PathLike[str]),
     :return: iterable of 5-tuples (filename, lineno, messages, comments, context)
     :rtype: Iterable[tuple[str, int, str|tuple[str], list[str], str|None]
     """
-    pass
-
-
-def extract_from_file(method: _ExtractionMethod, filename: (str | os.
-    PathLike[str]), keywords: Mapping[str, _Keyword]=DEFAULT_KEYWORDS,
-    comment_tags: Collection[str]=(), options: (Mapping[str, Any] | None)=
-    None, strip_comment_tags: bool=False) ->list[_ExtractionResult]:
+    # filename is the relative path from dirpath to the actual file
+    filename = relpath(filepath, dirpath)
+
+    for pattern, method in method_map:
+        if not pathmatch(pattern, filename):
+            continue
+
+        options = {}
+        for opattern, odict in options_map.items():
+            if pathmatch(opattern, filename):
+                options = odict
+        if callback:
+            callback(filename, method, options)
+        for message_tuple in extract_from_file(
+            method, filepath,
+            keywords=keywords,
+            comment_tags=comment_tags,
+            options=options,
+            strip_comment_tags=strip_comment_tags,
+        ):
+            yield (filename, *message_tuple)
+
+        break
+
+
+def extract_from_file(
+    method: _ExtractionMethod,
+    filename: str | os.PathLike[str],
+    keywords: Mapping[str, _Keyword] = DEFAULT_KEYWORDS,
+    comment_tags: Collection[str] = (),
+    options: Mapping[str, Any] | None = None,
+    strip_comment_tags: bool = False,
+) -> list[_ExtractionResult]:
     """Extract messages from a specific file.

     This function returns a list of tuples of the form ``(lineno, message, comments, context)``.
@@ -214,13 +314,63 @@ def extract_from_file(method: _ExtractionMethod, filename: (str | os.
     :returns: list of tuples of the form ``(lineno, message, comments, context)``
     :rtype: list[tuple[int, str|tuple[str], list[str], str|None]
     """
-    pass
-
-
-def extract(method: _ExtractionMethod, fileobj: _FileObj, keywords: Mapping
-    [str, _Keyword]=DEFAULT_KEYWORDS, comment_tags: Collection[str]=(),
-    options: (Mapping[str, Any] | None)=None, strip_comment_tags: bool=False
-    ) ->Generator[_ExtractionResult, None, None]:
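+    # e.g. extract_from_file('python', 'myapp/views.py') (hypothetical path)
+    # returns [(lineno, message, comments, context), ...] for the default keywords.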
+    if method == 'ignore':
+        return []
+
+    with open(filename, 'rb') as fileobj:
+        return list(extract(method, fileobj, keywords, comment_tags,
+                            options, strip_comment_tags))
+
+
+def _match_messages_against_spec(lineno: int, messages: list[str|None], comments: list[str],
+                                 fileobj: _FileObj, spec: tuple[int|tuple[int, str], ...]):
+    translatable = []
+    context = None
+
+    # last_index is 1 based like the keyword spec
+    last_index = len(messages)
+    for index in spec:
+        if isinstance(index, tuple): # (n, 'c')
+            context = messages[index[0] - 1]
+            continue
+        if last_index < index:
+            # Not enough arguments
+            return
+        message = messages[index - 1]
+        if message is None:
+            return
+        translatable.append(message)
+
+    # keyword spec indexes are 1 based, therefore '-1'
+    if isinstance(spec[0], tuple):
+        # context-aware *gettext method
+        first_msg_index = spec[1] - 1
+    else:
+        first_msg_index = spec[0] - 1
+    # An empty string msgid isn't valid, emit a warning
+    if not messages[first_msg_index]:
+        filename = (getattr(fileobj, "name", None) or "(unknown)")
+        sys.stderr.write(
+            f"{filename}:{lineno}: warning: Empty msgid.  It is reserved by GNU gettext: gettext(\"\") "
+            f"returns the header entry with meta information, not the empty string.\n",
+        )
+        return
+
+    translatable = tuple(translatable)
+    if len(translatable) == 1:
+        translatable = translatable[0]
+
+    return lineno, translatable, comments, context
+
+
+def extract(
+    method: _ExtractionMethod,
+    fileobj: _FileObj,
+    keywords: Mapping[str, _Keyword] = DEFAULT_KEYWORDS,
+    comment_tags: Collection[str] = (),
+    options: Mapping[str, Any] | None = None,
+    strip_comment_tags: bool = False,
+) -> Generator[_ExtractionResult, None, None]:
     """Extract messages from the given file-like object using the specified
     extraction method.

@@ -260,21 +410,88 @@ def extract(method: _ExtractionMethod, fileobj: _FileObj, keywords: Mapping
     :returns: iterable of tuples of the form ``(lineno, message, comments, context)``
     :rtype: Iterable[tuple[int, str|tuple[str], list[str], str|None]
     """
-    pass
-
-
-def extract_nothing(fileobj: _FileObj, keywords: Mapping[str, _Keyword],
-    comment_tags: Collection[str], options: Mapping[str, Any]) ->list[
-    _ExtractionResult]:
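+    # `method` may be a callable, an importable 'module:function' (or dotted)
+    # path, or the name of a built-in / entry-point extractor such as 'python'.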
+    func = None
+    if callable(method):
+        func = method
+    elif ':' in method or '.' in method:
+        if ':' not in method:
+            lastdot = method.rfind('.')
+            module, attrname = method[:lastdot], method[lastdot + 1:]
+        else:
+            module, attrname = method.split(':', 1)
+        func = getattr(__import__(module, {}, {}, [attrname]), attrname)
+    else:
+        try:
+            from pkg_resources import working_set
+        except ImportError:
+            pass
+        else:
+            for entry_point in working_set.iter_entry_points(GROUP_NAME,
+                                                             method):
+                func = entry_point.load(require=True)
+                break
+        if func is None:
+            # if pkg_resources is not available or no usable egg-info was found
+            # (see #230), we resort to looking up the builtin extractors
+            # directly
+            builtin = {
+                'ignore': extract_nothing,
+                'python': extract_python,
+                'javascript': extract_javascript,
+            }
+            func = builtin.get(method)
+
+    if func is None:
+        raise ValueError(f"Unknown extraction method {method!r}")
+
+    results = func(fileobj, keywords.keys(), comment_tags,
+                   options=options or {})
+
+    for lineno, funcname, messages, comments in results:
+        if not isinstance(messages, (list, tuple)):
+            messages = [messages]
+        if not messages:
+            continue
+
+        specs = keywords[funcname] or None if funcname else None
+        # {None: x} may be collapsed into x for backwards compatibility.
+        if not isinstance(specs, dict):
+            specs = {None: specs}
+
+        if strip_comment_tags:
+            _strip_comment_tags(comments, comment_tags)
+
+        # None matches all arities.
+        for arity in (None, len(messages)):
+            try:
+                spec = specs[arity]
+            except KeyError:
+                continue
+            if spec is None:
+                spec = (1,)
+            result = _match_messages_against_spec(lineno, messages, comments, fileobj, spec)
+            if result is not None:
+                yield result
+
+
+def extract_nothing(
+    fileobj: _FileObj,
+    keywords: Mapping[str, _Keyword],
+    comment_tags: Collection[str],
+    options: Mapping[str, Any],
+) -> list[_ExtractionResult]:
     """Pseudo extractor that does not actually extract anything, but simply
     returns an empty list.
     """
-    pass
+    return []


-def extract_python(fileobj: IO[bytes], keywords: Mapping[str, _Keyword],
-    comment_tags: Collection[str], options: _PyOptions) ->Generator[
-    _ExtractionResult, None, None]:
+def extract_python(
+    fileobj: IO[bytes],
+    keywords: Mapping[str, _Keyword],
+    comment_tags: Collection[str],
+    options: _PyOptions,
+) -> Generator[_ExtractionResult, None, None]:
     """Extract messages from Python source code.

     It returns an iterator yielding tuples in the following form ``(lineno,
@@ -289,12 +506,158 @@ def extract_python(fileobj: IO[bytes], keywords: Mapping[str, _Keyword],
     :param options: a dictionary of additional options (optional)
     :rtype: ``iterator``
     """
-    pass
-
-
-def extract_javascript(fileobj: _FileObj, keywords: Mapping[str, _Keyword],
-    comment_tags: Collection[str], options: _JSOptions, lineno: int=1
-    ) ->Generator[_ExtractionResult, None, None]:
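+    # e.g. a source line such as `_("Hello, %s") % name` makes this generator
+    # yield (lineno, '_', 'Hello, %s', []).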
+    funcname = lineno = message_lineno = None
+    call_stack = -1
+    buf = []
+    messages = []
+    translator_comments = []
+    in_def = in_translator_comments = False
+    comment_tag = None
+
+    encoding = parse_encoding(fileobj) or options.get('encoding', 'UTF-8')
+    future_flags = parse_future_flags(fileobj, encoding)
+    next_line = lambda: fileobj.readline().decode(encoding)
+
+    tokens = generate_tokens(next_line)
+
+    # Current prefix of a Python 3.12 (PEP 701) f-string, or None if we're not
+    # currently parsing one.
+    current_fstring_start = None
+
+    for tok, value, (lineno, _), _, _ in tokens:
+        if call_stack == -1 and tok == NAME and value in ('def', 'class'):
+            in_def = True
+        elif tok == OP and value == '(':
+            if in_def:
+                # Avoid false positives for declarations such as:
+                # def gettext(arg='message'):
+                in_def = False
+                continue
+            if funcname:
+                message_lineno = lineno
+                call_stack += 1
+        elif in_def and tok == OP and value == ':':
+            # End of a class definition without parens
+            in_def = False
+            continue
+        elif call_stack == -1 and tok == COMMENT:
+            # Strip the comment token from the line
+            value = value[1:].strip()
+            if in_translator_comments and \
+                    translator_comments[-1][0] == lineno - 1:
+                # We're already inside a translator comment, continue appending
+                translator_comments.append((lineno, value))
+                continue
+            # If execution reaches this point, let's see if comment line
+            # starts with one of the comment tags
+            for comment_tag in comment_tags:
+                if value.startswith(comment_tag):
+                    in_translator_comments = True
+                    translator_comments.append((lineno, value))
+                    break
+        elif funcname and call_stack == 0:
+            nested = (tok == NAME and value in keywords)
+            if (tok == OP and value == ')') or nested:
+                if buf:
+                    messages.append(''.join(buf))
+                    del buf[:]
+                else:
+                    messages.append(None)
+
+                messages = tuple(messages) if len(messages) > 1 else messages[0]
+                # Comments don't apply unless they immediately
+                # precede the message
+                if translator_comments and \
+                        translator_comments[-1][0] < message_lineno - 1:
+                    translator_comments = []
+
+                yield (message_lineno, funcname, messages,
+                       [comment[1] for comment in translator_comments])
+
+                funcname = lineno = message_lineno = None
+                call_stack = -1
+                messages = []
+                translator_comments = []
+                in_translator_comments = False
+                if nested:
+                    funcname = value
+            elif tok == STRING:
+                val = _parse_python_string(value, encoding, future_flags)
+                if val is not None:
+                    buf.append(val)
+
+            # Python 3.12+, see https://peps.python.org/pep-0701/#new-tokens
+            elif tok == FSTRING_START:
+                current_fstring_start = value
+            elif tok == FSTRING_MIDDLE:
+                if current_fstring_start is not None:
+                    current_fstring_start += value
+            elif tok == FSTRING_END:
+                if current_fstring_start is not None:
+                    fstring = current_fstring_start + value
+                    val = _parse_python_string(fstring, encoding, future_flags)
+                    if val is not None:
+                        buf.append(val)
+
+            elif tok == OP and value == ',':
+                if buf:
+                    messages.append(''.join(buf))
+                    del buf[:]
+                else:
+                    messages.append(None)
+                if translator_comments:
+                    # We have translator comments, and since we're on a
+                    # comma(,) user is allowed to break into a new line
+                    # Let's increase the last comment's lineno in order
+                    # for the comment to still be a valid one
+                    old_lineno, old_comment = translator_comments.pop()
+                    translator_comments.append((old_lineno + 1, old_comment))
+        elif call_stack > 0 and tok == OP and value == ')':
+            call_stack -= 1
+        elif funcname and call_stack == -1:
+            funcname = None
+        elif tok == NAME and value in keywords:
+            funcname = value
+
+        if (current_fstring_start is not None
+            and tok not in {FSTRING_START, FSTRING_MIDDLE}
+        ):
+            # In Python 3.12, tokens other than FSTRING_* mean the
+            # f-string is dynamic, so we don't want to extract it.
+            # And if it's FSTRING_END, we've already handled it above.
+            # Let's forget that we're in an f-string.
+            current_fstring_start = None
+
+
+def _parse_python_string(value: str, encoding: str, future_flags: int) -> str | None:
+    # Unwrap quotes in a safe manner, maintaining the string's encoding
+    # https://sourceforge.net/tracker/?func=detail&atid=355470&aid=617979&group_id=5470
+    code = compile(
+        f'# coding={str(encoding)}\n{value}',
+        '<string>',
+        'eval',
+        ast.PyCF_ONLY_AST | future_flags,
+    )
+    if isinstance(code, ast.Expression):
+        body = code.body
+        if isinstance(body, ast.Str):
+            return body.s
+        if isinstance(body, ast.JoinedStr):  # f-string
+            if all(isinstance(node, ast.Str) for node in body.values):
+                return ''.join(node.s for node in body.values)
+            if all(isinstance(node, ast.Constant) for node in body.values):
+                return ''.join(str(node.value) for node in body.values)
+            # TODO: we could raise an error or warning when not all nodes are constants
+    return None
+
+
+def extract_javascript(
+    fileobj: _FileObj,
+    keywords: Mapping[str, _Keyword],
+    comment_tags: Collection[str],
+    options: _JSOptions,
+    lineno: int = 1,
+) -> Generator[_ExtractionResult, None, None]:
     """Extract messages from JavaScript source code.

     :param fileobj: the seekable, file-like object the messages should be
@@ -312,12 +675,137 @@ def extract_javascript(fileobj: _FileObj, keywords: Mapping[str, _Keyword],
                                                  template strings.
     :param lineno: line number offset (for parsing embedded fragments)
     """
-    pass
-
-
-def parse_template_string(template_string: str, keywords: Mapping[str,
-    _Keyword], comment_tags: Collection[str], options: _JSOptions, lineno:
-    int=1) ->Generator[_ExtractionResult, None, None]:
+    from babel.messages.jslexer import Token, tokenize, unquote_string
+    funcname = message_lineno = None
+    messages = []
+    last_argument = None
+    translator_comments = []
+    concatenate_next = False
+    encoding = options.get('encoding', 'utf-8')
+    last_token = None
+    call_stack = -1
+    dotted = any('.' in kw for kw in keywords)
+    for token in tokenize(
+        fileobj.read().decode(encoding),
+        jsx=options.get("jsx", True),
+        template_string=options.get("template_string", True),
+        dotted=dotted,
+        lineno=lineno,
+    ):
+        if (  # Turn keyword`foo` expressions into keyword("foo") calls:
+            funcname and  # have a keyword...
+            (last_token and last_token.type == 'name') and  # we've seen nothing after the keyword...
+            token.type == 'template_string'  # this is a template string
+        ):
+            message_lineno = token.lineno
+            messages = [unquote_string(token.value)]
+            call_stack = 0
+            token = Token('operator', ')', token.lineno)
+
+        if options.get('parse_template_string') and not funcname and token.type == 'template_string':
+            yield from parse_template_string(token.value, keywords, comment_tags, options, token.lineno)
+
+        elif token.type == 'operator' and token.value == '(':
+            if funcname:
+                message_lineno = token.lineno
+                call_stack += 1
+
+        elif call_stack == -1 and token.type == 'linecomment':
+            value = token.value[2:].strip()
+            if translator_comments and \
+               translator_comments[-1][0] == token.lineno - 1:
+                translator_comments.append((token.lineno, value))
+                continue
+
+            for comment_tag in comment_tags:
+                if value.startswith(comment_tag):
+                    translator_comments.append((token.lineno, value.strip()))
+                    break
+
+        elif token.type == 'multilinecomment':
+            # only one multi-line comment may precede a translation
+            translator_comments = []
+            value = token.value[2:-2].strip()
+            for comment_tag in comment_tags:
+                if value.startswith(comment_tag):
+                    lines = value.splitlines()
+                    if lines:
+                        lines[0] = lines[0].strip()
+                        lines[1:] = dedent('\n'.join(lines[1:])).splitlines()
+                        for offset, line in enumerate(lines):
+                            translator_comments.append((token.lineno + offset,
+                                                        line))
+                    break
+
+        elif funcname and call_stack == 0:
+            if token.type == 'operator' and token.value == ')':
+                if last_argument is not None:
+                    messages.append(last_argument)
+                if len(messages) > 1:
+                    messages = tuple(messages)
+                elif messages:
+                    messages = messages[0]
+                else:
+                    messages = None
+
+                # Comments don't apply unless they immediately precede the
+                # message
+                if translator_comments and \
+                   translator_comments[-1][0] < message_lineno - 1:
+                    translator_comments = []
+
+                if messages is not None:
+                    yield (message_lineno, funcname, messages,
+                           [comment[1] for comment in translator_comments])
+
+                funcname = message_lineno = last_argument = None
+                concatenate_next = False
+                translator_comments = []
+                messages = []
+                call_stack = -1
+
+            elif token.type in ('string', 'template_string'):
+                new_value = unquote_string(token.value)
+                if concatenate_next:
+                    last_argument = (last_argument or '') + new_value
+                    concatenate_next = False
+                else:
+                    last_argument = new_value
+
+            elif token.type == 'operator':
+                if token.value == ',':
+                    if last_argument is not None:
+                        messages.append(last_argument)
+                        last_argument = None
+                    else:
+                        messages.append(None)
+                    concatenate_next = False
+                elif token.value == '+':
+                    concatenate_next = True
+
+        elif call_stack > 0 and token.type == 'operator' \
+                and token.value == ')':
+            call_stack -= 1
+
+        elif funcname and call_stack == -1:
+            funcname = None
+
+        elif call_stack == -1 and token.type == 'name' and \
+            token.value in keywords and \
+            (last_token is None or last_token.type != 'name' or
+             last_token.value != 'function'):
+            funcname = token.value
+
+        last_token = token
+
+
+def parse_template_string(
+    template_string: str,
+    keywords: Mapping[str, _Keyword],
+    comment_tags: Collection[str],
+    options: _JSOptions,
+    lineno: int = 1,
+) -> Generator[_ExtractionResult, None, None]:
     """Parse JavaScript template string.

     :param template_string: the template string to be parsed
@@ -328,4 +816,27 @@ def parse_template_string(template_string: str, keywords: Mapping[str,
     :param options: a dictionary of additional options (optional)
     :param lineno: starting line number (optional)
     """
-    pass
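+    # e.g. in the template string `Hi ${gettext("Hello")}`, the embedded
+    # expression `gettext("Hello")` is re-fed to extract_javascript() below.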
+    from babel.messages.jslexer import line_re
+    prev_character = None
+    level = 0
+    inside_str = False
+    expression_contents = ''
+    for character in template_string[1:-1]:
+        if not inside_str and character in ('"', "'", '`'):
+            inside_str = character
+        elif inside_str == character and prev_character != '\\':
+            inside_str = False
+        if level:
+            expression_contents += character
+        if not inside_str:
+            if character == '{' and prev_character == '$':
+                level += 1
+            elif level and character == '}':
+                level -= 1
+                if level == 0 and expression_contents:
+                    expression_contents = expression_contents[0:-1]
+                    fake_file_obj = io.BytesIO(expression_contents.encode())
+                    yield from extract_javascript(fake_file_obj, keywords, comment_tags, options, lineno)
+                    lineno += len(line_re.findall(expression_contents))
+                    expression_contents = ''
+        prev_character = character
diff --git a/babel/messages/frontend.py b/babel/messages/frontend.py
index 8d98f9d..34f9e89 100644
--- a/babel/messages/frontend.py
+++ b/babel/messages/frontend.py
@@ -7,7 +7,9 @@
     :copyright: (c) 2013-2023 by the Babel Team.
     :license: BSD, see LICENSE for more details.
 """
+
 from __future__ import annotations
+
 import datetime
 import fnmatch
 import logging
@@ -21,14 +23,21 @@ from collections import OrderedDict
 from configparser import RawConfigParser
 from io import StringIO
 from typing import Iterable
+
 from babel import Locale, localedata
 from babel import __version__ as VERSION
 from babel.core import UnknownLocaleError
 from babel.messages.catalog import DEFAULT_HEADER, Catalog
-from babel.messages.extract import DEFAULT_KEYWORDS, DEFAULT_MAPPING, check_and_call_extract_file, extract_from_dir
+from babel.messages.extract import (
+    DEFAULT_KEYWORDS,
+    DEFAULT_MAPPING,
+    check_and_call_extract_file,
+    extract_from_dir,
+)
 from babel.messages.mofile import write_mo
 from babel.messages.pofile import read_po, write_po
 from babel.util import LOCALTZ
+
 log = logging.getLogger('babel')


@@ -70,18 +79,54 @@ def listify_value(arg, split=None):
     :param split: The argument to pass to `str.split()`.
     :return:
     """
-    pass
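+    # e.g. listify_value('foo bar') == ['foo', 'bar'] and
+    # listify_value(['1, 2', '3, 4'], ',') == ['1', '2', '3', '4']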
+    out = []
+
+    if not isinstance(arg, (list, tuple)):
+        arg = [arg]
+
+    for val in arg:
+        if val is None:
+            continue
+        if isinstance(val, (list, tuple)):
+            out.extend(listify_value(val, split=split))
+            continue
+        out.extend(s.strip() for s in str(val).split(split))
+    assert all(isinstance(val, str) for val in out)
+    return out


 class CommandMixin:
+    # This class is a small shim between Distutils commands and
+    # optparse option parsing in the frontend command line.
+
+    #: Option name to be input as `args` on the script command line.
     as_args = None
+
+    #: Options which allow multiple values.
+    #: This is used by the `optparse` transmogrification code.
     multiple_value_options = ()
+
+    #: Options which are booleans.
+    #: This is used by the `optparse` transmogrification code.
+    # (This is actually used by distutils code too, but is never
+    # declared in the base class.)
     boolean_options = ()
+
+    #: Option aliases, to retain standalone command compatibility.
+    #: Distutils does not support option aliases, but optparse does.
+    #: This maps the distutils argument name to an iterable of aliases
+    #: that are usable with optparse.
     option_aliases = {}
+
+    #: Choices for options that needed to be restricted to specific
+    #: list of choices.
     option_choices = {}
+
+    #: Log object. To allow replacement in the script command line runner.
     log = log

     def __init__(self, dist=None):
+        # A less strict version of distutils' `__init__`.
         self.distribution = dist
         self.initialize_options()
         self._dry_run = None
@@ -90,113 +135,702 @@ class CommandMixin:
         self.help = 0
         self.finalized = 0

+    def initialize_options(self):
+        pass
+
+    def ensure_finalized(self):
+        if not self.finalized:
+            self.finalize_options()
+        self.finalized = 1
+
+    def finalize_options(self):
+        raise RuntimeError(
+            f"abstract method -- subclass {self.__class__} must override",
+        )
+

 class CompileCatalog(CommandMixin):
     description = 'compile message catalogs to binary MO files'
-    user_options = [('domain=', 'D',
-        "domains of PO files (space separated list, default 'messages')"),
+    user_options = [
+        ('domain=', 'D',
+         "domains of PO files (space separated list, default 'messages')"),
         ('directory=', 'd',
-        'path to base directory containing the catalogs'), ('input-file=',
-        'i', 'name of the input file'), ('output-file=', 'o',
-        "name of the output file (default '<output_dir>/<locale>/LC_MESSAGES/<domain>.mo')"
-        ), ('locale=', 'l', 'locale of the catalog to compile'), (
-        'use-fuzzy', 'f', 'also include fuzzy translations'), ('statistics',
-        None, 'print statistics about translations')]
+         'path to base directory containing the catalogs'),
+        ('input-file=', 'i',
+         'name of the input file'),
+        ('output-file=', 'o',
+         "name of the output file (default "
+         "'<output_dir>/<locale>/LC_MESSAGES/<domain>.mo')"),
+        ('locale=', 'l',
+         'locale of the catalog to compile'),
+        ('use-fuzzy', 'f',
+         'also include fuzzy translations'),
+        ('statistics', None,
+         'print statistics about translations'),
+    ]
     boolean_options = ['use-fuzzy', 'statistics']

+    def initialize_options(self):
+        self.domain = 'messages'
+        self.directory = None
+        self.input_file = None
+        self.output_file = None
+        self.locale = None
+        self.use_fuzzy = False
+        self.statistics = False
+
+    def finalize_options(self):
+        self.domain = listify_value(self.domain)
+        if not self.input_file and not self.directory:
+            raise OptionError('you must specify either the input file or the base directory')
+        if not self.output_file and not self.directory:
+            raise OptionError('you must specify either the output file or the base directory')
+
+    def run(self):
+        n_errors = 0
+        for domain in self.domain:
+            for errors in self._run_domain(domain).values():
+                n_errors += len(errors)
+        if n_errors:
+            self.log.error('%d errors encountered.', n_errors)
+        return (1 if n_errors else 0)
+
+    def _run_domain(self, domain):
+        po_files = []
+        mo_files = []
+
+        if not self.input_file:
+            if self.locale:
+                po_files.append((self.locale,
+                                 os.path.join(self.directory, self.locale,
+                                              'LC_MESSAGES',
+                                              f"{domain}.po")))
+                mo_files.append(os.path.join(self.directory, self.locale,
+                                             'LC_MESSAGES',
+                                             f"{domain}.mo"))
+            else:
+                for locale in os.listdir(self.directory):
+                    po_file = os.path.join(self.directory, locale,
+                                           'LC_MESSAGES', f"{domain}.po")
+                    if os.path.exists(po_file):
+                        po_files.append((locale, po_file))
+                        mo_files.append(os.path.join(self.directory, locale,
+                                                     'LC_MESSAGES',
+                                                     f"{domain}.mo"))
+        else:
+            po_files.append((self.locale, self.input_file))
+            if self.output_file:
+                mo_files.append(self.output_file)
+            else:
+                mo_files.append(os.path.join(self.directory, self.locale,
+                                             'LC_MESSAGES',
+                                             f"{domain}.mo"))
+
+        if not po_files:
+            raise OptionError('no message catalogs found')
+
+        catalogs_and_errors = {}
+
+        for idx, (locale, po_file) in enumerate(po_files):
+            mo_file = mo_files[idx]
+            with open(po_file, 'rb') as infile:
+                catalog = read_po(infile, locale)
+
+            if self.statistics:
+                translated = 0
+                for message in list(catalog)[1:]:
+                    if message.string:
+                        translated += 1
+                percentage = 0
+                if len(catalog):
+                    percentage = translated * 100 // len(catalog)
+                self.log.info(
+                    '%d of %d messages (%d%%) translated in %s',
+                    translated, len(catalog), percentage, po_file,
+                )
+
+            if catalog.fuzzy and not self.use_fuzzy:
+                self.log.info('catalog %s is marked as fuzzy, skipping', po_file)
+                continue
+
+            catalogs_and_errors[catalog] = catalog_errors = list(catalog.check())
+            for message, errors in catalog_errors:
+                for error in errors:
+                    self.log.error(
+                        'error: %s:%d: %s', po_file, message.lineno, error,
+                    )
+
+            self.log.info('compiling catalog %s to %s', po_file, mo_file)
+
+            with open(mo_file, 'wb') as outfile:
+                write_mo(outfile, catalog, use_fuzzy=self.use_fuzzy)
+
+        return catalogs_and_errors
+

 def _make_directory_filter(ignore_patterns):
     """
     Build a directory_filter function based on a list of ignore patterns.
     """
-    pass
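+    # e.g. _make_directory_filter(['.*', '_*'])('/src/.git') evaluates to False,
+    # i.e. such directories are skipped during extraction.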
+
+    def cli_directory_filter(dirname):
+        basename = os.path.basename(dirname)
+        return not any(
+            fnmatch.fnmatch(basename, ignore_pattern)
+            for ignore_pattern
+            in ignore_patterns
+        )
+
+    return cli_directory_filter


 class ExtractMessages(CommandMixin):
     description = 'extract localizable strings from the project code'
-    user_options = [('charset=', None,
-        'charset to use in the output file (default "utf-8")'), (
-        'keywords=', 'k',
-        'space-separated list of keywords to look for in addition to the defaults (may be repeated multiple times)'
-        ), ('no-default-keywords', None,
-        'do not include the default keywords'), ('mapping-file=', 'F',
-        'path to the mapping configuration file'), ('no-location', None,
-        'do not include location comments with filename and line number'),
+    user_options = [
+        ('charset=', None,
+         'charset to use in the output file (default "utf-8")'),
+        ('keywords=', 'k',
+         'space-separated list of keywords to look for in addition to the '
+         'defaults (may be repeated multiple times)'),
+        ('no-default-keywords', None,
+         'do not include the default keywords'),
+        ('mapping-file=', 'F',
+         'path to the mapping configuration file'),
+        ('no-location', None,
+         'do not include location comments with filename and line number'),
         ('add-location=', None,
-        'location lines format. If it is not given or "full", it generates the lines with both file name and line number. If it is "file", the line number part is omitted. If it is "never", it completely suppresses the lines (same as --no-location).'
-        ), ('omit-header', None, 'do not include msgid "" entry in header'),
-        ('output-file=', 'o', 'name of the output file'), ('width=', 'w',
-        'set output line width (default 76)'), ('no-wrap', None,
-        'do not break long message lines, longer than the output line width, into several lines'
-        ), ('sort-output', None, 'generate sorted output (default False)'),
+         'location lines format. If it is not given or "full", it generates '
+         'the lines with both file name and line number. If it is "file", '
+         'the line number part is omitted. If it is "never", it completely '
+         'suppresses the lines (same as --no-location).'),
+        ('omit-header', None,
+         'do not include msgid "" entry in header'),
+        ('output-file=', 'o',
+         'name of the output file'),
+        ('width=', 'w',
+         'set output line width (default 76)'),
+        ('no-wrap', None,
+         'do not break long message lines, longer than the output line width, '
+         'into several lines'),
+        ('sort-output', None,
+         'generate sorted output (default False)'),
         ('sort-by-file', None,
-        'sort output by file location (default False)'), (
-        'msgid-bugs-address=', None, 'set report address for msgid'), (
-        'copyright-holder=', None, 'set copyright holder in output'), (
-        'project=', None, 'set project name in output'), ('version=', None,
-        'set project version in output'), ('add-comments=', 'c',
-        'place comment block with TAG (or those preceding keyword lines) in output file. Separate multiple TAGs with commas(,)'
-        ), ('strip-comments', 's',
-        'strip the comment TAGs from the comments.'), ('input-paths=', None,
-        'files or directories that should be scanned for messages. Separate multiple files or directories with commas(,)'
-        ), ('input-dirs=', None,
-        'alias for input-paths (does allow files as well as directories).'),
+         'sort output by file location (default False)'),
+        ('msgid-bugs-address=', None,
+         'set report address for msgid'),
+        ('copyright-holder=', None,
+         'set copyright holder in output'),
+        ('project=', None,
+         'set project name in output'),
+        ('version=', None,
+         'set project version in output'),
+        ('add-comments=', 'c',
+         'place comment block with TAG (or those preceding keyword lines) in '
+         'output file. Separate multiple TAGs with commas(,)'),  # TODO: Support repetition of this argument
+        ('strip-comments', 's',
+         'strip the comment TAGs from the comments.'),
+        ('input-paths=', None,
+         'files or directories that should be scanned for messages. Separate multiple '
+         'files or directories with commas(,)'),  # TODO: Support repetition of this argument
+        ('input-dirs=', None,  # TODO (3.x): Remove me.
+         'alias for input-paths (does allow files as well as directories).'),
         ('ignore-dirs=', None,
-        'Patterns for directories to ignore when scanning for messages. Separate multiple patterns with spaces (default ".* ._")'
-        ), ('header-comment=', None, 'header comment for the catalog'), (
-        'last-translator=', None,
-        'set the name and email of the last translator in output')]
-    boolean_options = ['no-default-keywords', 'no-location', 'omit-header',
-        'no-wrap', 'sort-output', 'sort-by-file', 'strip-comments']
+         'Patterns for directories to ignore when scanning for messages. '
+         'Separate multiple patterns with spaces (default ".* ._")'),
+        ('header-comment=', None,
+         'header comment for the catalog'),
+        ('last-translator=', None,
+         'set the name and email of the last translator in output'),
+    ]
+    boolean_options = [
+        'no-default-keywords', 'no-location', 'omit-header', 'no-wrap',
+        'sort-output', 'sort-by-file', 'strip-comments',
+    ]
     as_args = 'input-paths'
-    multiple_value_options = 'add-comments', 'keywords', 'ignore-dirs'
-    option_aliases = {'keywords': ('--keyword',), 'mapping-file': (
-        '--mapping',), 'output-file': ('--output',), 'strip-comments': (
-        '--strip-comment-tags',), 'last-translator': ('--last-translator',)}
-    option_choices = {'add-location': ('full', 'file', 'never')}
+    multiple_value_options = (
+        'add-comments',
+        'keywords',
+        'ignore-dirs',
+    )
+    option_aliases = {
+        'keywords': ('--keyword',),
+        'mapping-file': ('--mapping',),
+        'output-file': ('--output',),
+        'strip-comments': ('--strip-comment-tags',),
+        'last-translator': ('--last-translator',),
+    }
+    option_choices = {
+        'add-location': ('full', 'file', 'never'),
+    }
+
+    def initialize_options(self):
+        self.charset = 'utf-8'
+        self.keywords = None
+        self.no_default_keywords = False
+        self.mapping_file = None
+        self.no_location = False
+        self.add_location = None
+        self.omit_header = False
+        self.output_file = None
+        self.input_dirs = None
+        self.input_paths = None
+        self.width = None
+        self.no_wrap = False
+        self.sort_output = False
+        self.sort_by_file = False
+        self.msgid_bugs_address = None
+        self.copyright_holder = None
+        self.project = None
+        self.version = None
+        self.add_comments = None
+        self.strip_comments = False
+        self.include_lineno = True
+        self.ignore_dirs = None
+        self.header_comment = None
+        self.last_translator = None
+
+    def finalize_options(self):
+        if self.input_dirs:
+            if not self.input_paths:
+                self.input_paths = self.input_dirs
+            else:
+                raise OptionError(
+                    'input-dirs and input-paths are mutually exclusive',
+                )
+
+        keywords = {} if self.no_default_keywords else DEFAULT_KEYWORDS.copy()
+
+        keywords.update(parse_keywords(listify_value(self.keywords)))
+
+        self.keywords = keywords
+
+        if not self.keywords:
+            raise OptionError(
+                'you must specify new keywords if you disable the default ones',
+            )
+
+        if not self.output_file:
+            raise OptionError('no output file specified')
+        if self.no_wrap and self.width:
+            raise OptionError(
+                "'--no-wrap' and '--width' are mutually exclusive",
+            )
+        if not self.no_wrap and not self.width:
+            self.width = 76
+        elif self.width is not None:
+            self.width = int(self.width)
+
+        if self.sort_output and self.sort_by_file:
+            raise OptionError(
+                "'--sort-output' and '--sort-by-file' are mutually exclusive",
+            )
+
+        if self.input_paths:
+            if isinstance(self.input_paths, str):
+                self.input_paths = re.split(r',\s*', self.input_paths)
+        elif self.distribution is not None:
+            self.input_paths = dict.fromkeys([
+                k.split('.', 1)[0]
+                for k in (self.distribution.packages or ())
+            ]).keys()
+        else:
+            self.input_paths = []
+
+        if not self.input_paths:
+            raise OptionError("no input files or directories specified")
+
+        for path in self.input_paths:
+            if not os.path.exists(path):
+                raise OptionError(f"Input path: {path} does not exist")
+
+        self.add_comments = listify_value(self.add_comments or (), ",")
+
+        if self.distribution:
+            if not self.project:
+                self.project = self.distribution.get_name()
+            if not self.version:
+                self.version = self.distribution.get_version()
+
+        if self.add_location == 'never':
+            self.no_location = True
+        elif self.add_location == 'file':
+            self.include_lineno = False
+
+        ignore_dirs = listify_value(self.ignore_dirs)
+        if ignore_dirs:
+            self.directory_filter = _make_directory_filter(ignore_dirs)
+        else:
+            self.directory_filter = None
+
+    def _build_callback(self, path: str):
+        def callback(filename: str, method: str, options: dict):
+            if method == 'ignore':
+                return
+
+            # If we explicitly provide a full filepath, just use that.
+            # Otherwise, path will be the directory path and filename
+            # is the relative path from that dir to the file.
+            # So we can join those to get the full filepath.
+            if os.path.isfile(path):
+                filepath = path
+            else:
+                filepath = os.path.normpath(os.path.join(path, filename))
+
+            optstr = ''
+            if options:
+                opt_values = ", ".join(f'{k}="{v}"' for k, v in options.items())
+                optstr = f" ({opt_values})"
+            self.log.info('extracting messages from %s%s', filepath, optstr)
+
+        return callback
+
+    def run(self):
+        mappings = self._get_mappings()
+        with open(self.output_file, 'wb') as outfile:
+            catalog = Catalog(project=self.project,
+                              version=self.version,
+                              msgid_bugs_address=self.msgid_bugs_address,
+                              copyright_holder=self.copyright_holder,
+                              charset=self.charset,
+                              header_comment=(self.header_comment or DEFAULT_HEADER),
+                              last_translator=self.last_translator)
+
+            for path, method_map, options_map in mappings:
+                callback = self._build_callback(path)
+                if os.path.isfile(path):
+                    current_dir = os.getcwd()
+                    extracted = check_and_call_extract_file(
+                        path, method_map, options_map,
+                        callback, self.keywords, self.add_comments,
+                        self.strip_comments, current_dir,
+                    )
+                else:
+                    extracted = extract_from_dir(
+                        path, method_map, options_map,
+                        keywords=self.keywords,
+                        comment_tags=self.add_comments,
+                        callback=callback,
+                        strip_comment_tags=self.strip_comments,
+                        directory_filter=self.directory_filter,
+                    )
+                for filename, lineno, message, comments, context in extracted:
+                    if os.path.isfile(path):
+                        filepath = filename  # already normalized
+                    else:
+                        filepath = os.path.normpath(os.path.join(path, filename))
+
+                    catalog.add(message, None, [(filepath, lineno)],
+                                auto_comments=comments, context=context)
+
+            self.log.info('writing PO template file to %s', self.output_file)
+            write_po(outfile, catalog, width=self.width,
+                     no_location=self.no_location,
+                     omit_header=self.omit_header,
+                     sort_output=self.sort_output,
+                     sort_by_file=self.sort_by_file,
+                     include_lineno=self.include_lineno)
+
+    def _get_mappings(self):
+        mappings = []
+
+        if self.mapping_file:
+            with open(self.mapping_file) as fileobj:
+                method_map, options_map = parse_mapping(fileobj)
+            for path in self.input_paths:
+                mappings.append((path, method_map, options_map))
+
+        elif getattr(self.distribution, 'message_extractors', None):
+            message_extractors = self.distribution.message_extractors
+            for path, mapping in message_extractors.items():
+                if isinstance(mapping, str):
+                    method_map, options_map = parse_mapping(StringIO(mapping))
+                else:
+                    method_map, options_map = [], {}
+                    for pattern, method, options in mapping:
+                        method_map.append((pattern, method))
+                        options_map[pattern] = options or {}
+                mappings.append((path, method_map, options_map))
+
+        else:
+            for path in self.input_paths:
+                mappings.append((path, DEFAULT_MAPPING, {}))
+
+        return mappings


 class InitCatalog(CommandMixin):
     description = 'create a new catalog based on a POT file'
-    user_options = [('domain=', 'D',
-        "domain of PO file (default 'messages')"), ('input-file=', 'i',
-        'name of the input file'), ('output-dir=', 'd',
-        'path to output directory'), ('output-file=', 'o',
-        "name of the output file (default '<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"
-        ), ('locale=', 'l', 'locale for the new localized catalog'), (
-        'width=', 'w', 'set output line width (default 76)'), ('no-wrap',
-        None,
-        'do not break long message lines, longer than the output line width, into several lines'
-        )]
+    user_options = [
+        ('domain=', 'D',
+         "domain of PO file (default 'messages')"),
+        ('input-file=', 'i',
+         'name of the input file'),
+        ('output-dir=', 'd',
+         'path to output directory'),
+        ('output-file=', 'o',
+         "name of the output file (default "
+         "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
+        ('locale=', 'l',
+         'locale for the new localized catalog'),
+        ('width=', 'w',
+         'set output line width (default 76)'),
+        ('no-wrap', None,
+         'do not break long message lines, longer than the output line width, '
+         'into several lines'),
+    ]
     boolean_options = ['no-wrap']

+    def initialize_options(self):
+        self.output_dir = None
+        self.output_file = None
+        self.input_file = None
+        self.locale = None
+        self.domain = 'messages'
+        self.no_wrap = False
+        self.width = None
+
+    def finalize_options(self):
+        if not self.input_file:
+            raise OptionError('you must specify the input file')
+
+        if not self.locale:
+            raise OptionError('you must provide a locale for the new catalog')
+        try:
+            self._locale = Locale.parse(self.locale)
+        except UnknownLocaleError as e:
+            raise OptionError(e) from e
+
+        if not self.output_file and not self.output_dir:
+            raise OptionError('you must specify the output directory')
+        if not self.output_file:
+            self.output_file = os.path.join(self.output_dir, self.locale,
+                                            'LC_MESSAGES', f"{self.domain}.po")
+
+        if not os.path.exists(os.path.dirname(self.output_file)):
+            os.makedirs(os.path.dirname(self.output_file))
+        if self.no_wrap and self.width:
+            raise OptionError("'--no-wrap' and '--width' are mutually exclusive")
+        if not self.no_wrap and not self.width:
+            self.width = 76
+        elif self.width is not None:
+            self.width = int(self.width)
+
+    def run(self):
+        self.log.info(
+            'creating catalog %s based on %s', self.output_file, self.input_file,
+        )
+
+        with open(self.input_file, 'rb') as infile:
+            # Although reading from the catalog template, read_po must be fed
+            # the locale in order to correctly calculate plurals
+            catalog = read_po(infile, locale=self.locale)
+
+        catalog.locale = self._locale
+        catalog.revision_date = datetime.datetime.now(LOCALTZ)
+        catalog.fuzzy = False
+
+        with open(self.output_file, 'wb') as outfile:
+            write_po(outfile, catalog, width=self.width)
+

 class UpdateCatalog(CommandMixin):
     description = 'update message catalogs from a POT file'
-    user_options = [('domain=', 'D',
-        "domain of PO file (default 'messages')"), ('input-file=', 'i',
-        'name of the input file'), ('output-dir=', 'd',
-        'path to base directory containing the catalogs'), ('output-file=',
-        'o',
-        "name of the output file (default '<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"
-        ), ('omit-header', None, 'do not include msgid  entry in header'),
-        ('locale=', 'l', 'locale of the catalog to compile'), ('width=',
-        'w', 'set output line width (default 76)'), ('no-wrap', None,
-        'do not break long message lines, longer than the output line width, into several lines'
-        ), ('ignore-obsolete=', None,
-        'whether to omit obsolete messages from the output'), (
-        'init-missing=', None,
-        'if any output files are missing, initialize them first'), (
-        'no-fuzzy-matching', 'N', 'do not use fuzzy matching'), (
-        'update-header-comment', None, 'update target header comment'), (
-        'previous', None, 'keep previous msgids of translated messages'), (
-        'check=', None,
-        "don't update the catalog, just return the status. Return code 0 means nothing would change. Return code 1 means that the catalog would be updated"
-        ), ('ignore-pot-creation-date=', None,
-        'ignore changes to POT-Creation-Date when updating or checking')]
-    boolean_options = ['omit-header', 'no-wrap', 'ignore-obsolete',
-        'init-missing', 'no-fuzzy-matching', 'previous',
-        'update-header-comment', 'check', 'ignore-pot-creation-date']
+    user_options = [
+        ('domain=', 'D',
+         "domain of PO file (default 'messages')"),
+        ('input-file=', 'i',
+         'name of the input file'),
+        ('output-dir=', 'd',
+         'path to base directory containing the catalogs'),
+        ('output-file=', 'o',
+         "name of the output file (default "
+         "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
+        ('omit-header', None,
+         "do not include msgid "" entry in header"),
+        ('locale=', 'l',
+         'locale of the catalog to compile'),
+        ('width=', 'w',
+         'set output line width (default 76)'),
+        ('no-wrap', None,
+         'do not break long message lines, longer than the output line width, '
+         'into several lines'),
+        ('ignore-obsolete=', None,
+         'whether to omit obsolete messages from the output'),
+        ('init-missing=', None,
+         'if any output files are missing, initialize them first'),
+        ('no-fuzzy-matching', 'N',
+         'do not use fuzzy matching'),
+        ('update-header-comment', None,
+         'update target header comment'),
+        ('previous', None,
+         'keep previous msgids of translated messages'),
+        ('check=', None,
+         'don\'t update the catalog, just return the status. Return code 0 '
+         'means nothing would change. Return code 1 means that the catalog '
+         'would be updated'),
+        ('ignore-pot-creation-date=', None,
+         'ignore changes to POT-Creation-Date when updating or checking'),
+    ]
+    boolean_options = [
+        'omit-header', 'no-wrap', 'ignore-obsolete', 'init-missing',
+        'no-fuzzy-matching', 'previous', 'update-header-comment',
+        'check', 'ignore-pot-creation-date',
+    ]
+
+    def initialize_options(self):
+        self.domain = 'messages'
+        self.input_file = None
+        self.output_dir = None
+        self.output_file = None
+        self.omit_header = False
+        self.locale = None
+        self.width = None
+        self.no_wrap = False
+        self.ignore_obsolete = False
+        self.init_missing = False
+        self.no_fuzzy_matching = False
+        self.update_header_comment = False
+        self.previous = False
+        self.check = False
+        self.ignore_pot_creation_date = False
+
+    def finalize_options(self):
+        if not self.input_file:
+            raise OptionError('you must specify the input file')
+        if not self.output_file and not self.output_dir:
+            raise OptionError('you must specify the output file or directory')
+        if self.output_file and not self.locale:
+            raise OptionError('you must specify the locale')
+
+        if self.init_missing:
+            if not self.locale:
+                raise OptionError(
+                    'you must specify the locale for '
+                    'the init-missing option to work',
+                )
+
+            try:
+                self._locale = Locale.parse(self.locale)
+            except UnknownLocaleError as e:
+                raise OptionError(e) from e
+        else:
+            self._locale = None
+
+        if self.no_wrap and self.width:
+            raise OptionError("'--no-wrap' and '--width' are mutually exclusive")
+        if not self.no_wrap and not self.width:
+            self.width = 76
+        elif self.width is not None:
+            self.width = int(self.width)
+        if self.no_fuzzy_matching and self.previous:
+            self.previous = False
+
+    def run(self):
+        check_status = {}
+        po_files = []
+        if not self.output_file:
+            if self.locale:
+                po_files.append((self.locale,
+                                 os.path.join(self.output_dir, self.locale,
+                                              'LC_MESSAGES',
+                                              f"{self.domain}.po")))
+            else:
+                for locale in os.listdir(self.output_dir):
+                    po_file = os.path.join(self.output_dir, locale,
+                                           'LC_MESSAGES',
+                                           f"{self.domain}.po")
+                    if os.path.exists(po_file):
+                        po_files.append((locale, po_file))
+        else:
+            po_files.append((self.locale, self.output_file))
+
+        if not po_files:
+            raise OptionError('no message catalogs found')
+
+        domain = self.domain
+        if not domain:
+            domain = os.path.splitext(os.path.basename(self.input_file))[0]
+
+        with open(self.input_file, 'rb') as infile:
+            template = read_po(infile)
+
+        for locale, filename in po_files:
+            if self.init_missing and not os.path.exists(filename):
+                if self.check:
+                    check_status[filename] = False
+                    continue
+                self.log.info(
+                    'creating catalog %s based on %s', filename, self.input_file,
+                )
+
+                with open(self.input_file, 'rb') as infile:
+                    # Although reading from the catalog template, read_po must
+                    # be fed the locale in order to correctly calculate plurals
+                    catalog = read_po(infile, locale=self.locale)
+
+                catalog.locale = self._locale
+                catalog.revision_date = datetime.datetime.now(LOCALTZ)
+                catalog.fuzzy = False
+
+                with open(filename, 'wb') as outfile:
+                    write_po(outfile, catalog)
+
+            self.log.info('updating catalog %s based on %s', filename, self.input_file)
+            with open(filename, 'rb') as infile:
+                catalog = read_po(infile, locale=locale, domain=domain)
+
+            catalog.update(
+                template, self.no_fuzzy_matching,
+                update_header_comment=self.update_header_comment,
+                update_creation_date=not self.ignore_pot_creation_date,
+            )
+
+            tmpname = os.path.join(os.path.dirname(filename),
+                                   tempfile.gettempprefix() +
+                                   os.path.basename(filename))
+            try:
+                with open(tmpname, 'wb') as tmpfile:
+                    write_po(tmpfile, catalog,
+                             omit_header=self.omit_header,
+                             ignore_obsolete=self.ignore_obsolete,
+                             include_previous=self.previous, width=self.width)
+            except Exception:
+                os.remove(tmpname)
+                raise
+
+            if self.check:
+                with open(filename, "rb") as origfile:
+                    original_catalog = read_po(origfile)
+                with open(tmpname, "rb") as newfile:
+                    updated_catalog = read_po(newfile)
+                updated_catalog.revision_date = original_catalog.revision_date
+                check_status[filename] = updated_catalog.is_identical(original_catalog)
+                os.remove(tmpname)
+                continue
+
+            try:
+                os.rename(tmpname, filename)
+            except OSError:
+                # We're probably on Windows, which doesn't support atomic
+                # renames, at least not through Python
+                # If the error is in fact due to a permissions problem, that
+                # same error is going to be raised from one of the following
+                # operations
+                os.remove(filename)
+                shutil.copy(tmpname, filename)
+                os.remove(tmpname)
+
+        if self.check:
+            for filename, up_to_date in check_status.items():
+                if up_to_date:
+                    self.log.info('Catalog %s is up to date.', filename)
+                else:
+                    self.log.warning('Catalog %s is out of date.', filename)
+            if not all(check_status.values()):
+                raise BaseError("Some catalogs are out of date.")
+            else:
+                self.log.info("All the catalogs are up-to-date.")
+            return


 class CommandLineInterface:
@@ -205,30 +839,144 @@ class CommandLineInterface:
     This class provides a simple command-line interface to the message
     extraction and PO file generation functionality.
     """
+
     usage = '%%prog %s [options] %s'
     version = f'%prog {VERSION}'
-    commands = {'compile': 'compile message catalogs to MO files',
-        'extract':
-        'extract messages from source files and generate a POT file',
-        'init': 'create new message catalogs from a POT file', 'update':
-        'update existing message catalogs from a POT file'}
-    command_classes = {'compile': CompileCatalog, 'extract':
-        ExtractMessages, 'init': InitCatalog, 'update': UpdateCatalog}
-    log = None
+    commands = {
+        'compile': 'compile message catalogs to MO files',
+        'extract': 'extract messages from source files and generate a POT file',
+        'init': 'create new message catalogs from a POT file',
+        'update': 'update existing message catalogs from a POT file',
+    }
+
+    command_classes = {
+        'compile': CompileCatalog,
+        'extract': ExtractMessages,
+        'init': InitCatalog,
+        'update': UpdateCatalog,
+    }
+
+    log = None  # Replaced on instance level

     def run(self, argv=None):
         """Main entry point of the command-line interface.

         :param argv: list of arguments passed on the command-line
         """
-        pass
+
+        if argv is None:
+            argv = sys.argv
+
+        self.parser = optparse.OptionParser(usage=self.usage % ('command', '[args]'),
+                                            version=self.version)
+        self.parser.disable_interspersed_args()
+        self.parser.print_help = self._help
+        self.parser.add_option('--list-locales', dest='list_locales',
+                               action='store_true',
+                               help="print all known locales and exit")
+        self.parser.add_option('-v', '--verbose', action='store_const',
+                               dest='loglevel', const=logging.DEBUG,
+                               help='print as much as possible')
+        self.parser.add_option('-q', '--quiet', action='store_const',
+                               dest='loglevel', const=logging.ERROR,
+                               help='print as little as possible')
+        self.parser.set_defaults(list_locales=False, loglevel=logging.INFO)
+
+        options, args = self.parser.parse_args(argv[1:])
+
+        self._configure_logging(options.loglevel)
+        if options.list_locales:
+            identifiers = localedata.locale_identifiers()
+            id_width = max(len(identifier) for identifier in identifiers) + 1
+            for identifier in sorted(identifiers):
+                locale = Locale.parse(identifier)
+                print(f"{identifier:<{id_width}} {locale.english_name}")
+            return 0
+
+        if not args:
+            self.parser.error('no valid command or option passed. '
+                              'Try the -h/--help option for more information.')
+
+        cmdname = args[0]
+        if cmdname not in self.commands:
+            self.parser.error(f'unknown command "{cmdname}"')
+
+        cmdinst = self._configure_command(cmdname, args[1:])
+        return cmdinst.run()
+
+    def _configure_logging(self, loglevel):
+        self.log = log
+        self.log.setLevel(loglevel)
+        # Don't add a new handler for every instance initialization (#227);
+        # doing so would cause duplicated output when the CommandLineInterface
+        # is used as a normal Python class.
+        if self.log.handlers:
+            handler = self.log.handlers[0]
+        else:
+            handler = logging.StreamHandler()
+            self.log.addHandler(handler)
+        handler.setLevel(loglevel)
+        formatter = logging.Formatter('%(message)s')
+        handler.setFormatter(formatter)
+
+    def _help(self):
+        print(self.parser.format_help())
+        print("commands:")
+        cmd_width = max(8, max(len(command) for command in self.commands) + 1)
+        for name, description in sorted(self.commands.items()):
+            print(f"  {name:<{cmd_width}} {description}")

     def _configure_command(self, cmdname, argv):
         """
         :type cmdname: str
         :type argv: list[str]
         """
-        pass
+        cmdclass = self.command_classes[cmdname]
+        cmdinst = cmdclass()
+        if self.log:
+            cmdinst.log = self.log  # Use our logger, not distutils'.
+        assert isinstance(cmdinst, CommandMixin)
+        cmdinst.initialize_options()
+
+        parser = optparse.OptionParser(
+            usage=self.usage % (cmdname, ''),
+            description=self.commands[cmdname],
+        )
+        as_args = getattr(cmdclass, "as_args", ())
+        for long, short, help in cmdclass.user_options:
+            name = long.strip("=")
+            default = getattr(cmdinst, name.replace("-", "_"))
+            strs = [f"--{name}"]
+            if short:
+                strs.append(f"-{short}")
+            strs.extend(cmdclass.option_aliases.get(name, ()))
+            choices = cmdclass.option_choices.get(name, None)
+            if name == as_args:
+                parser.usage += f"<{name}>"
+            elif name in cmdclass.boolean_options:
+                parser.add_option(*strs, action="store_true", help=help)
+            elif name in cmdclass.multiple_value_options:
+                parser.add_option(*strs, action="append", help=help, choices=choices)
+            else:
+                parser.add_option(*strs, help=help, default=default, choices=choices)
+        options, args = parser.parse_args(argv)
+
+        if as_args:
+            setattr(options, as_args.replace('-', '_'), args)
+
+        for key, value in vars(options).items():
+            setattr(cmdinst, key, value)
+
+        try:
+            cmdinst.ensure_finalized()
+        except OptionError as err:
+            parser.error(str(err))
+
+        return cmdinst
+
+
+def main():
+    return CommandLineInterface().run(sys.argv)


 def parse_mapping(fileobj, filename=None):
@@ -280,10 +1028,45 @@ def parse_mapping(fileobj, filename=None):
                     text to parse
     :see: `extract_from_directory`
     """
-    pass
+    extractors = {}
+    method_map = []
+    options_map = {}
+
+    parser = RawConfigParser()
+    parser._sections = OrderedDict(parser._sections)  # We need ordered sections
+    parser.read_file(fileobj, filename)
+
+    for section in parser.sections():
+        if section == 'extractors':
+            extractors = dict(parser.items(section))
+        else:
+            method, pattern = (part.strip() for part in section.split(':', 1))
+            method_map.append((pattern, method))
+            options_map[pattern] = dict(parser.items(section))
+
+    if extractors:
+        for idx, (pattern, method) in enumerate(method_map):
+            if method in extractors:
+                method = extractors[method]
+            method_map[idx] = (pattern, method)
+
+    return method_map, options_map


-def parse_keywords(strings: Iterable[str]=()):
+def _parse_spec(s: str) -> tuple[int | None, tuple[int | tuple[int, str], ...]]:
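+    """Parse a single keyword argument spec such as ``1,2``, ``1c,2`` or ``1,2,4t``.
+
+    A component with a trailing ``t`` gives the total number of arguments the
+    call must have for this spec to apply, and a trailing ``c`` marks the
+    argument that carries the message context.
+    """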
+    inds = []
+    number = None
+    for x in s.split(','):
+        if x[-1] == 't':
+            number = int(x[:-1])
+        elif x[-1] == 'c':
+            inds.append((int(x[:-1]), 'c'))
+        else:
+            inds.append(int(x))
+    return number, tuple(inds)
+
+
+def parse_keywords(strings: Iterable[str] = ()):
     """Parse keywords specifications from the given list of strings.

     >>> import pprint
@@ -312,15 +1095,34 @@ def parse_keywords(strings: Iterable[str]=()):
     messages. A ``None`` specification is equivalent to ``(1,)``, extracting the first
     argument.
     """
-    pass
+    keywords = {}
+    for string in strings:
+        if ':' in string:
+            funcname, spec_str = string.split(':')
+            number, spec = _parse_spec(spec_str)
+        else:
+            funcname = string
+            number = None
+            spec = None
+        keywords.setdefault(funcname, {})[number] = spec
+
+    # For best backwards compatibility, collapse {None: x} into x.
+    for k, v in keywords.items():
+        if set(v) == {None}:
+            keywords[k] = v[None]
+
+    return keywords


 def __getattr__(name: str):
-    if name in {'check_message_extractors', 'compile_catalog',
-        'extract_messages', 'init_catalog', 'update_catalog'}:
+    # Re-exports for backwards compatibility;
+    # `setuptools_frontend` is the canonical import location.
+    if name in {'check_message_extractors', 'compile_catalog', 'extract_messages', 'init_catalog', 'update_catalog'}:
         from babel.messages import setuptools_frontend
+
         return getattr(setuptools_frontend, name)
-    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
+
+    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")


 if __name__ == '__main__':
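
A minimal usage sketch for the keyword parsing above (illustrative only; it
assumes these helpers live in babel.messages.frontend, as in upstream Babel):

    from babel.messages.frontend import parse_keywords

    # '_' extracts argument 1; 'ngettext:1,2' extracts singular and plural;
    # 'pgettext:1c,2' marks argument 1 as the message context.
    specs = parse_keywords(['_', 'ngettext:1,2', 'pgettext:1c,2'])
    # {'_': None, 'ngettext': (1, 2), 'pgettext': ((1, 'c'), 2)}
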
diff --git a/babel/messages/jslexer.py b/babel/messages/jslexer.py
index a00f87d..6456bd0 100644
--- a/babel/messages/jslexer.py
+++ b/babel/messages/jslexer.py
@@ -9,24 +9,28 @@
     :license: BSD, see LICENSE for more details.
 """
 from __future__ import annotations
+
 import re
 from collections.abc import Generator
 from typing import NamedTuple
-operators: list[str] = sorted(['+', '-', '*', '%', '!=', '==', '<', '>',
-    '<=', '>=', '=', '+=', '-=', '*=', '%=', '<<', '>>', '>>>', '<<=',
-    '>>=', '>>>=', '&', '&=', '|', '|=', '&&', '||', '^', '^=', '(', ')',
-    '[', ']', '{', '}', '!', '--', '++', '~', ',', ';', '.', ':'], key=len,
-    reverse=True)
-escapes: dict[str, str] = {'b': '\x08', 'f': '\x0c', 'n': '\n', 'r': '\r',
-    't': '\t'}
-name_re = re.compile('[\\w$_][\\w\\d$_]*', re.UNICODE)
-dotted_name_re = re.compile('[\\w$_][\\w\\d$_.]*[\\w\\d$_.]', re.UNICODE)
-division_re = re.compile('/=?')
-regex_re = re.compile('/(?:[^/\\\\]*(?:\\\\.[^/\\\\]*)*)/[a-zA-Z]*', re.DOTALL)
-line_re = re.compile('(\\r\\n|\\n|\\r)')
-line_join_re = re.compile('\\\\' + line_re.pattern)
-uni_escape_re = re.compile('[a-fA-F0-9]{1,4}')
-hex_escape_re = re.compile('[a-fA-F0-9]{1,2}')
+
+operators: list[str] = sorted([
+    '+', '-', '*', '%', '!=', '==', '<', '>', '<=', '>=', '=',
+    '+=', '-=', '*=', '%=', '<<', '>>', '>>>', '<<=', '>>=',
+    '>>>=', '&', '&=', '|', '|=', '&&', '||', '^', '^=', '(', ')',
+    '[', ']', '{', '}', '!', '--', '++', '~', ',', ';', '.', ':',
+], key=len, reverse=True)
+
+escapes: dict[str, str] = {'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t'}
+
+name_re = re.compile(r'[\w$_][\w\d$_]*', re.UNICODE)
+dotted_name_re = re.compile(r'[\w$_][\w\d$_.]*[\w\d$_.]', re.UNICODE)
+division_re = re.compile(r'/=?')
+regex_re = re.compile(r'/(?:[^/\\]*(?:\\.[^/\\]*)*)/[a-zA-Z]*', re.DOTALL)
+line_re = re.compile(r'(\r\n|\n|\r)')
+line_join_re = re.compile(r'\\' + line_re.pattern)
+uni_escape_re = re.compile(r'[a-fA-F0-9]{1,4}')
+hex_escape_re = re.compile(r'[a-fA-F0-9]{1,2}')


 class Token(NamedTuple):
@@ -35,54 +39,126 @@ class Token(NamedTuple):
     lineno: int


-_rules: list[tuple[str | None, re.Pattern[str]]] = [(None, re.compile(
-    '\\s+', re.UNICODE)), (None, re.compile('<!--.*')), ('linecomment', re.
-    compile('//.*')), ('multilinecomment', re.compile('/\\*.*?\\*/', re.
-    UNICODE | re.DOTALL)), ('dotted_name', dotted_name_re), ('name',
-    name_re), ('number', re.compile(
-    """(
-        (?:0|[1-9]\\d*)
-        (\\.\\d+)?
-        ([eE][-+]?\\d+)? |
+_rules: list[tuple[str | None, re.Pattern[str]]] = [
+    (None, re.compile(r'\s+', re.UNICODE)),
+    (None, re.compile(r'<!--.*')),
+    ('linecomment', re.compile(r'//.*')),
+    ('multilinecomment', re.compile(r'/\*.*?\*/', re.UNICODE | re.DOTALL)),
+    ('dotted_name', dotted_name_re),
+    ('name', name_re),
+    ('number', re.compile(r'''(
+        (?:0|[1-9]\d*)
+        (\.\d+)?
+        ([eE][-+]?\d+)? |
         (0x[a-fA-F0-9]+)
-    )"""
-    , re.VERBOSE)), ('jsx_tag', re.compile('(?:</?[^>\\s]+|/>)', re.I)), (
-    'operator', re.compile('(%s)' % '|'.join(map(re.escape, operators)))),
-    ('template_string', re.compile('`(?:[^`\\\\]*(?:\\\\.[^`\\\\]*)*)`', re
-    .UNICODE)), ('string', re.compile(
-    """(
-        '(?:[^'\\\\]*(?:\\\\.[^'\\\\]*)*)'  |
-        "(?:[^"\\\\]*(?:\\\\.[^"\\\\]*)*)"
-    )"""
-    , re.VERBOSE | re.DOTALL))]
-
-
-def get_rules(jsx: bool, dotted: bool, template_string: bool) ->list[tuple[
-    str | None, re.Pattern[str]]]:
+    )''', re.VERBOSE)),
+    ('jsx_tag', re.compile(r'(?:</?[^>\s]+|/>)', re.I)),  # May be mangled in `get_rules`
+    ('operator', re.compile(r'(%s)' % '|'.join(map(re.escape, operators)))),
+    ('template_string', re.compile(r'''`(?:[^`\\]*(?:\\.[^`\\]*)*)`''', re.UNICODE)),
+    ('string', re.compile(r'''(
+        '(?:[^'\\]*(?:\\.[^'\\]*)*)'  |
+        "(?:[^"\\]*(?:\\.[^"\\]*)*)"
+    )''', re.VERBOSE | re.DOTALL)),
+]
+
+
+def get_rules(jsx: bool, dotted: bool, template_string: bool) -> list[tuple[str | None, re.Pattern[str]]]:
     """
     Get a tokenization rule list given the passed syntax options.

     Internal to this module.
     """
-    pass
-
-
-def indicates_division(token: Token) ->bool:
+    rules = []
+    for token_type, rule in _rules:
+        if not jsx and token_type and 'jsx' in token_type:
+            continue
+        if not template_string and token_type == 'template_string':
+            continue
+        if token_type == 'dotted_name':
+            if not dotted:
+                continue
+            token_type = 'name'
+        rules.append((token_type, rule))
+    return rules
+
+
+def indicates_division(token: Token) -> bool:
     """A helper function that helps the tokenizer to decide if the current
     token may be followed by a division operator.
     """
-    pass
+    if token.type == 'operator':
+        return token.value in (')', ']', '}', '++', '--')
+    return token.type in ('name', 'number', 'string', 'regexp')


-def unquote_string(string: str) ->str:
+def unquote_string(string: str) -> str:
     """Unquote a string with JavaScript rules.  The string has to start with
     string delimiters (``'``, ``"`` or the back-tick/grave accent (for template strings).)
     """
-    pass
-
-
-def tokenize(source: str, jsx: bool=True, dotted: bool=True,
-    template_string: bool=True, lineno: int=1) ->Generator[Token, None, None]:
+    assert string and string[0] == string[-1] and string[0] in '"\'`', \
+        'string provided is not properly delimited'
+    string = line_join_re.sub('\\1', string[1:-1])
+    result: list[str] = []
+    add = result.append
+    pos = 0
+
+    while True:
+        # scan for the next escape
+        escape_pos = string.find('\\', pos)
+        if escape_pos < 0:
+            break
+        add(string[pos:escape_pos])
+
+        # check which character is escaped
+        next_char = string[escape_pos + 1]
+        if next_char in escapes:
+            add(escapes[next_char])
+
+        # unicode escapes.  try to consume up to four hexadecimal
+        # characters and interpret them as a unicode code point.  If
+        # there is no such code point, put all the consumed characters
+        # into the string.
+        elif next_char in 'uU':
+            escaped = uni_escape_re.match(string, escape_pos + 2)
+            if escaped is not None:
+                escaped_value = escaped.group()
+                if len(escaped_value) == 4:
+                    try:
+                        add(chr(int(escaped_value, 16)))
+                    except ValueError:
+                        pass
+                    else:
+                        pos = escape_pos + 6
+                        continue
+                add(next_char + escaped_value)
+                pos = escaped.end()
+                continue
+            else:
+                add(next_char)
+
+        # hex escapes. conversion from 2-digit hex to char is infallible
+        elif next_char in 'xX':
+            escaped = hex_escape_re.match(string, escape_pos + 2)
+            if escaped is not None:
+                escaped_value = escaped.group()
+                add(chr(int(escaped_value, 16)))
+                pos = escape_pos + 2 + len(escaped_value)
+                continue
+            else:
+                add(next_char)
+
+        # bogus escape.  Just remove the backslash.
+        else:
+            add(next_char)
+        pos = escape_pos + 2
+
+    if pos < len(string):
+        add(string[pos:])
+
+    return ''.join(result)
+
+
+def tokenize(source: str, jsx: bool = True, dotted: bool = True, template_string: bool = True, lineno: int = 1) -> Generator[Token, None, None]:
     """
     Tokenize JavaScript/JSX source.  Returns a generator of tokens.

@@ -91,4 +167,37 @@ def tokenize(source: str, jsx: bool=True, dotted: bool=True,
     :param template_string: Support ES6 template strings
     :param lineno: starting line number (optional)
     """
-    pass
+    may_divide = False
+    pos = 0
+    end = len(source)
+    rules = get_rules(jsx=jsx, dotted=dotted, template_string=template_string)
+
+    while pos < end:
+        # handle regular rules first
+        for token_type, rule in rules:  # noqa: B007
+            match = rule.match(source, pos)
+            if match is not None:
+                break
+        # if we don't have a match we don't give up yet, but check for
+        # division operators or regular expression literals, based on
+        # the status of `may_divide` which is determined by the last
+        # processed non-whitespace token using `indicates_division`.
+        else:
+            if may_divide:
+                match = division_re.match(source, pos)
+                token_type = 'operator'
+            else:
+                match = regex_re.match(source, pos)
+                token_type = 'regexp'
+            if match is None:
+                # whoops, invalid syntax: jump one char ahead and try again.
+                pos += 1
+                continue
+
+        token_value = match.group()
+        if token_type is not None:
+            token = Token(token_type, token_value, lineno)
+            may_divide = indicates_division(token)
+            yield token
+        lineno += len(line_re.findall(token_value))
+        pos = match.end()
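
For reference, a small sketch of driving the tokenizer implemented above
(illustrative only; it assumes the module path babel.messages.jslexer used in
this diff):

    from babel.messages.jslexer import tokenize, unquote_string

    source = 'alert(gettext("Hello, \\"world\\"!"));'
    for tok in tokenize(source, jsx=False):
        if tok.type == 'string':
            print(unquote_string(tok.value))  # Hello, "world"!
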
diff --git a/babel/messages/mofile.py b/babel/messages/mofile.py
index 2924f4e..ca02e68 100644
--- a/babel/messages/mofile.py
+++ b/babel/messages/mofile.py
@@ -8,17 +8,21 @@
     :license: BSD, see LICENSE for more details.
 """
 from __future__ import annotations
+
 import array
 import struct
 from typing import TYPE_CHECKING
+
 from babel.messages.catalog import Catalog, Message
+
 if TYPE_CHECKING:
     from _typeshed import SupportsRead, SupportsWrite
-LE_MAGIC: int = 2500072158
-BE_MAGIC: int = 3725722773

+LE_MAGIC: int = 0x950412de
+BE_MAGIC: int = 0xde120495

-def read_mo(fileobj: SupportsRead[bytes]) ->Catalog:
+
+def read_mo(fileobj: SupportsRead[bytes]) -> Catalog:
     """Read a binary MO file from the given file-like object and return a
     corresponding `Catalog` object.

@@ -28,11 +32,81 @@ def read_mo(fileobj: SupportsRead[bytes]) ->Catalog:
            ``GNUTranslations._parse`` method of the ``gettext`` module in the
            standard library.
     """
-    pass
+    catalog = Catalog()
+    headers = {}
+
+    filename = getattr(fileobj, 'name', '')
+
+    buf = fileobj.read()
+    buflen = len(buf)
+    unpack = struct.unpack
+
+    # Parse the .mo file header, which consists of 5 little endian 32
+    # bit words.
+    magic = unpack('<I', buf[:4])[0]  # Are we big endian or little endian?
+    if magic == LE_MAGIC:
+        version, msgcount, origidx, transidx = unpack('<4I', buf[4:20])
+        ii = '<II'
+    elif magic == BE_MAGIC:
+        version, msgcount, origidx, transidx = unpack('>4I', buf[4:20])
+        ii = '>II'
+    else:
+        raise OSError(0, 'Bad magic number', filename)
+
+    # Now put all messages from the .mo file buffer into the catalog
+    # dictionary
+    for _i in range(msgcount):
+        mlen, moff = unpack(ii, buf[origidx:origidx + 8])
+        mend = moff + mlen
+        tlen, toff = unpack(ii, buf[transidx:transidx + 8])
+        tend = toff + tlen
+        if mend < buflen and tend < buflen:
+            msg = buf[moff:mend]
+            tmsg = buf[toff:tend]
+        else:
+            raise OSError(0, 'File is corrupt', filename)
+
+        # See if we're looking at GNU .mo conventions for metadata
+        if mlen == 0:
+            # Catalog description
+            lastkey = key = None
+            for item in tmsg.splitlines():
+                item = item.strip()
+                if not item:
+                    continue
+                if b':' in item:
+                    key, value = item.split(b':', 1)
+                    lastkey = key = key.strip().lower()
+                    headers[key] = value.strip()
+                elif lastkey:
+                    headers[lastkey] += b'\n' + item

+        if b'\x04' in msg:  # context
+            ctxt, msg = msg.split(b'\x04')
+        else:
+            ctxt = None

-def write_mo(fileobj: SupportsWrite[bytes], catalog: Catalog, use_fuzzy:
-    bool=False) ->None:
+        if b'\x00' in msg:  # plural forms
+            msg = msg.split(b'\x00')
+            tmsg = tmsg.split(b'\x00')
+            if catalog.charset:
+                msg = [x.decode(catalog.charset) for x in msg]
+                tmsg = [x.decode(catalog.charset) for x in tmsg]
+        else:
+            if catalog.charset:
+                msg = msg.decode(catalog.charset)
+                tmsg = tmsg.decode(catalog.charset)
+        catalog[msg] = Message(msg, tmsg, context=ctxt)
+
+        # advance to next entry in the seek tables
+        origidx += 8
+        transidx += 8
+
+    catalog.mime_headers = headers.items()
+    return catalog
+
+
+def write_mo(fileobj: SupportsWrite[bytes], catalog: Catalog, use_fuzzy: bool = False) -> None:
     """Write a catalog to the specified file-like object using the GNU MO file
     format.

@@ -80,4 +154,59 @@ def write_mo(fileobj: SupportsWrite[bytes], catalog: Catalog, use_fuzzy:
     :param use_fuzzy: whether translations marked as "fuzzy" should be included
                       in the output
     """
-    pass
+    messages = list(catalog)
+    messages[1:] = [m for m in messages[1:]
+                    if m.string and (use_fuzzy or not m.fuzzy)]
+    messages.sort()
+
+    ids = strs = b''
+    offsets = []
+
+    for message in messages:
+        # For each string, we need its size and file offset.  Each string is
+        # NUL terminated; the NUL is not counted in the size.
+        if message.pluralizable:
+            msgid = b'\x00'.join([
+                msgid.encode(catalog.charset) for msgid in message.id
+            ])
+            msgstrs = []
+            for idx, string in enumerate(message.string):
+                if not string:
+                    msgstrs.append(message.id[min(int(idx), 1)])
+                else:
+                    msgstrs.append(string)
+            msgstr = b'\x00'.join([
+                msgstr.encode(catalog.charset) for msgstr in msgstrs
+            ])
+        else:
+            msgid = message.id.encode(catalog.charset)
+            msgstr = message.string.encode(catalog.charset)
+        if message.context:
+            msgid = b'\x04'.join([message.context.encode(catalog.charset),
+                                  msgid])
+        offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))
+        ids += msgid + b'\x00'
+        strs += msgstr + b'\x00'
+
+    # The header is 7 32-bit unsigned integers.  We don't use hash tables, so
+    # the keys start right after the index tables.
+    keystart = 7 * 4 + 16 * len(messages)
+    valuestart = keystart + len(ids)
+
+    # The string table first has the list of keys, then the list of values.
+    # Each entry has first the size of the string, then the file offset.
+    koffsets = []
+    voffsets = []
+    for o1, l1, o2, l2 in offsets:
+        koffsets += [l1, o1 + keystart]
+        voffsets += [l2, o2 + valuestart]
+    offsets = koffsets + voffsets
+
+    fileobj.write(struct.pack('Iiiiiii',
+                              LE_MAGIC,                   # magic
+                              0,                          # version
+                              len(messages),              # number of entries
+                              7 * 4,                      # start of key index
+                              7 * 4 + len(messages) * 8,  # start of value index
+                              0, 0,                       # size and offset of hash table
+                              ) + array.array.tobytes(array.array("i", offsets)) + ids + strs)
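
A round-trip sketch of the MO reader and writer above (illustrative only;
Catalog and Catalog.add come from babel.messages.catalog, not from this diff):

    import io
    from babel.messages.catalog import Catalog
    from babel.messages.mofile import read_mo, write_mo

    catalog = Catalog(locale='de_DE')
    catalog.add('Hello', 'Hallo')

    buf = io.BytesIO()
    write_mo(buf, catalog)
    buf.seek(0)
    print(read_mo(buf).get('Hello').string)  # Hallo
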
diff --git a/babel/messages/plurals.py b/babel/messages/plurals.py
index 1097d43..fa3f03e 100644
--- a/babel/messages/plurals.py
+++ b/babel/messages/plurals.py
@@ -8,73 +8,219 @@
     :license: BSD, see LICENSE for more details.
 """
 from __future__ import annotations
+
 from operator import itemgetter
+
 from babel.core import Locale, default_locale
+
+# XXX: remove this file, duplication with babel.plural
+
+
 LC_CTYPE: str | None = default_locale('LC_CTYPE')
-PLURALS: dict[str, tuple[int, str]] = {'af': (2, '(n != 1)'), 'ar': (6,
-    '(n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : n%100>=3 && n%100<=10 ? 3 : n%100>=0 && n%100<=2 ? 4 : 5)'
-    ), 'be': (3,
-    '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'
-    ), 'bg': (2, '(n != 1)'), 'bn': (2, '(n != 1)'), 'bo': (1, '0'), 'br':
-    (6,
-    '(n==1 ? 0 : n%10==1 && n%100!=11 && n%100!=71 && n%100!=91 ? 1 : n%10==2 && n%100!=12 && n%100!=72 && n%100!=92 ? 2 : (n%10==3 || n%10==4 || n%10==9) && n%100!=13 && n%100!=14 && n%100!=19 && n%100!=73 && n%100!=74 && n%100!=79 && n%100!=93 && n%100!=94 && n%100!=99 ? 3 : n%1000000==0 ? 4 : 5)'
-    ), 'bs': (3,
-    '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'
-    ), 'ca': (2, '(n != 1)'), 'cs': (3,
-    '((n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2)'), 'cv': (1, '0'), 'cy': (5,
-    '(n==1 ? 1 : n==2 ? 2 : n==3 ? 3 : n==6 ? 4 : 0)'), 'da': (2,
-    '(n != 1)'), 'de': (2, '(n != 1)'), 'dz': (1, '0'), 'el': (2,
-    '(n != 1)'), 'en': (2, '(n != 1)'), 'eo': (2, '(n != 1)'), 'es': (2,
-    '(n != 1)'), 'et': (2, '(n != 1)'), 'eu': (2, '(n != 1)'), 'fa': (1,
-    '0'), 'fi': (2, '(n != 1)'), 'fr': (2, '(n > 1)'), 'fur': (2, '(n > 1)'
-    ), 'ga': (5,
-    '(n==1 ? 0 : n==2 ? 1 : n>=3 && n<=6 ? 2 : n>=7 && n<=10 ? 3 : 4)'),
-    'gl': (2, '(n != 1)'), 'ha': (2, '(n != 1)'), 'he': (2, '(n != 1)'),
-    'hi': (2, '(n != 1)'), 'hr': (3,
-    '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'
-    ), 'hu': (1, '0'), 'hy': (1, '0'), 'is': (2,
-    '(n%10==1 && n%100!=11 ? 0 : 1)'), 'it': (2, '(n != 1)'), 'ja': (1, '0'
-    ), 'ka': (1, '0'), 'kg': (2, '(n != 1)'), 'km': (1, '0'), 'ko': (1, '0'
-    ), 'ku': (2, '(n != 1)'), 'lo': (1, '0'), 'lt': (3,
-    '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2)'
-    ), 'lv': (3, '(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2)'), 'mt': (4,
-    '(n==1 ? 0 : n==0 || ( n%100>=1 && n%100<=10) ? 1 : (n%100>10 && n%100<20 ) ? 2 : 3)'
-    ), 'nb': (2, '(n != 1)'), 'nl': (2, '(n != 1)'), 'nn': (2, '(n != 1)'),
-    'no': (2, '(n != 1)'), 'pa': (2, '(n != 1)'), 'pl': (3,
-    '(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
-    'pt': (2, '(n != 1)'), 'pt_BR': (2, '(n > 1)'), 'ro': (3,
-    '(n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2)'), 'ru': (3,
-    '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'
-    ), 'sk': (3, '((n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2)'), 'sl': (4,
-    '(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3)'), 'sr':
-    (3,
-    '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'
-    ), 'st': (2, '(n != 1)'), 'sv': (2, '(n != 1)'), 'th': (1, '0'), 'tr':
-    (1, '0'), 'uk': (3,
-    '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'
-    ), 've': (2, '(n != 1)'), 'vi': (1, '0'), 'xh': (2, '(n != 1)'), 'zh':
-    (1, '0')}
+
+
+PLURALS: dict[str, tuple[int, str]] = {
+    # Afar
+    # 'aa': (),
+    # Abkhazian
+    # 'ab': (),
+    # Avestan
+    # 'ae': (),
+    # Afrikaans - From Pootle's PO's
+    'af': (2, '(n != 1)'),
+    # Akan
+    # 'ak': (),
+    # Amharic
+    # 'am': (),
+    # Aragonese
+    # 'an': (),
+    # Arabic - From Pootle's PO's
+    'ar': (6, '(n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : n%100>=3 && n%100<=10 ? 3 : n%100>=0 && n%100<=2 ? 4 : 5)'),
+    # Assamese
+    # 'as': (),
+    # Avaric
+    # 'av': (),
+    # Aymara
+    # 'ay': (),
+    # Azerbaijani
+    # 'az': (),
+    # Bashkir
+    # 'ba': (),
+    # Belarusian
+    'be': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
+    # Bulgarian - From Pootle's PO's
+    'bg': (2, '(n != 1)'),
+    # Bihari
+    # 'bh': (),
+    # Bislama
+    # 'bi': (),
+    # Bambara
+    # 'bm': (),
+    # Bengali - From Pootle's PO's
+    'bn': (2, '(n != 1)'),
+    # Tibetan - as discussed in private with Andrew West
+    'bo': (1, '0'),
+    # Breton
+    'br': (
+        6,
+        '(n==1 ? 0 : n%10==1 && n%100!=11 && n%100!=71 && n%100!=91 ? 1 : n%10==2 && n%100!=12 && n%100!=72 && '
+        'n%100!=92 ? 2 : (n%10==3 || n%10==4 || n%10==9) && n%100!=13 && n%100!=14 && n%100!=19 && n%100!=73 && '
+        'n%100!=74 && n%100!=79 && n%100!=93 && n%100!=94 && n%100!=99 ? 3 : n%1000000==0 ? 4 : 5)',
+    ),
+    # Bosnian
+    'bs': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
+    # Catalan - From Pootle's PO's
+    'ca': (2, '(n != 1)'),
+    # Chechen
+    # 'ce': (),
+    # Chamorro
+    # 'ch': (),
+    # Corsican
+    # 'co': (),
+    # Cree
+    # 'cr': (),
+    # Czech
+    'cs': (3, '((n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2)'),
+    # Church Slavic
+    # 'cu': (),
+    # Chuvash
+    'cv': (1, '0'),
+    # Welsh
+    'cy': (5, '(n==1 ? 1 : n==2 ? 2 : n==3 ? 3 : n==6 ? 4 : 0)'),
+    # Danish
+    'da': (2, '(n != 1)'),
+    # German
+    'de': (2, '(n != 1)'),
+    # Divehi
+    # 'dv': (),
+    # Dzongkha
+    'dz': (1, '0'),
+    # Greek
+    'el': (2, '(n != 1)'),
+    # English
+    'en': (2, '(n != 1)'),
+    # Esperanto
+    'eo': (2, '(n != 1)'),
+    # Spanish
+    'es': (2, '(n != 1)'),
+    # Estonian
+    'et': (2, '(n != 1)'),
+    # Basque - From Pootle's PO's
+    'eu': (2, '(n != 1)'),
+    # Persian - From Pootle's PO's
+    'fa': (1, '0'),
+    # Finnish
+    'fi': (2, '(n != 1)'),
+    # French
+    'fr': (2, '(n > 1)'),
+    # Friulian - From Pootle's PO's
+    'fur': (2, '(n > 1)'),
+    # Irish
+    'ga': (5, '(n==1 ? 0 : n==2 ? 1 : n>=3 && n<=6 ? 2 : n>=7 && n<=10 ? 3 : 4)'),
+    # Galician - From Pootle's PO's
+    'gl': (2, '(n != 1)'),
+    # Hausa - From Pootle's PO's
+    'ha': (2, '(n != 1)'),
+    # Hebrew
+    'he': (2, '(n != 1)'),
+    # Hindi - From Pootle's PO's
+    'hi': (2, '(n != 1)'),
+    # Croatian
+    'hr': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
+    # Hungarian
+    'hu': (1, '0'),
+    # Armenian - From Pootle's PO's
+    'hy': (1, '0'),
+    # Icelandic - From Pootle's PO's
+    'is': (2, '(n%10==1 && n%100!=11 ? 0 : 1)'),
+    # Italian
+    'it': (2, '(n != 1)'),
+    # Japanese
+    'ja': (1, '0'),
+    # Georgian - From Pootle's PO's
+    'ka': (1, '0'),
+    # Kongo - From Pootle's PO's
+    'kg': (2, '(n != 1)'),
+    # Khmer - From Pootle's PO's
+    'km': (1, '0'),
+    # Korean
+    'ko': (1, '0'),
+    # Kurdish - From Pootle's PO's
+    'ku': (2, '(n != 1)'),
+    # Lao - Another member of the Tai language family, like Thai.
+    'lo': (1, '0'),
+    # Lithuanian
+    'lt': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2)'),
+    # Latvian
+    'lv': (3, '(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2)'),
+    # Maltese - From Pootle's PO's
+    'mt': (4, '(n==1 ? 0 : n==0 || ( n%100>=1 && n%100<=10) ? 1 : (n%100>10 && n%100<20 ) ? 2 : 3)'),
+    # Norwegian Bokmål
+    'nb': (2, '(n != 1)'),
+    # Dutch
+    'nl': (2, '(n != 1)'),
+    # Norwegian Nynorsk
+    'nn': (2, '(n != 1)'),
+    # Norwegian
+    'no': (2, '(n != 1)'),
+    # Punjabi - From Pootle's PO's
+    'pa': (2, '(n != 1)'),
+    # Polish
+    'pl': (3, '(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
+    # Portuguese
+    'pt': (2, '(n != 1)'),
+    # Brazilian
+    'pt_BR': (2, '(n > 1)'),
+    # Romanian - From Pootle's PO's
+    'ro': (3, '(n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2)'),
+    # Russian
+    'ru': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
+    # Slovak
+    'sk': (3, '((n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2)'),
+    # Slovenian
+    'sl': (4, '(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3)'),
+    # Serbian - From Pootle's PO's
+    'sr': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
+    # Southern Sotho - From Pootle's PO's
+    'st': (2, '(n != 1)'),
+    # Swedish
+    'sv': (2, '(n != 1)'),
+    # Thai
+    'th': (1, '0'),
+    # Turkish
+    'tr': (1, '0'),
+    # Ukrainian
+    'uk': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
+    # Venda - From Pootle's PO's
+    've': (2, '(n != 1)'),
+    # Vietnamese - From Pootle's PO's
+    'vi': (1, '0'),
+    # Xhosa - From Pootle's PO's
+    'xh': (2, '(n != 1)'),
+    # Chinese - From Pootle's PO's (modified)
+    'zh': (1, '0'),
+}
+
+
 DEFAULT_PLURAL: tuple[int, str] = (2, '(n != 1)')


 class _PluralTuple(tuple):
     """A tuple with plural information."""
+
     __slots__ = ()
-    num_plurals = property(itemgetter(0), doc=
-        """
+    num_plurals = property(itemgetter(0), doc="""
     The number of plurals used by the locale.""")
-    plural_expr = property(itemgetter(1), doc=
-        """
+    plural_expr = property(itemgetter(1), doc="""
     The plural expression used by the locale.""")
-    plural_forms = property(lambda x: 'nplurals={}; plural={};'.format(*x),
-        doc="""
+    plural_forms = property(lambda x: 'nplurals={}; plural={};'.format(*x), doc="""
     The plural expression used by the catalog or locale.""")

-    def __str__(self) ->str:
+    def __str__(self) -> str:
         return self.plural_forms


-def get_plural(locale: (str | None)=LC_CTYPE) ->_PluralTuple:
+def get_plural(locale: str | None = LC_CTYPE) -> _PluralTuple:
     """A tuple with the information catalogs need to perform proper
     pluralization.  The first item of the tuple is the number of plural
     forms, the second the plural expression.
@@ -100,4 +246,12 @@ def get_plural(locale: (str | None)=LC_CTYPE) ->_PluralTuple:
     >>> str(tup)
     'nplurals=1; plural=0;'
     """
-    pass
+    locale = Locale.parse(locale)
+    try:
+        tup = PLURALS[str(locale)]
+    except KeyError:
+        try:
+            tup = PLURALS[locale.language]
+        except KeyError:
+            tup = DEFAULT_PLURAL
+    return _PluralTuple(tup)
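
A short sketch of how the lookup above resolves locales (illustrative only;
'sw' is merely an example of a CLDR locale with no entry in PLURALS):

    from babel.messages.plurals import DEFAULT_PLURAL, get_plural

    print(get_plural('ru').num_plurals)   # 3
    print(str(get_plural('ja')))          # nplurals=1; plural=0;
    assert tuple(get_plural('sw')) == DEFAULT_PLURAL  # falls back to (2, '(n != 1)')
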
diff --git a/babel/messages/pofile.py b/babel/messages/pofile.py
index bbc0cb7..b64a508 100644
--- a/babel/messages/pofile.py
+++ b/babel/messages/pofile.py
@@ -9,61 +9,81 @@
     :license: BSD, see LICENSE for more details.
 """
 from __future__ import annotations
+
 import os
 import re
 from collections.abc import Iterable
 from typing import TYPE_CHECKING
+
 from babel.core import Locale
 from babel.messages.catalog import Catalog, Message
 from babel.util import _cmp, wraptext
+
 if TYPE_CHECKING:
     from typing import IO, AnyStr
+
     from _typeshed import SupportsWrite
     from typing_extensions import Literal


-def unescape(string: str) ->str:
-    """Reverse `escape` the given string.
+def unescape(string: str) -> str:
+    r"""Reverse `escape` the given string.

-    >>> print(unescape('"Say:\\\\n  \\\\"hello, world!\\\\"\\\\n"'))
+    >>> print(unescape('"Say:\\n  \\"hello, world!\\"\\n"'))
     Say:
       "hello, world!"
     <BLANKLINE>

     :param string: the string to unescape
     """
-    pass
-
-
-def denormalize(string: str) ->str:
-    """Reverse the normalization done by the `normalize` function.
+    def replace_escapes(match):
+        m = match.group(1)
+        if m == 'n':
+            return '\n'
+        elif m == 't':
+            return '\t'
+        elif m == 'r':
+            return '\r'
+        # m is \ or "
+        return m
+    return re.compile(r'\\([\\trn"])').sub(replace_escapes, string[1:-1])
+
+
+def denormalize(string: str) -> str:
+    r"""Reverse the normalization done by the `normalize` function.

     >>> print(denormalize(r'''""
-    ... "Say:\\n"
-    ... "  \\"hello, world!\\"\\n"'''))
+    ... "Say:\n"
+    ... "  \"hello, world!\"\n"'''))
     Say:
       "hello, world!"
     <BLANKLINE>

     >>> print(denormalize(r'''""
-    ... "Say:\\n"
-    ... "  \\"Lorem ipsum dolor sit "
+    ... "Say:\n"
+    ... "  \"Lorem ipsum dolor sit "
     ... "amet, consectetur adipisicing"
-    ... " elit, \\"\\n"'''))
+    ... " elit, \"\n"'''))
     Say:
       "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
     <BLANKLINE>

     :param string: the string to denormalize
     """
-    pass
+    if '\n' in string:
+        escaped_lines = string.splitlines()
+        if string.startswith('""'):
+            escaped_lines = escaped_lines[1:]
+        lines = map(unescape, escaped_lines)
+        return ''.join(lines)
+    else:
+        return unescape(string)


 class PoFileError(Exception):
     """Exception thrown by PoParser when an invalid po file is encountered."""

-    def __init__(self, message: str, catalog: Catalog, line: str, lineno: int
-        ) ->None:
+    def __init__(self, message: str, catalog: Catalog, line: str, lineno: int) -> None:
         super().__init__(f'{message} on {lineno}')
         self.catalog = catalog
         self.line = line
@@ -72,38 +92,45 @@ class PoFileError(Exception):

 class _NormalizedString:

-    def __init__(self, *args: str) ->None:
+    def __init__(self, *args: str) -> None:
         self._strs: list[str] = []
         for arg in args:
             self.append(arg)

-    def __bool__(self) ->bool:
+    def append(self, s: str) -> None:
+        self._strs.append(s.strip())
+
+    def denormalize(self) -> str:
+        return ''.join(map(unescape, self._strs))
+
+    def __bool__(self) -> bool:
         return bool(self._strs)

-    def __repr__(self) ->str:
+    def __repr__(self) -> str:
         return os.linesep.join(self._strs)

-    def __cmp__(self, other: object) ->int:
+    def __cmp__(self, other: object) -> int:
         if not other:
             return 1
+
         return _cmp(str(self), str(other))

-    def __gt__(self, other: object) ->bool:
+    def __gt__(self, other: object) -> bool:
         return self.__cmp__(other) > 0

-    def __lt__(self, other: object) ->bool:
+    def __lt__(self, other: object) -> bool:
         return self.__cmp__(other) < 0

-    def __ge__(self, other: object) ->bool:
+    def __ge__(self, other: object) -> bool:
         return self.__cmp__(other) >= 0

-    def __le__(self, other: object) ->bool:
+    def __le__(self, other: object) -> bool:
         return self.__cmp__(other) <= 0

-    def __eq__(self, other: object) ->bool:
+    def __eq__(self, other: object) -> bool:
         return self.__cmp__(other) == 0

-    def __ne__(self, other: object) ->bool:
+    def __ne__(self, other: object) -> bool:
         return self.__cmp__(other) != 0


@@ -113,10 +140,15 @@ class PoFileParser:

     See `read_po` for simple cases.
     """
-    _keywords = ['msgid', 'msgstr', 'msgctxt', 'msgid_plural']

-    def __init__(self, catalog: Catalog, ignore_obsolete: bool=False,
-        abort_invalid: bool=False) ->None:
+    _keywords = [
+        'msgid',
+        'msgstr',
+        'msgctxt',
+        'msgid_plural',
+    ]
+
+    def __init__(self, catalog: Catalog, ignore_obsolete: bool = False, abort_invalid: bool = False) -> None:
         self.catalog = catalog
         self.ignore_obsolete = ignore_obsolete
         self.counter = 0
@@ -124,24 +156,186 @@ class PoFileParser:
         self.abort_invalid = abort_invalid
         self._reset_message_state()

-    def _add_message(self) ->None:
+    def _reset_message_state(self) -> None:
+        self.messages = []
+        self.translations = []
+        self.locations = []
+        self.flags = []
+        self.user_comments = []
+        self.auto_comments = []
+        self.context = None
+        self.obsolete = False
+        self.in_msgid = False
+        self.in_msgstr = False
+        self.in_msgctxt = False
+
+    def _add_message(self) -> None:
         """
         Add a message to the catalog based on the current parser state and
         clear the state ready to process the next message.
         """
-        pass
+        self.translations.sort()
+        if len(self.messages) > 1:
+            msgid = tuple(m.denormalize() for m in self.messages)
+        else:
+            msgid = self.messages[0].denormalize()
+        if isinstance(msgid, (list, tuple)):
+            string = ['' for _ in range(self.catalog.num_plurals)]
+            for idx, translation in self.translations:
+                if idx >= self.catalog.num_plurals:
+                    self._invalid_pofile("", self.offset, "msg has more translations than num_plurals of catalog")
+                    continue
+                string[idx] = translation.denormalize()
+            string = tuple(string)
+        else:
+            string = self.translations[0][1].denormalize()
+        msgctxt = self.context.denormalize() if self.context else None
+        message = Message(msgid, string, list(self.locations), set(self.flags),
+                          self.auto_comments, self.user_comments, lineno=self.offset + 1,
+                          context=msgctxt)
+        if self.obsolete:
+            if not self.ignore_obsolete:
+                self.catalog.obsolete[msgid] = message
+        else:
+            self.catalog[msgid] = message
+        self.counter += 1
+        self._reset_message_state()

-    def parse(self, fileobj: IO[AnyStr]) ->None:
+    def _finish_current_message(self) -> None:
+        if self.messages:
+            self._add_message()
+
+    def _process_message_line(self, lineno, line, obsolete=False) -> None:
+        if line.startswith('"'):
+            self._process_string_continuation_line(line, lineno)
+        else:
+            self._process_keyword_line(lineno, line, obsolete)
+
+    def _process_keyword_line(self, lineno, line, obsolete=False) -> None:
+
+        for keyword in self._keywords:
+            try:
+                if line.startswith(keyword) and line[len(keyword)] in [' ', '[']:
+                    arg = line[len(keyword):]
+                    break
+            except IndexError:
+                self._invalid_pofile(line, lineno, "Keyword must be followed by a string")
+        else:
+            self._invalid_pofile(line, lineno, "Start of line didn't match any expected keyword.")
+            return
+
+        if keyword in ['msgid', 'msgctxt']:
+            self._finish_current_message()
+
+        self.obsolete = obsolete
+
+        # The line that has the msgid is stored as the offset of the message.
+        # (Should this be the msgctxt line when the message has one?)
+        if keyword == 'msgid':
+            self.offset = lineno
+
+        if keyword in ['msgid', 'msgid_plural']:
+            self.in_msgctxt = False
+            self.in_msgid = True
+            self.messages.append(_NormalizedString(arg))
+
+        elif keyword == 'msgstr':
+            self.in_msgid = False
+            self.in_msgstr = True
+            if arg.startswith('['):
+                idx, msg = arg[1:].split(']', 1)
+                self.translations.append([int(idx), _NormalizedString(msg)])
+            else:
+                self.translations.append([0, _NormalizedString(arg)])
+
+        elif keyword == 'msgctxt':
+            self.in_msgctxt = True
+            self.context = _NormalizedString(arg)
+
+    def _process_string_continuation_line(self, line, lineno) -> None:
+        if self.in_msgid:
+            s = self.messages[-1]
+        elif self.in_msgstr:
+            s = self.translations[-1][1]
+        elif self.in_msgctxt:
+            s = self.context
+        else:
+            self._invalid_pofile(line, lineno, "Got line starting with \" but not in msgid, msgstr or msgctxt")
+            return
+        s.append(line)
+
+    def _process_comment(self, line) -> None:
+
+        self._finish_current_message()
+
+        if line[1:].startswith(':'):
+            for location in line[2:].lstrip().split():
+                pos = location.rfind(':')
+                if pos >= 0:
+                    try:
+                        lineno = int(location[pos + 1:])
+                    except ValueError:
+                        continue
+                    self.locations.append((location[:pos], lineno))
+                else:
+                    self.locations.append((location, None))
+        elif line[1:].startswith(','):
+            for flag in line[2:].lstrip().split(','):
+                self.flags.append(flag.strip())
+        elif line[1:].startswith('.'):
+            # These are called auto-comments
+            comment = line[2:].strip()
+            if comment:  # Just check that we're not adding empty comments
+                self.auto_comments.append(comment)
+        else:
+            # These are called user comments
+            self.user_comments.append(line[1:].strip())
+
+    def parse(self, fileobj: IO[AnyStr]) -> None:
         """
         Reads from the file-like object `fileobj` and adds any po file
         units found in it to the `Catalog` supplied to the constructor.
         """
-        pass
-

-def read_po(fileobj: IO[AnyStr], locale: (str | Locale | None)=None, domain:
-    (str | None)=None, ignore_obsolete: bool=False, charset: (str | None)=
-    None, abort_invalid: bool=False) ->Catalog:
+        for lineno, line in enumerate(fileobj):
+            line = line.strip()
+            if not isinstance(line, str):
+                line = line.decode(self.catalog.charset)
+            if not line:
+                continue
+            if line.startswith('#'):
+                if line[1:].startswith('~'):
+                    self._process_message_line(lineno, line[2:].lstrip(), obsolete=True)
+                else:
+                    self._process_comment(line)
+            else:
+                self._process_message_line(lineno, line)
+
+        self._finish_current_message()
+
+        # No actual messages found, but there was some info in comments, from which
+        # we'll construct an empty header message
+        if not self.counter and (self.flags or self.user_comments or self.auto_comments):
+            self.messages.append(_NormalizedString('""'))
+            self.translations.append([0, _NormalizedString('""')])
+            self._add_message()
+
+    def _invalid_pofile(self, line, lineno, msg) -> None:
+        assert isinstance(line, str)
+        if self.abort_invalid:
+            raise PoFileError(msg, self.catalog, line, lineno)
+        print("WARNING:", msg)
+        print(f"WARNING: Problem on line {lineno + 1}: {line!r}")
+
+
+def read_po(
+    fileobj: IO[AnyStr],
+    locale: str | Locale | None = None,
+    domain: str | None = None,
+    ignore_obsolete: bool = False,
+    charset: str | None = None,
+    abort_invalid: bool = False,
+) -> Catalog:
     """Read messages from a ``gettext`` PO (portable object) file from the given
     file-like object and return a `Catalog`.

@@ -188,60 +382,111 @@ def read_po(fileobj: IO[AnyStr], locale: (str | Locale | None)=None, domain:
     :param charset: the character set of the catalog.
     :param abort_invalid: abort read if po file is invalid
     """
-    pass
+    catalog = Catalog(locale=locale, domain=domain, charset=charset)
+    parser = PoFileParser(catalog, ignore_obsolete, abort_invalid=abort_invalid)
+    parser.parse(fileobj)
+    return catalog


-WORD_SEP = re.compile(
-    '(\\s+|[^\\s\\w]*\\w+[a-zA-Z]-(?=\\w+[a-zA-Z])|(?<=[\\w\\!\\"\\\'\\&\\.\\,\\?])-{2,}(?=\\w))'
-    )
+WORD_SEP = re.compile('('
+                      r'\s+|'                                 # any whitespace
+                      r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|'  # hyphenated words
+                      r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w)'   # em-dash
+                      ')')


-def escape(string: str) ->str:
-    """Escape the given string so that it can be included in double-quoted
+def escape(string: str) -> str:
+    r"""Escape the given string so that it can be included in double-quoted
     strings in ``PO`` files.

     >>> escape('''Say:
     ...   "hello, world!"
     ... ''')
-    '"Say:\\\\n  \\\\"hello, world!\\\\"\\\\n"'
+    '"Say:\\n  \\"hello, world!\\"\\n"'

     :param string: the string to escape
     """
-    pass
+    return '"%s"' % string.replace('\\', '\\\\') \
+                          .replace('\t', '\\t') \
+                          .replace('\r', '\\r') \
+                          .replace('\n', '\\n') \
+                          .replace('\"', '\\"')


-def normalize(string: str, prefix: str='', width: int=76) ->str:
-    """Convert a string into a format that is appropriate for .po files.
+def normalize(string: str, prefix: str = '', width: int = 76) -> str:
+    r"""Convert a string into a format that is appropriate for .po files.

     >>> print(normalize('''Say:
     ...   "hello, world!"
     ... ''', width=None))
     ""
-    "Say:\\n"
-    "  \\"hello, world!\\"\\n"
+    "Say:\n"
+    "  \"hello, world!\"\n"

     >>> print(normalize('''Say:
     ...   "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
     ... ''', width=32))
     ""
-    "Say:\\n"
-    "  \\"Lorem ipsum dolor sit "
+    "Say:\n"
+    "  \"Lorem ipsum dolor sit "
     "amet, consectetur adipisicing"
-    " elit, \\"\\n"
+    " elit, \"\n"

     :param string: the string to normalize
     :param prefix: a string that should be prepended to every line
     :param width: the maximum line width; use `None`, 0, or a negative number
                   to completely disable line wrapping
     """
-    pass
-
-
-def write_po(fileobj: SupportsWrite[bytes], catalog: Catalog, width: int=76,
-    no_location: bool=False, omit_header: bool=False, sort_output: bool=
-    False, sort_by_file: bool=False, ignore_obsolete: bool=False,
-    include_previous: bool=False, include_lineno: bool=True) ->None:
-    """Write a ``gettext`` PO (portable object) template file for a given
+    if width and width > 0:
+        prefixlen = len(prefix)
+        lines = []
+        for line in string.splitlines(True):
+            if len(escape(line)) + prefixlen > width:
+                chunks = WORD_SEP.split(line)
+                chunks.reverse()
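+                # Greedily pack chunks into lines while the escaped width stays under the limit.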
+                while chunks:
+                    buf = []
+                    size = 2
+                    while chunks:
+                        length = len(escape(chunks[-1])) - 2 + prefixlen
+                        if size + length < width:
+                            buf.append(chunks.pop())
+                            size += length
+                        else:
+                            if not buf:
+                                # handle long chunks by putting them on a
+                                # separate line
+                                buf.append(chunks.pop())
+                            break
+                    lines.append(''.join(buf))
+            else:
+                lines.append(line)
+    else:
+        lines = string.splitlines(True)
+
+    if len(lines) <= 1:
+        return escape(string)
+
+    # Remove empty trailing line
+    if lines and not lines[-1]:
+        del lines[-1]
+        lines[-1] += '\n'
+    return '""\n' + '\n'.join([(prefix + escape(line)) for line in lines])
+
+
+def write_po(
+    fileobj: SupportsWrite[bytes],
+    catalog: Catalog,
+    width: int = 76,
+    no_location: bool = False,
+    omit_header: bool = False,
+    sort_output: bool = False,
+    sort_by_file: bool = False,
+    ignore_obsolete: bool = False,
+    include_previous: bool = False,
+    include_lineno: bool = True,
+) -> None:
+    r"""Write a ``gettext`` PO (portable object) template file for a given
     message catalog to the provided file-like object.

     >>> catalog = Catalog()
@@ -284,11 +529,112 @@ def write_po(fileobj: SupportsWrite[bytes], catalog: Catalog, width: int=76,
                              updating the catalog
     :param include_lineno: include line number in the location comment
     """
-    pass
-
-
-def _sort_messages(messages: Iterable[Message], sort_by: Literal['message',
-    'location']) ->list[Message]:
+    def _normalize(key, prefix=''):
+        return normalize(key, prefix=prefix, width=width)
+
+    def _write(text):
+        if isinstance(text, str):
+            text = text.encode(catalog.charset, 'backslashreplace')
+        fileobj.write(text)
+
+    def _write_comment(comment, prefix=''):
+        # xgettext always wraps comments even if --no-wrap is passed;
+        # provide the same behaviour
+        _width = width if width and width > 0 else 76
+        for line in wraptext(comment, _width):
+            _write(f"#{prefix} {line.strip()}\n")
+
+    def _write_message(message, prefix=''):
+        if isinstance(message.id, (list, tuple)):
+            if message.context:
+                _write(f"{prefix}msgctxt {_normalize(message.context, prefix)}\n")
+            _write(f"{prefix}msgid {_normalize(message.id[0], prefix)}\n")
+            _write(f"{prefix}msgid_plural {_normalize(message.id[1], prefix)}\n")
+
+            for idx in range(catalog.num_plurals):
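+                # Emit one msgstr[idx] per plural form; missing translations become empty strings.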
+                try:
+                    string = message.string[idx]
+                except IndexError:
+                    string = ''
+                _write(f"{prefix}msgstr[{idx:d}] {_normalize(string, prefix)}\n")
+        else:
+            if message.context:
+                _write(f"{prefix}msgctxt {_normalize(message.context, prefix)}\n")
+            _write(f"{prefix}msgid {_normalize(message.id, prefix)}\n")
+            _write(f"{prefix}msgstr {_normalize(message.string or '', prefix)}\n")
+
+    sort_by = None
+    if sort_output:
+        sort_by = "message"
+    elif sort_by_file:
+        sort_by = "location"
+
+    for message in _sort_messages(catalog, sort_by=sort_by):
+        if not message.id:  # This is the header "message"
+            if omit_header:
+                continue
+            comment_header = catalog.header_comment
+            if width and width > 0:
+                lines = []
+                for line in comment_header.splitlines():
+                    lines += wraptext(line, width=width,
+                                      subsequent_indent='# ')
+                comment_header = '\n'.join(lines)
+            _write(f"{comment_header}\n")
+
+        for comment in message.user_comments:
+            _write_comment(comment)
+        for comment in message.auto_comments:
+            _write_comment(comment, prefix='.')
+
+        if not no_location:
+            locs = []
+
+            # sort locations by filename and lineno.
+            # if there's no <int> as lineno, use `-1`.
+            # if no sorting possible, leave unsorted.
+            # (see issue #606)
+            try:
+                locations = sorted(message.locations,
+                                   key=lambda x: (x[0], isinstance(x[1], int) and x[1] or -1))
+            except TypeError:  # e.g. "TypeError: unorderable types: NoneType() < int()"
+                locations = message.locations
+
+            for filename, lineno in locations:
+                location = filename.replace(os.sep, '/')
+                if lineno and include_lineno:
+                    location = f"{location}:{lineno:d}"
+                if location not in locs:
+                    locs.append(location)
+            _write_comment(' '.join(locs), prefix=':')
+        if message.flags:
+            _write(f"#{', '.join(['', *sorted(message.flags)])}\n")
+
+        if message.previous_id and include_previous:
+            _write_comment(
+                f'msgid {_normalize(message.previous_id[0])}',
+                prefix='|',
+            )
+            if len(message.previous_id) > 1:
+                _write_comment('msgid_plural %s' % _normalize(
+                    message.previous_id[1],
+                ), prefix='|')
+
+        _write_message(message)
+        _write('\n')
+
+    if not ignore_obsolete:
+        for message in _sort_messages(
+            catalog.obsolete.values(),
+            sort_by=sort_by,
+        ):
+            for comment in message.user_comments:
+                _write_comment(comment)
+            _write_message(message, prefix='#~ ')
+            _write('\n')
+
+
+def _sort_messages(messages: Iterable[Message], sort_by: Literal["message", "location"]) -> list[Message]:
     """
     Sort the given message iterable by the given criteria.

@@ -298,4 +644,9 @@ def _sort_messages(messages: Iterable[Message], sort_by: Literal['message',
     :param sort_by: Sort by which criteria? Options are `message` and `location`.
     :return: list[Message]
     """
-    pass
+    messages = list(messages)
+    if sort_by == "message":
+        messages.sort()
+    elif sort_by == "location":
+        messages.sort(key=lambda m: m.locations)
+    return messages
diff --git a/babel/messages/setuptools_frontend.py b/babel/messages/setuptools_frontend.py
index 626c6dd..67b9f24 100644
--- a/babel/messages/setuptools_frontend.py
+++ b/babel/messages/setuptools_frontend.py
@@ -1,11 +1,16 @@
 from __future__ import annotations
+
 from babel.messages import frontend
+
 try:
+    # See: https://setuptools.pypa.io/en/latest/deprecated/distutils-legacy.html
     from setuptools import Command
+
     try:
         from setuptools.errors import BaseError, OptionError, SetupError
-    except ImportError:
+    except ImportError:  # Error aliases only added in setuptools 59 (2021-11).
         OptionError = SetupError = BaseError = Exception
+
 except ImportError:
     from distutils.cmd import Command
     from distutils.errors import DistutilsSetupError as SetupError
@@ -20,7 +25,11 @@ def check_message_extractors(dist, name, value):
     :param value: the value of the keyword argument
     :raise `DistutilsSetupError`: if the value is not valid
     """
-    pass
+    assert name == "message_extractors"
+    if not isinstance(value, dict):
+        raise SetupError(
+            'the value of the "message_extractors" parameter must be a dictionary',
+        )


 class compile_catalog(frontend.CompileCatalog, Command):
@@ -91,6 +100,9 @@ class update_catalog(frontend.UpdateCatalog, Command):
     """


-COMMANDS = {'compile_catalog': compile_catalog, 'extract_messages':
-    extract_messages, 'init_catalog': init_catalog, 'update_catalog':
-    update_catalog}
+COMMANDS = {
+    "compile_catalog": compile_catalog,
+    "extract_messages": extract_messages,
+    "init_catalog": init_catalog,
+    "update_catalog": update_catalog,
+}
diff --git a/babel/numbers.py b/babel/numbers.py
index d54ba55..2240c65 100644
--- a/babel/numbers.py
+++ b/babel/numbers.py
@@ -14,16 +14,23 @@
     :copyright: (c) 2013-2023 by the Babel Team.
     :license: BSD, see LICENSE for more details.
 """
+# TODO:
+#  Padding and rounding increments in pattern:
+#  - https://www.unicode.org/reports/tr35/ (Appendix G.6)
 from __future__ import annotations
+
 import datetime
 import decimal
 import re
 import warnings
 from typing import TYPE_CHECKING, Any, cast, overload
+
 from babel.core import Locale, default_locale, get_global
 from babel.localedata import LocaleDataDict
+
 if TYPE_CHECKING:
     from typing_extensions import Literal
+
 LC_NUMERIC = default_locale('LC_NUMERIC')


@@ -31,15 +38,17 @@ class UnknownCurrencyError(Exception):
     """Exception thrown when a currency is requested for which no data is available.
     """

-    def __init__(self, identifier: str) ->None:
+    def __init__(self, identifier: str) -> None:
         """Create the exception.
         :param identifier: the identifier string of the unsupported currency
         """
-        Exception.__init__(self, f'Unknown currency {identifier!r}.')
+        Exception.__init__(self, f"Unknown currency {identifier!r}.")
+
+        #: The identifier of the locale that could not be found.
         self.identifier = identifier


-def list_currencies(locale: (Locale | str | None)=None) ->set[str]:
+def list_currencies(locale: Locale | str | None = None) -> set[str]:
     """ Return a `set` of normalized currency codes.

     .. versionadded:: 2.5.0
@@ -49,11 +58,13 @@ def list_currencies(locale: (Locale | str | None)=None) ->set[str]:
                    provided, returns the list of all currencies from all
                    locales.
     """
-    pass
+    # Get locale-scoped currencies.
+    if locale:
+        return set(Locale.parse(locale).currencies)
+    return set(get_global('all_currencies'))


-def validate_currency(currency: str, locale: (Locale | str | None)=None
-    ) ->None:
+def validate_currency(currency: str, locale: Locale | str | None = None) -> None:
     """ Check the currency code is recognized by Babel.

     Accepts a ``locale`` parameter for fine-grained validation, working as
@@ -61,19 +72,25 @@ def validate_currency(currency: str, locale: (Locale | str | None)=None

     Raises a `UnknownCurrencyError` exception if the currency is unknown to Babel.
     """
-    pass
+    if currency not in list_currencies(locale):
+        raise UnknownCurrencyError(currency)


-def is_currency(currency: str, locale: (Locale | str | None)=None) ->bool:
+def is_currency(currency: str, locale: Locale | str | None = None) -> bool:
     """ Returns `True` only if a currency is recognized by Babel.

     This method always returns a Boolean and never raises.
     """
-    pass
+    if not currency or not isinstance(currency, str):
+        return False
+    try:
+        validate_currency(currency, locale)
+    except UnknownCurrencyError:
+        return False
+    return True


-def normalize_currency(currency: str, locale: (Locale | str | None)=None) ->(
-    str | None):
+def normalize_currency(currency: str, locale: Locale | str | None = None) -> str | None:
     """Returns the normalized identifier of any currency code.

     Accepts a ``locale`` parameter for fined-grained validation, working as
@@ -81,11 +98,18 @@ def normalize_currency(currency: str, locale: (Locale | str | None)=None) ->(

     Returns None if the currency is unknown to Babel.
     """
-    pass
-
-
-def get_currency_name(currency: str, count: (float | decimal.Decimal | None
-    )=None, locale: (Locale | str | None)=LC_NUMERIC) ->str:
+    if isinstance(currency, str):
+        currency = currency.upper()
+    if not is_currency(currency, locale):
+        return None
+    return currency
+
+
+def get_currency_name(
+    currency: str,
+    count: float | decimal.Decimal | None = None,
+    locale: Locale | str | None = LC_NUMERIC,
+) -> str:
     """Return the name used by the locale for the specified currency.

     >>> get_currency_name('USD', locale='en_US')
@@ -98,11 +122,23 @@ def get_currency_name(currency: str, count: (float | decimal.Decimal | None
                   will be pluralized to that number if possible.
     :param locale: the `Locale` object or locale identifier.
     """
-    pass
-
-
-def get_currency_symbol(currency: str, locale: (Locale | str | None)=LC_NUMERIC
-    ) ->str:
+    loc = Locale.parse(locale)
+    if count is not None:
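+        # When a count is given, prefer the plural-aware display name if the locale has one.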
+        try:
+            plural_form = loc.plural_form(count)
+        except (OverflowError, ValueError):
+            plural_form = 'other'
+        plural_names = loc._data['currency_names_plural']
+        if currency in plural_names:
+            currency_plural_names = plural_names[currency]
+            if plural_form in currency_plural_names:
+                return currency_plural_names[plural_form]
+            if 'other' in currency_plural_names:
+                return currency_plural_names['other']
+    return loc.currencies.get(currency, currency)
+
+
+def get_currency_symbol(currency: str, locale: Locale | str | None = LC_NUMERIC) -> str:
     """Return the symbol used by the locale for the specified currency.

     >>> get_currency_symbol('USD', locale='en_US')
@@ -111,10 +147,10 @@ def get_currency_symbol(currency: str, locale: (Locale | str | None)=LC_NUMERIC
     :param currency: the currency code.
     :param locale: the `Locale` object or locale identifier.
     """
-    pass
+    return Locale.parse(locale).currency_symbols.get(currency, currency)


-def get_currency_precision(currency: str) ->int:
+def get_currency_precision(currency: str) -> int:
     """Return currency's precision.

     Precision is the number of decimals found after the decimal point in the
@@ -124,11 +160,15 @@ def get_currency_precision(currency: str) ->int:

     :param currency: the currency code.
     """
-    pass
+    precisions = get_global('currency_fractions')
+    return precisions.get(currency, precisions['DEFAULT'])[0]


-def get_currency_unit_pattern(currency: str, count: (float | decimal.
-    Decimal | None)=None, locale: (Locale | str | None)=LC_NUMERIC) ->str:
+def get_currency_unit_pattern(
+    currency: str,
+    count: float | decimal.Decimal | None = None,
+    locale: Locale | str | None = LC_NUMERIC,
+) -> str:
     """
     Return the unit pattern used for long display of a currency value
     for a given locale.
@@ -146,13 +186,50 @@ def get_currency_unit_pattern(currency: str, count: (float | decimal.
                   pattern for that number will be returned.
     :param locale: the `Locale` object or locale identifier.
     """
-    pass
-
-
-def get_territory_currencies(territory: str, start_date: (datetime.date |
-    None)=None, end_date: (datetime.date | None)=None, tender: bool=True,
-    non_tender: bool=False, include_details: bool=False) ->(list[str] |
-    list[dict[str, Any]]):
+    loc = Locale.parse(locale)
+    if count is not None:
+        plural_form = loc.plural_form(count)
+        try:
+            return loc._data['currency_unit_patterns'][plural_form]
+        except LookupError:
+            # Fall back to 'other'
+            pass
+
+    return loc._data['currency_unit_patterns']['other']
+
+
+@overload
+def get_territory_currencies(
+    territory: str,
+    start_date: datetime.date | None = ...,
+    end_date: datetime.date | None = ...,
+    tender: bool = ...,
+    non_tender: bool = ...,
+    include_details: Literal[False] = ...,
+) -> list[str]:
+    ...  # pragma: no cover
+
+
+@overload
+def get_territory_currencies(
+    territory: str,
+    start_date: datetime.date | None = ...,
+    end_date: datetime.date | None = ...,
+    tender: bool = ...,
+    non_tender: bool = ...,
+    include_details: Literal[True] = ...,
+) -> list[dict[str, Any]]:
+    ...  # pragma: no cover
+
+
+def get_territory_currencies(
+    territory: str,
+    start_date: datetime.date | None = None,
+    end_date: datetime.date | None = None,
+    tender: bool = True,
+    non_tender: bool = False,
+    include_details: bool = False,
+) -> list[str] | list[dict[str, Any]]:
     """Returns the list of currencies for the given territory that are valid for
     the given date range.  In addition to that the currency database
     distinguishes between tender and non-tender currencies.  By default only
@@ -202,7 +279,62 @@ def get_territory_currencies(territory: str, start_date: (datetime.date |
                             dictionary will have the keys ``'currency'``,
                             ``'from'``, ``'to'``, and ``'tender'``.
     """
-    pass
+    currencies = get_global('territory_currencies')
+    if start_date is None:
+        start_date = datetime.date.today()
+    elif isinstance(start_date, datetime.datetime):
+        start_date = start_date.date()
+    if end_date is None:
+        end_date = start_date
+    elif isinstance(end_date, datetime.datetime):
+        end_date = end_date.date()
+
+    curs = currencies.get(territory.upper(), ())
+    # TODO: validate that the territory exists
+
+    def _is_active(start, end):
+        return (start is None or start <= end_date) and \
+               (end is None or end >= start_date)
+
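+    # Keep currencies matching the tender filter whose validity window overlaps the requested dates.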
+    result = []
+    for currency_code, start, end, is_tender in curs:
+        if start:
+            start = datetime.date(*start)
+        if end:
+            end = datetime.date(*end)
+        if ((is_tender and tender) or
+                (not is_tender and non_tender)) and _is_active(start, end):
+            if include_details:
+                result.append({
+                    'currency': currency_code,
+                    'from': start,
+                    'to': end,
+                    'tender': is_tender,
+                })
+            else:
+                result.append(currency_code)
+
+    return result
+
+
+def _get_numbering_system(locale: Locale, numbering_system: Literal["default"] | str = "latn") -> str:
+    if numbering_system == "default":
+        return locale.default_numbering_system
+    else:
+        return numbering_system
+
+
+def _get_number_symbols(
+    locale: Locale | str | None,
+    *,
+    numbering_system: Literal["default"] | str = "latn",
+) -> LocaleDataDict:
+    parsed_locale = Locale.parse(locale)
+    numbering_system = _get_numbering_system(parsed_locale, numbering_system)
+    try:
+        return parsed_locale.number_symbols[numbering_system]
+    except KeyError as error:
+        raise UnsupportedNumberingSystemError(f"Unknown numbering system {numbering_system} for Locale {parsed_locale}.") from error


 class UnsupportedNumberingSystemError(Exception):
@@ -210,8 +342,11 @@ class UnsupportedNumberingSystemError(Exception):
     pass


-def get_decimal_symbol(locale: (Locale | str | None)=LC_NUMERIC, *,
-    numbering_system: (Literal['default'] | str)='latn') ->str:
+def get_decimal_symbol(
+    locale: Locale | str | None = LC_NUMERIC,
+    *,
+    numbering_system: Literal["default"] | str = "latn",
+) -> str:
     """Return the symbol used by the locale to separate decimal fractions.

     >>> get_decimal_symbol('en_US')
@@ -226,49 +361,58 @@ def get_decimal_symbol(locale: (Locale | str | None)=LC_NUMERIC, *,
                              The special value "default" will use the default numbering system of the locale.
     :raise `UnsupportedNumberingSystemError`: If the numbering system is not supported by the locale.
     """
-    pass
+    return _get_number_symbols(locale, numbering_system=numbering_system).get('decimal', '.')


-def get_plus_sign_symbol(locale: (Locale | str | None)=LC_NUMERIC, *,
-    numbering_system: (Literal['default'] | str)='latn') ->str:
+def get_plus_sign_symbol(
+    locale: Locale | str | None = LC_NUMERIC,
+    *,
+    numbering_system: Literal["default"] | str = "latn",
+) -> str:
     """Return the plus sign symbol used by the current locale.

     >>> get_plus_sign_symbol('en_US')
     u'+'
     >>> get_plus_sign_symbol('ar_EG', numbering_system='default')
-    u'؜+'
+    u'\u061c+'
     >>> get_plus_sign_symbol('ar_EG', numbering_system='latn')
-    u'‎+'
+    u'\u200e+'

     :param locale: the `Locale` object or locale identifier
     :param numbering_system: The numbering system used for fetching the symbol. Defaults to "latn".
                              The special value "default" will use the default numbering system of the locale.
     :raise `UnsupportedNumberingSystemError`: if the numbering system is not supported by the locale.
     """
-    pass
+    return _get_number_symbols(locale, numbering_system=numbering_system).get('plusSign', '+')


-def get_minus_sign_symbol(locale: (Locale | str | None)=LC_NUMERIC, *,
-    numbering_system: (Literal['default'] | str)='latn') ->str:
+def get_minus_sign_symbol(
+    locale: Locale | str | None = LC_NUMERIC,
+    *,
+    numbering_system: Literal["default"] | str = "latn",
+) -> str:
     """Return the minus sign symbol used by the current locale.

     >>> get_minus_sign_symbol('en_US')
     u'-'
     >>> get_minus_sign_symbol('ar_EG', numbering_system='default')
-    u'؜-'
+    u'\u061c-'
     >>> get_minus_sign_symbol('ar_EG', numbering_system='latn')
-    u'‎-'
+    u'\u200e-'

     :param locale: the `Locale` object or locale identifier
     :param numbering_system: The numbering system used for fetching the symbol. Defaults to "latn".
                              The special value "default" will use the default numbering system of the locale.
     :raise `UnsupportedNumberingSystemError`: if the numbering system is not supported by the locale.
     """
-    pass
+    return _get_number_symbols(locale, numbering_system=numbering_system).get('minusSign', '-')


-def get_exponential_symbol(locale: (Locale | str | None)=LC_NUMERIC, *,
-    numbering_system: (Literal['default'] | str)='latn') ->str:
+def get_exponential_symbol(
+    locale: Locale | str | None = LC_NUMERIC,
+    *,
+    numbering_system: Literal["default"] | str = "latn",
+) -> str:
     """Return the symbol used by the locale to separate mantissa and exponent.

     >>> get_exponential_symbol('en_US')
@@ -283,11 +427,14 @@ def get_exponential_symbol(locale: (Locale | str | None)=LC_NUMERIC, *,
                              The special value "default" will use the default numbering system of the locale.
     :raise `UnsupportedNumberingSystemError`: if the numbering system is not supported by the locale.
     """
-    pass
+    return _get_number_symbols(locale, numbering_system=numbering_system).get('exponential', 'E')


-def get_group_symbol(locale: (Locale | str | None)=LC_NUMERIC, *,
-    numbering_system: (Literal['default'] | str)='latn') ->str:
+def get_group_symbol(
+    locale: Locale | str | None = LC_NUMERIC,
+    *,
+    numbering_system: Literal["default"] | str = "latn",
+) -> str:
     """Return the symbol used by the locale to separate groups of thousands.

     >>> get_group_symbol('en_US')
@@ -302,11 +449,14 @@ def get_group_symbol(locale: (Locale | str | None)=LC_NUMERIC, *,
                              The special value "default" will use the default numbering system of the locale.
     :raise `UnsupportedNumberingSystemError`: if the numbering system is not supported by the locale.
     """
-    pass
+    return _get_number_symbols(locale, numbering_system=numbering_system).get('group', ',')


-def get_infinity_symbol(locale: (Locale | str | None)=LC_NUMERIC, *,
-    numbering_system: (Literal['default'] | str)='latn') ->str:
+def get_infinity_symbol(
+    locale: Locale | str | None = LC_NUMERIC,
+    *,
+    numbering_system: Literal["default"] | str = "latn",
+) -> str:
     """Return the symbol used by the locale to represent infinity.

     >>> get_infinity_symbol('en_US')
@@ -321,11 +471,10 @@ def get_infinity_symbol(locale: (Locale | str | None)=LC_NUMERIC, *,
                              The special value "default" will use the default numbering system of the locale.
     :raise `UnsupportedNumberingSystemError`: if the numbering system is not supported by the locale.
     """
-    pass
+    return _get_number_symbols(locale, numbering_system=numbering_system).get('infinity', '∞')


-def format_number(number: (float | decimal.Decimal | str), locale: (Locale |
-    str | None)=LC_NUMERIC) ->str:
+def format_number(number: float | decimal.Decimal | str, locale: Locale | str | None = LC_NUMERIC) -> str:
     """Return the given number formatted for a specific locale.

     >>> format_number(1099, locale='en_US')  # doctest: +SKIP
@@ -342,26 +491,39 @@ def format_number(number: (float | decimal.Decimal | str), locale: (Locale |


     """
-    pass
+    warnings.warn('Use babel.numbers.format_decimal() instead.', DeprecationWarning, stacklevel=2)
+    return format_decimal(number, locale=locale)


-def get_decimal_precision(number: decimal.Decimal) ->int:
+def get_decimal_precision(number: decimal.Decimal) -> int:
     """Return maximum precision of a decimal instance's fractional part.

     Precision is extracted from the fractional part only.
     """
-    pass
+    # Copied from: https://github.com/mahmoud/boltons/pull/59
+    assert isinstance(number, decimal.Decimal)
+    decimal_tuple = number.normalize().as_tuple()
+    # Note: DecimalTuple.exponent can be 'n' (qNaN), 'N' (sNaN), or 'F' (Infinity)
+    if not isinstance(decimal_tuple.exponent, int) or decimal_tuple.exponent >= 0:
+        return 0
+    return abs(decimal_tuple.exponent)


-def get_decimal_quantum(precision: (int | decimal.Decimal)) ->decimal.Decimal:
+def get_decimal_quantum(precision: int | decimal.Decimal) -> decimal.Decimal:
     """Return minimal quantum of a number, as defined by precision."""
-    pass
-
-
-def format_decimal(number: (float | decimal.Decimal | str), format: (str |
-    NumberPattern | None)=None, locale: (Locale | str | None)=LC_NUMERIC,
-    decimal_quantization: bool=True, group_separator: bool=True, *,
-    numbering_system: (Literal['default'] | str)='latn') ->str:
+    assert isinstance(precision, (int, decimal.Decimal))
+    return decimal.Decimal(10) ** (-precision)
+
+
+def format_decimal(
+    number: float | decimal.Decimal | str,
+    format: str | NumberPattern | None = None,
+    locale: Locale | str | None = LC_NUMERIC,
+    decimal_quantization: bool = True,
+    group_separator: bool = True,
+    *,
+    numbering_system: Literal["default"] | str = "latn",
+) -> str:
     """Return the given decimal number formatted for a specific locale.

     >>> format_decimal(1.2345, locale='en_US')
@@ -409,13 +571,22 @@ def format_decimal(number: (float | decimal.Decimal | str), format: (str |
                              The special value "default" will use the default numbering system of the locale.
     :raise `UnsupportedNumberingSystemError`: If the numbering system is not supported by the locale.
     """
-    pass
-
-
-def format_compact_decimal(number: (float | decimal.Decimal | str), *,
-    format_type: Literal['short', 'long']='short', locale: (Locale | str |
-    None)=LC_NUMERIC, fraction_digits: int=0, numbering_system: (Literal[
-    'default'] | str)='latn') ->str:
+    locale = Locale.parse(locale)
+    if format is None:
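+        # ``format`` is None here, and ``decimal_formats[None]`` is the locale's default pattern.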
+        format = locale.decimal_formats[format]
+    pattern = parse_pattern(format)
+    return pattern.apply(
+        number, locale, decimal_quantization=decimal_quantization, group_separator=group_separator, numbering_system=numbering_system)
+
+
+def format_compact_decimal(
+    number: float | decimal.Decimal | str,
+    *,
+    format_type: Literal["short", "long"] = "short",
+    locale: Locale | str | None = LC_NUMERIC,
+    fraction_digits: int = 0,
+    numbering_system: Literal["default"] | str = "latn",
+) -> str:
     """Return the given decimal number formatted for a specific locale in compact form.

     >>> format_compact_decimal(12345, format_type="short", locale='en_US')
@@ -431,7 +602,7 @@ def format_compact_decimal(number: (float | decimal.Decimal | str), *,
     >>> format_compact_decimal(21000000, format_type="long", locale="mk")
     u'21 милион'
     >>> format_compact_decimal(12345, format_type="short", locale='ar_EG', fraction_digits=2, numbering_system='default')
-    u'12٫34 ألف'
+    u'12٫34\xa0ألف'

     :param number: the number to format
     :param format_type: Compact format to use ("short" or "long")
@@ -441,29 +612,72 @@ def format_compact_decimal(number: (float | decimal.Decimal | str), *,
                              The special value "default" will use the default numbering system of the locale.
     :raise `UnsupportedNumberingSystemError`: If the numbering system is not supported by the locale.
     """
-    pass
-
-
-def _get_compact_format(number: (float | decimal.Decimal | str),
-    compact_format: LocaleDataDict, locale: Locale, fraction_digits: int
-    ) ->tuple[decimal.Decimal, NumberPattern | None]:
+    locale = Locale.parse(locale)
+    compact_format = locale.compact_decimal_formats[format_type]
+    number, format = _get_compact_format(number, compact_format, locale, fraction_digits)
+    # Did not find a format, fall back.
+    if format is None:
+        format = locale.decimal_formats[None]
+    pattern = parse_pattern(format)
+    return pattern.apply(number, locale, decimal_quantization=False, numbering_system=numbering_system)
+
+
+def _get_compact_format(
+    number: float | decimal.Decimal | str,
+    compact_format: LocaleDataDict,
+    locale: Locale,
+    fraction_digits: int,
+) -> tuple[decimal.Decimal, NumberPattern | None]:
     """Returns the number after dividing by the unit and the format pattern to use.
     The algorithm is described here:
     https://www.unicode.org/reports/tr35/tr35-45/tr35-numbers.html#Compact_Number_Formats.
     """
-    pass
+    if not isinstance(number, decimal.Decimal):
+        number = decimal.Decimal(str(number))
+    if number.is_nan() or number.is_infinite():
+        return number, None
+    format = None
+    for magnitude in sorted([int(m) for m in compact_format["other"]], reverse=True):
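+        # Walk magnitudes (1000, 10000, ...) from largest to smallest and use the first one the number reaches.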
+        if abs(number) >= magnitude:
+            # check the pattern using "other" as the amount
+            format = compact_format["other"][str(magnitude)]
+            pattern = parse_pattern(format).pattern
+            # if the pattern is "0", we do not divide the number
+            if pattern == "0":
+                break
+            # otherwise, we need to divide the number by the magnitude but remove zeros
+            # equal to the number of 0's in the pattern minus 1
+            number = cast(decimal.Decimal, number / (magnitude // (10 ** (pattern.count("0") - 1))))
+            # round to the number of fraction digits requested
+            rounded = round(number, fraction_digits)
+            # if the remaining number is singular, use the singular format
+            plural_form = locale.plural_form(abs(number))
+            if plural_form not in compact_format:
+                plural_form = "other"
+            if number == 1 and "1" in compact_format:
+                plural_form = "1"
+            format = compact_format[plural_form][str(magnitude)]
+            number = rounded
+            break
+    return number, format


 class UnknownCurrencyFormatError(KeyError):
     """Exception raised when an unknown currency format is requested."""


-def format_currency(number: (float | decimal.Decimal | str), currency: str,
-    format: (str | NumberPattern | None)=None, locale: (Locale | str | None
-    )=LC_NUMERIC, currency_digits: bool=True, format_type: Literal['name',
-    'standard', 'accounting']='standard', decimal_quantization: bool=True,
-    group_separator: bool=True, *, numbering_system: (Literal['default'] |
-    str)='latn') ->str:
+def format_currency(
+    number: float | decimal.Decimal | str,
+    currency: str,
+    format: str | NumberPattern | None = None,
+    locale: Locale | str | None = LC_NUMERIC,
+    currency_digits: bool = True,
+    format_type: Literal["name", "standard", "accounting"] = "standard",
+    decimal_quantization: bool = True,
+    group_separator: bool = True,
+    *,
+    numbering_system: Literal["default"] | str = "latn",
+) -> str:
     """Return formatted currency value.

     >>> format_currency(1099.98, 'USD', locale='en_US')
@@ -473,16 +687,16 @@ def format_currency(number: (float | decimal.Decimal | str), currency: str,
     >>> format_currency(1099.98, 'EUR', locale='de_DE')
     u'1.099,98\\xa0\\u20ac'
     >>> format_currency(1099.98, 'EGP', locale='ar_EG', numbering_system='default')
-    u'‏1٬099٫98 ج.م.‏'
+    u'\u200f1٬099٫98\xa0ج.م.\u200f'

     The format can also be specified explicitly.  The currency is
     placed with the '¤' sign.  As the sign gets repeated the format
     expands (¤ being the symbol, ¤¤ is the currency abbreviation and
     ¤¤¤ is the full name of the currency):

-    >>> format_currency(1099.98, 'EUR', u'¤¤ #,##0.00', locale='en_US')
+    >>> format_currency(1099.98, 'EUR', u'\xa4\xa4 #,##0.00', locale='en_US')
     u'EUR 1,099.98'
-    >>> format_currency(1099.98, 'EUR', u'#,##0.00 ¤¤¤', locale='en_US')
+    >>> format_currency(1099.98, 'EUR', u'#,##0.00 \xa4\xa4\xa4', locale='en_US')
     u'1,099.98 euros'

     Currencies usually have a specific number of decimal digits. This function
@@ -555,13 +769,76 @@ def format_currency(number: (float | decimal.Decimal | str), currency: str,
                              The special value "default" will use the default numbering system of the locale.
     :raise `UnsupportedNumberingSystemError`: If the numbering system is not supported by the locale.
     """
-    pass
-
-
-def format_compact_currency(number: (float | decimal.Decimal | str),
-    currency: str, *, format_type: Literal['short']='short', locale: (
-    Locale | str | None)=LC_NUMERIC, fraction_digits: int=0,
-    numbering_system: (Literal['default'] | str)='latn') ->str:
+    if format_type == 'name':
+        return _format_currency_long_name(number, currency, format=format,
+                                          locale=locale, currency_digits=currency_digits,
+                                          decimal_quantization=decimal_quantization, group_separator=group_separator,
+                                          numbering_system=numbering_system)
+    locale = Locale.parse(locale)
+    if format:
+        pattern = parse_pattern(format)
+    else:
+        try:
+            pattern = locale.currency_formats[format_type]
+        except KeyError:
+            raise UnknownCurrencyFormatError(f"{format_type!r} is not a known currency format type") from None
+
+    return pattern.apply(
+        number, locale, currency=currency, currency_digits=currency_digits,
+        decimal_quantization=decimal_quantization, group_separator=group_separator, numbering_system=numbering_system)
+
+
+def _format_currency_long_name(
+    number: float | decimal.Decimal | str,
+    currency: str,
+    format: str | NumberPattern | None = None,
+    locale: Locale | str | None = LC_NUMERIC,
+    currency_digits: bool = True,
+    format_type: Literal["name", "standard", "accounting"] = "standard",
+    decimal_quantization: bool = True,
+    group_separator: bool = True,
+    *,
+    numbering_system: Literal["default"] | str = "latn",
+) -> str:
+    # Algorithm described here:
+    # https://www.unicode.org/reports/tr35/tr35-numbers.html#Currencies
+    locale = Locale.parse(locale)
+    # Step 1.
+    # There are no examples of items with explicit count (0 or 1) in current
+    # locale data. So there is no point implementing that.
+    # Step 2.
+
+    # Correct number to numeric type, important for looking up plural rules:
+    number_n = float(number) if isinstance(number, str) else number
+
+    # Step 3.
+    unit_pattern = get_currency_unit_pattern(currency, count=number_n, locale=locale)
+
+    # Step 4.
+    display_name = get_currency_name(currency, count=number_n, locale=locale)
+
+    # Step 5.
+    if not format:
+        format = locale.decimal_formats[None]
+
+    pattern = parse_pattern(format)
+
+    number_part = pattern.apply(
+        number, locale, currency=currency, currency_digits=currency_digits,
+        decimal_quantization=decimal_quantization, group_separator=group_separator, numbering_system=numbering_system)
+
+    return unit_pattern.format(number_part, display_name)
+
+
+def format_compact_currency(
+    number: float | decimal.Decimal | str,
+    currency: str,
+    *,
+    format_type: Literal["short"] = "short",
+    locale: Locale | str | None = LC_NUMERIC,
+    fraction_digits: int = 0,
+    numbering_system: Literal["default"] | str = "latn",
+) -> str:
     """Format a number as a currency value in compact form.

     >>> format_compact_currency(12345, 'USD', locale='en_US')
@@ -569,7 +846,7 @@ def format_compact_currency(number: (float | decimal.Decimal | str),
     >>> format_compact_currency(123456789, 'USD', locale='en_US', fraction_digits=2)
     u'$123.46M'
     >>> format_compact_currency(123456789, 'EUR', locale='de_DE', fraction_digits=1)
-    '123,5 Mio. €'
+    '123,5\xa0Mio.\xa0€'

     :param number: the number to format
     :param currency: the currency code
@@ -580,13 +857,40 @@ def format_compact_currency(number: (float | decimal.Decimal | str),
                              The special value "default" will use the default numbering system of the locale.
     :raise `UnsupportedNumberingSystemError`: If the numbering system is not supported by the locale.
     """
-    pass
-
-
-def format_percent(number: (float | decimal.Decimal | str), format: (str |
-    NumberPattern | None)=None, locale: (Locale | str | None)=LC_NUMERIC,
-    decimal_quantization: bool=True, group_separator: bool=True, *,
-    numbering_system: (Literal['default'] | str)='latn') ->str:
+    locale = Locale.parse(locale)
+    try:
+        compact_format = locale.compact_currency_formats[format_type]
+    except KeyError as error:
+        raise UnknownCurrencyFormatError(f"{format_type!r} is not a known compact currency format type") from error
+    number, format = _get_compact_format(number, compact_format, locale, fraction_digits)
+    # Did not find a format, fall back.
+    if format is None or "¤" not in str(format):
+        # find first format that has a currency symbol
+        for magnitude in compact_format['other']:
+            format = compact_format['other'][magnitude].pattern
+            if '¤' not in format:
+                continue
+            # remove characters that are not the currency symbol, 0's or spaces
+            format = re.sub(r'[^0\s\¤]', '', format)
+            # compress adjacent spaces into one
+            format = re.sub(r'(\s)\s+', r'\1', format).strip()
+            break
+    if format is None:
+        raise ValueError('No compact currency format found for the given number and locale.')
+    pattern = parse_pattern(format)
+    return pattern.apply(number, locale, currency=currency, currency_digits=False, decimal_quantization=False,
+                         numbering_system=numbering_system)
+
+
+def format_percent(
+    number: float | decimal.Decimal | str,
+    format: str | NumberPattern | None = None,
+    locale: Locale | str | None = LC_NUMERIC,
+    decimal_quantization: bool = True,
+    group_separator: bool = True,
+    *,
+    numbering_system: Literal["default"] | str = "latn",
+) -> str:
     """Return formatted percent value for a specific locale.

     >>> format_percent(0.34, locale='en_US')
@@ -600,8 +904,8 @@ def format_percent(number: (float | decimal.Decimal | str), format: (str |

     The format pattern can also be specified explicitly:

-    >>> format_percent(25.1234, u'#,##0‰', locale='en_US')
-    u'25,123‰'
+    >>> format_percent(25.1234, u'#,##0\u2030', locale='en_US')
+    u'25,123\u2030'

     By default the locale is allowed to truncate and round a high-precision
     number by forcing its format pattern onto the decimal part. You can bypass
@@ -629,13 +933,24 @@ def format_percent(number: (float | decimal.Decimal | str), format: (str |
                              The special value "default" will use the default numbering system of the locale.
     :raise `UnsupportedNumberingSystemError`: If the numbering system is not supported by the locale.
     """
-    pass
-
-
-def format_scientific(number: (float | decimal.Decimal | str), format: (str |
-    NumberPattern | None)=None, locale: (Locale | str | None)=LC_NUMERIC,
-    decimal_quantization: bool=True, *, numbering_system: (Literal[
-    'default'] | str)='latn') ->str:
+    locale = Locale.parse(locale)
+    if not format:
+        format = locale.percent_formats[None]
+    pattern = parse_pattern(format)
+    return pattern.apply(
+        number, locale, decimal_quantization=decimal_quantization, group_separator=group_separator,
+        numbering_system=numbering_system,
+    )
+
+
+def format_scientific(
+        number: float | decimal.Decimal | str,
+        format: str | NumberPattern | None = None,
+        locale: Locale | str | None = LC_NUMERIC,
+        decimal_quantization: bool = True,
+        *,
+        numbering_system: Literal["default"] | str = "latn",
+) -> str:
     """Return value formatted in scientific notation for a specific locale.

     >>> format_scientific(10000, locale='en_US')
@@ -666,20 +981,29 @@ def format_scientific(number: (float | decimal.Decimal | str), format: (str |
                              The special value "default" will use the default numbering system of the locale.
     :raise `UnsupportedNumberingSystemError`: If the numbering system is not supported by the locale.
     """
-    pass
+    locale = Locale.parse(locale)
+    if not format:
+        format = locale.scientific_formats[None]
+    pattern = parse_pattern(format)
+    return pattern.apply(
+        number, locale, decimal_quantization=decimal_quantization, numbering_system=numbering_system)


 class NumberFormatError(ValueError):
     """Exception raised when a string cannot be parsed into a number."""

-    def __init__(self, message: str, suggestions: (list[str] | None)=None
-        ) ->None:
+    def __init__(self, message: str, suggestions: list[str] | None = None) -> None:
         super().__init__(message)
+        #: a list of properly formatted numbers derived from the invalid input
         self.suggestions = suggestions


-def parse_number(string: str, locale: (Locale | str | None)=LC_NUMERIC, *,
-    numbering_system: (Literal['default'] | str)='latn') ->int:
+def parse_number(
+    string: str,
+    locale: Locale | str | None = LC_NUMERIC,
+    *,
+    numbering_system: Literal["default"] | str = "latn",
+) -> int:
     """Parse localized number string into an integer.

     >>> parse_number('1,099', locale='en_US')
@@ -702,12 +1026,19 @@ def parse_number(string: str, locale: (Locale | str | None)=LC_NUMERIC, *,
     :raise `NumberFormatError`: if the string can not be converted to a number
     :raise `UnsupportedNumberingSystemError`: if the numbering system is not supported by the locale.
     """
-    pass
-
-
-def parse_decimal(string: str, locale: (Locale | str | None)=LC_NUMERIC,
-    strict: bool=False, *, numbering_system: (Literal['default'] | str)='latn'
-    ) ->decimal.Decimal:
+    try:
+        return int(string.replace(get_group_symbol(locale, numbering_system=numbering_system), ''))
+    except ValueError as ve:
+        raise NumberFormatError(f"{string!r} is not a valid number") from ve
+
+
+def parse_decimal(
+    string: str,
+    locale: Locale | str | None = LC_NUMERIC,
+    strict: bool = False,
+    *,
+    numbering_system: Literal["default"] | str = "latn",
+) -> decimal.Decimal:
     """Parse localized decimal string into a decimal.

     >>> parse_decimal('1,099.98', locale='en_US')
@@ -749,11 +1080,58 @@ def parse_decimal(string: str, locale: (Locale | str | None)=LC_NUMERIC,
                               decimal number
     :raise UnsupportedNumberingSystemError: if the numbering system is not supported by the locale.
     """
-    pass
-
-
-def _remove_trailing_zeros_after_decimal(string: str, decimal_symbol: str
-    ) ->str:
+    locale = Locale.parse(locale)
+    group_symbol = get_group_symbol(locale, numbering_system=numbering_system)
+    decimal_symbol = get_decimal_symbol(locale, numbering_system=numbering_system)
+
+    if not strict and (
+        group_symbol == '\xa0' and  # if the grouping symbol is U+00A0 NO-BREAK SPACE,
+        group_symbol not in string and  # and the string to be parsed does not contain it,
+        ' ' in string  # but it does contain a space instead,
+    ):
+        # ... it's reasonable to assume it is taking the place of the grouping symbol.
+        string = string.replace(' ', group_symbol)
+
+    try:
+        parsed = decimal.Decimal(string.replace(group_symbol, '')
+                                       .replace(decimal_symbol, '.'))
+    except decimal.InvalidOperation as exc:
+        raise NumberFormatError(f"{string!r} is not a valid decimal number") from exc
+    if strict and group_symbol in string:
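+        # Strict mode: re-format the parsed value and compare it to the input to detect misplaced separators.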
+        proper = format_decimal(parsed, locale=locale, decimal_quantization=False, numbering_system=numbering_system)
+        if string != proper and proper != _remove_trailing_zeros_after_decimal(string, decimal_symbol):
+            try:
+                parsed_alt = decimal.Decimal(string.replace(decimal_symbol, '')
+                                                   .replace(group_symbol, '.'))
+            except decimal.InvalidOperation as exc:
+                raise NumberFormatError(
+                    f"{string!r} is not a properly formatted decimal number. "
+                    f"Did you mean {proper!r}?",
+                    suggestions=[proper],
+                ) from exc
+            else:
+                proper_alt = format_decimal(
+                    parsed_alt,
+                    locale=locale,
+                    decimal_quantization=False,
+                    numbering_system=numbering_system,
+                )
+                if proper_alt == proper:
+                    raise NumberFormatError(
+                        f"{string!r} is not a properly formatted decimal number. "
+                        f"Did you mean {proper!r}?",
+                        suggestions=[proper],
+                    )
+                else:
+                    raise NumberFormatError(
+                        f"{string!r} is not a properly formatted decimal number. "
+                        f"Did you mean {proper!r}? Or maybe {proper_alt!r}?",
+                        suggestions=[proper, proper_alt],
+                    )
+    return parsed
+
+
+def _remove_trailing_zeros_after_decimal(string: str, decimal_symbol: str) -> str:
     """
     Remove trailing zeros from the decimal part of a numeric string.

@@ -777,18 +1155,28 @@ def _remove_trailing_zeros_after_decimal(string: str, decimal_symbol: str
     >>> _remove_trailing_zeros_after_decimal("100", ".")
     '100'
     """
-    pass
+    integer_part, _, decimal_part = string.partition(decimal_symbol)
+
+    if decimal_part:
+        decimal_part = decimal_part.rstrip("0")
+        if decimal_part:
+            return integer_part + decimal_symbol + decimal_part
+        return integer_part
+
+    return string


-PREFIX_END = '[^0-9@#.,]'
-NUMBER_TOKEN = '[0-9@#.,E+]'
-PREFIX_PATTERN = "(?P<prefix>(?:'[^']*'|%s)*)" % PREFIX_END
-NUMBER_PATTERN = '(?P<number>%s*)' % NUMBER_TOKEN
-SUFFIX_PATTERN = '(?P<suffix>.*)'
-number_re = re.compile(f'{PREFIX_PATTERN}{NUMBER_PATTERN}{SUFFIX_PATTERN}')
+PREFIX_END = r'[^0-9@#.,]'
+NUMBER_TOKEN = r'[0-9@#.,E+]'

+PREFIX_PATTERN = r"(?P<prefix>(?:'[^']*'|%s)*)" % PREFIX_END
+NUMBER_PATTERN = r"(?P<number>%s*)" % NUMBER_TOKEN
+SUFFIX_PATTERN = r"(?P<suffix>.*)"

-def parse_grouping(p: str) ->tuple[int, int]:
+number_re = re.compile(f"{PREFIX_PATTERN}{NUMBER_PATTERN}{SUFFIX_PATTERN}")
+
+
+def parse_grouping(p: str) -> tuple[int, int]:
     """Parse primary and secondary digit grouping

     >>> parse_grouping('##')
@@ -798,20 +1186,98 @@ def parse_grouping(p: str) ->tuple[int, int]:
     >>> parse_grouping('#,####,###')
     (3, 4)
     """
-    pass
-
-
-def parse_pattern(pattern: (NumberPattern | str)) ->NumberPattern:
+    width = len(p)
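+    # Group sizes are counted from the right: g1 is the primary (rightmost) group, g2 the one before it.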
+    g1 = p.rfind(',')
+    if g1 == -1:
+        return 1000, 1000
+    g1 = width - g1 - 1
+    g2 = p[:-g1 - 1].rfind(',')
+    if g2 == -1:
+        return g1, g1
+    g2 = width - g1 - g2 - 2
+    return g1, g2
+
+
+def parse_pattern(pattern: NumberPattern | str) -> NumberPattern:
     """Parse number format patterns"""
-    pass
+    if isinstance(pattern, NumberPattern):
+        return pattern
+
+    def _match_number(pattern):
+        rv = number_re.search(pattern)
+        if rv is None:
+            raise ValueError(f"Invalid number pattern {pattern!r}")
+        return rv.groups()
+
+    pos_pattern = pattern
+
+    # Do we have a negative subpattern?
+    if ';' in pattern:
+        pos_pattern, neg_pattern = pattern.split(';', 1)
+        pos_prefix, number, pos_suffix = _match_number(pos_pattern)
+        neg_prefix, _, neg_suffix = _match_number(neg_pattern)
+    else:
+        pos_prefix, number, pos_suffix = _match_number(pos_pattern)
+        neg_prefix = f"-{pos_prefix}"
+        neg_suffix = pos_suffix
+    if 'E' in number:
+        number, exp = number.split('E', 1)
+    else:
+        exp = None
+    if '@' in number and '.' in number and '0' in number:
+        raise ValueError('Significant digit patterns can not contain "@" or "0"')
+    if '.' in number:
+        integer, fraction = number.rsplit('.', 1)
+    else:
+        integer = number
+        fraction = ''
+
+    def parse_precision(p):
+        """Calculate the min and max allowed digits"""
+        min = max = 0
+        for c in p:
+            if c in '@0':
+                min += 1
+                max += 1
+            elif c == '#':
+                max += 1
+            elif c == ',':
+                continue
+            else:
+                break
+        return min, max
+
+    int_prec = parse_precision(integer)
+    frac_prec = parse_precision(fraction)
+    if exp:
+        exp_plus = exp.startswith('+')
+        exp = exp.lstrip('+')
+        exp_prec = parse_precision(exp)
+    else:
+        exp_plus = None
+        exp_prec = None
+    grouping = parse_grouping(integer)
+    return NumberPattern(pattern, (pos_prefix, neg_prefix),
+                         (pos_suffix, neg_suffix), grouping,
+                         int_prec, frac_prec,
+                         exp_prec, exp_plus, number)


 class NumberPattern:

-    def __init__(self, pattern: str, prefix: tuple[str, str], suffix: tuple
-        [str, str], grouping: tuple[int, int], int_prec: tuple[int, int],
-        frac_prec: tuple[int, int], exp_prec: (tuple[int, int] | None),
-        exp_plus: (bool | None), number_pattern: (str | None)=None) ->None:
+    def __init__(
+        self,
+        pattern: str,
+        prefix: tuple[str, str],
+        suffix: tuple[str, str],
+        grouping: tuple[int, int],
+        int_prec: tuple[int, int],
+        frac_prec: tuple[int, int],
+        exp_prec: tuple[int, int] | None,
+        exp_plus: bool | None,
+        number_pattern: str | None = None,
+    ) -> None:
+        # Metadata of the decomposed parsed pattern.
         self.pattern = pattern
         self.prefix = prefix
         self.suffix = suffix
@@ -823,30 +1289,68 @@ class NumberPattern:
         self.exp_plus = exp_plus
         self.scale = self.compute_scale()

-    def __repr__(self) ->str:
-        return f'<{type(self).__name__} {self.pattern!r}>'
+    def __repr__(self) -> str:
+        return f"<{type(self).__name__} {self.pattern!r}>"

-    def compute_scale(self) ->Literal[0, 2, 3]:
+    def compute_scale(self) -> Literal[0, 2, 3]:
         """Return the scaling factor to apply to the number before rendering.

         Auto-set to a factor of 2 or 3 if presence of a ``%`` or ``‰`` sign is
         detected in the prefix or suffix of the pattern. Default is to not mess
         with the scale at all and keep it to 0.
         """
-        pass
-
-    def scientific_notation_elements(self, value: decimal.Decimal, locale:
-        (Locale | str | None), *, numbering_system: (Literal['default'] |
-        str)='latn') ->tuple[decimal.Decimal, int, str]:
+        scale = 0
+        if '%' in ''.join(self.prefix + self.suffix):
+            scale = 2
+        elif '‰' in ''.join(self.prefix + self.suffix):
+            scale = 3
+        return scale
+
+    def scientific_notation_elements(
+        self,
+        value: decimal.Decimal,
+        locale: Locale | str | None,
+        *,
+        numbering_system: Literal["default"] | str = "latn",
+    ) -> tuple[decimal.Decimal, int, str]:
         """ Returns normalized scientific notation components of a value.
         """
-        pass
-
-    def apply(self, value: (float | decimal.Decimal | str), locale: (Locale |
-        str | None), currency: (str | None)=None, currency_digits: bool=
-        True, decimal_quantization: bool=True, force_frac: (tuple[int, int] |
-        None)=None, group_separator: bool=True, *, numbering_system: (
-        Literal['default'] | str)='latn'):
+        # Normalize value to only have one lead digit.
+        exp = value.adjusted()
+        value = value * get_decimal_quantum(exp)
+        assert value.adjusted() == 0
+
+        # Shift exponent and value by the minimum number of leading digits
+        # imposed by the rendering pattern. And always make that number
+        # greater than or equal to 1.
+        lead_shift = max([1, min(self.int_prec)]) - 1
+        exp = exp - lead_shift
+        value = value * get_decimal_quantum(-lead_shift)
+
+        # Get exponent sign symbol.
+        exp_sign = ''
+        if exp < 0:
+            exp_sign = get_minus_sign_symbol(locale, numbering_system=numbering_system)
+        elif self.exp_plus:
+            exp_sign = get_plus_sign_symbol(locale, numbering_system=numbering_system)
+
+        # Normalize exponent value now that we have the sign.
+        exp = abs(exp)
+
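+        # Illustrative: with the default '#E0' pattern, Decimal('1234')
+        # normalizes to (Decimal('1.234'), 3, '') here, which apply() later
+        # renders as '1.234E3' in an en_US-style locale.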
+        return value, exp, exp_sign
+
+    def apply(
+        self,
+        value: float | decimal.Decimal | str,
+        locale: Locale | str | None,
+        currency: str | None = None,
+        currency_digits: bool = True,
+        decimal_quantization: bool = True,
+        force_frac: tuple[int, int] | None = None,
+        group_separator: bool = True,
+        *,
+        numbering_system: Literal["default"] | str = "latn",
+    ):
         """Renders into a string a number following the defined pattern.

         Forced decimal quantization is active by default so we'll produce a
@@ -875,4 +1379,176 @@ class NumberPattern:
         :rtype: str
         :raise UnsupportedNumberingSystemError: If the numbering system is not supported by the locale.
         """
-        pass
+        if not isinstance(value, decimal.Decimal):
+            value = decimal.Decimal(str(value))
+
+        value = value.scaleb(self.scale)
+
+        # Separate the absolute value from its sign.
+        is_negative = int(value.is_signed())
+        value = abs(value).normalize()
+
+        # Prepare scientific notation metadata.
+        if self.exp_prec:
+            value, exp, exp_sign = self.scientific_notation_elements(value, locale, numbering_system=numbering_system)
+
+        # Adjust the precision of the fractional part and force it to the
+        # currency's if necessary.
+        if force_frac:
+            # TODO (3.x?): Remove this parameter
+            warnings.warn(
+                'The force_frac parameter to NumberPattern.apply() is deprecated.',
+                DeprecationWarning,
+                stacklevel=2,
+            )
+            frac_prec = force_frac
+        elif currency and currency_digits:
+            frac_prec = (get_currency_precision(currency), ) * 2
+        else:
+            frac_prec = self.frac_prec
+
+        # Bump decimal precision to the natural precision of the number if it
+        # exceeds the one we're about to use. This adaptive precision is only
+        # triggered if the decimal quantization is disabled or if a scientific
+        # notation pattern has a missing mandatory fractional part (as in the
+        # default '#E0' pattern). This special case has been extensively
+        # discussed at https://github.com/python-babel/babel/pull/494#issuecomment-307649969 .
+        if not decimal_quantization or (self.exp_prec and frac_prec == (0, 0)):
+            frac_prec = (frac_prec[0], max([frac_prec[1], get_decimal_precision(value)]))
+
+        # Render scientific notation.
+        if self.exp_prec:
+            number = ''.join([
+                self._quantize_value(value, locale, frac_prec, group_separator, numbering_system=numbering_system),
+                get_exponential_symbol(locale, numbering_system=numbering_system),
+                exp_sign,  # type: ignore  # exp_sign is always defined here
+                self._format_int(str(exp), self.exp_prec[0], self.exp_prec[1], locale, numbering_system=numbering_system),  # type: ignore  # exp is always defined here
+            ])
+
+        # Is it a significant digits pattern?
+        elif '@' in self.pattern:
+            text = self._format_significant(value,
+                                            self.int_prec[0],
+                                            self.int_prec[1])
+            a, sep, b = text.partition(".")
+            number = self._format_int(a, 0, 1000, locale, numbering_system=numbering_system)
+            if sep:
+                number += get_decimal_symbol(locale, numbering_system=numbering_system) + b
+
+        # A normal number pattern.
+        else:
+            number = self._quantize_value(value, locale, frac_prec, group_separator, numbering_system=numbering_system)
+
+        retval = ''.join([
+            self.prefix[is_negative],
+            number if self.number_pattern != '' else '',
+            self.suffix[is_negative]])
+
+        if '¤' in retval and currency is not None:
+            retval = retval.replace('¤¤¤', get_currency_name(currency, value, locale))
+            retval = retval.replace('¤¤', currency.upper())
+            retval = retval.replace('¤', get_currency_symbol(currency, locale))
+
+        # remove single quotes around text, except for doubled single quotes
+        # which are replaced with a single quote
+        retval = re.sub(r"'([^']*)'", lambda m: m.group(1) or "'", retval)
+
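+        # Putting it together (illustrative): the en_US decimal pattern
+        # '#,##0.###' applied to -1234.567 comes out as '-1,234.567'.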
+        return retval
+
+    #
+    # This is one tricky piece of code.  The idea is to rely as much as possible
+    # on the decimal module to minimize the amount of code.
+    #
+    # Conceptually, the implementation of this method can be summarized in the
+    # following steps:
+    #
+    #   - Move or shift the decimal point (i.e. the exponent) so the maximum
+    #     amount of significant digits fall into the integer part (i.e. to the
+    #     left of the decimal point)
+    #
+    #   - Round the number to the nearest integer, discarding all the fractional
+    #     part which contained extra digits to be eliminated
+    #
+    #   - Convert the rounded integer to a string, that will contain the final
+    #     sequence of significant digits already trimmed to the maximum
+    #
+    #   - Restore the original position of the decimal point, potentially
+    #     padding with zeroes on either side
+    #
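+    # A worked example (illustrative): with minimum=2 and maximum=3,
+    # Decimal('1.2345') is scaled by 10**2 and rounded to the digit string
+    # '123'; restoring the decimal point yields '1.23'.
+    #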
+    def _format_significant(self, value: decimal.Decimal, minimum: int, maximum: int) -> str:
+        exp = value.adjusted()
+        scale = maximum - 1 - exp
+        digits = str(value.scaleb(scale).quantize(decimal.Decimal(1)))
+        if scale <= 0:
+            result = digits + '0' * -scale
+        else:
+            intpart = digits[:-scale]
+            i = len(intpart)
+            j = i + max(minimum - i, 0)
+            result = "{intpart}.{pad:0<{fill}}{fracpart}{fracextra}".format(
+                intpart=intpart or '0',
+                pad='',
+                fill=-min(exp + 1, 0),
+                fracpart=digits[i:j],
+                fracextra=digits[j:].rstrip('0'),
+            ).rstrip('.')
+        return result
+
+    def _format_int(
+        self,
+        value: str,
+        min: int,
+        max: int,
+        locale: Locale | str | None,
+        *,
+        numbering_system: Literal["default"] | str,
+    ) -> str:
+        width = len(value)
+        if width < min:
+            value = '0' * (min - width) + value
+        gsize = self.grouping[0]
+        ret = ''
+        symbol = get_group_symbol(locale, numbering_system=numbering_system)
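+        # Illustrative: with the usual (3, 3) grouping and an en_US-style ','
+        # group symbol, '1234567' is rendered as '1,234,567'.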
+        while len(value) > gsize:
+            ret = symbol + value[-gsize:] + ret
+            value = value[:-gsize]
+            gsize = self.grouping[1]
+        return value + ret
+
+    def _quantize_value(
+        self,
+        value: decimal.Decimal,
+        locale: Locale | str | None,
+        frac_prec: tuple[int, int],
+        group_separator: bool,
+        *,
+        numbering_system: Literal["default"] | str,
+    ) -> str:
+        # If the number is +/-Infinity, we can't quantize it
+        if value.is_infinite():
+            return get_infinity_symbol(locale, numbering_system=numbering_system)
+        quantum = get_decimal_quantum(frac_prec[1])
+        rounded = value.quantize(quantum)
+        a, sep, b = f"{rounded:f}".partition(".")
+        integer_part = a
+        if group_separator:
+            integer_part = self._format_int(a, self.int_prec[0], self.int_prec[1], locale, numbering_system=numbering_system)
+        number = integer_part + self._format_frac(b or '0', locale=locale, force_frac=frac_prec, numbering_system=numbering_system)
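+        # Illustrative: Decimal('1234.5') with frac_prec (2, 2) in an
+        # en_US-style locale becomes '1,234.50' (assuming the usual (3, 3)
+        # grouping).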
+        return number
+
+    def _format_frac(
+        self,
+        value: str,
+        locale: Locale | str | None,
+        force_frac: tuple[int, int] | None = None,
+        *,
+        numbering_system: Literal["default"] | str,
+    ) -> str:
+        min, max = force_frac or self.frac_prec
+        if len(value) < min:
+            value += ('0' * (min - len(value)))
+        if max == 0 or (min == 0 and int(value) == 0):
+            return ''
+        while len(value) > min and value[-1] == '0':
+            value = value[:-1]
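+        # Illustrative: with force_frac (2, 4), '5000' is trimmed to '50' and
+        # rendered as '.50' using the locale's decimal symbol.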
+        return get_decimal_symbol(locale, numbering_system=numbering_system) + value
diff --git a/babel/plural.py b/babel/plural.py
index 8675fdf..01df16c 100644
--- a/babel/plural.py
+++ b/babel/plural.py
@@ -8,18 +8,20 @@
     :license: BSD, see LICENSE for more details.
 """
 from __future__ import annotations
+
 import decimal
 import re
 from collections.abc import Iterable, Mapping
 from typing import TYPE_CHECKING, Any, Callable
+
 if TYPE_CHECKING:
     from typing_extensions import Literal
-_plural_tags = 'zero', 'one', 'two', 'few', 'many', 'other'
+
+_plural_tags = ('zero', 'one', 'two', 'few', 'many', 'other')
 _fallback_tag = 'other'


-def extract_operands(source: (float | decimal.Decimal)) ->tuple[decimal.
-    Decimal | int, int, int, int, int, int, Literal[0], Literal[0]]:
+def extract_operands(source: float | decimal.Decimal) -> tuple[decimal.Decimal | int, int, int, int, int, int, Literal[0], Literal[0]]:
     """Extract operands from a decimal, a float or an int, according to `CLDR rules`_.

     The result is an 8-tuple (n, i, v, w, f, t, c, e), where those symbols are as follows:
@@ -44,7 +46,38 @@ def extract_operands(source: (float | decimal.Decimal)) ->tuple[decimal.
     :return: A n-i-v-w-f-t-c-e tuple
     :rtype: tuple[decimal.Decimal, int, int, int, int, int, int, int]
     """
-    pass
+    n = abs(source)
+    i = int(n)
+    if isinstance(n, float):
+        if i == n:
+            n = i
+        else:
+            # Cast the `float` to a number via the string representation.
+            # This is required for Python 2.6 anyway (it will straight out fail to
+            # do the conversion otherwise), and it's highly unlikely that the user
+            # actually wants the lossless conversion behavior (quoting the Python
+            # documentation):
+            # > If value is a float, the binary floating point value is losslessly
+            # > converted to its exact decimal equivalent.
+            # > This conversion can often require 53 or more digits of precision.
+            # Should the user want that behavior, they can simply pass in a pre-
+            # converted `Decimal` instance of desired accuracy.
+            n = decimal.Decimal(str(n))
+
+    if isinstance(n, decimal.Decimal):
+        dec_tuple = n.as_tuple()
+        exp = dec_tuple.exponent
+        fraction_digits = dec_tuple.digits[exp:] if exp < 0 else ()
+        trailing = ''.join(str(d) for d in fraction_digits)
+        no_trailing = trailing.rstrip('0')
+        v = len(trailing)
+        w = len(no_trailing)
+        f = int(trailing or 0)
+        t = int(no_trailing or 0)
+    else:
+        v = w = f = t = 0
+    c = e = 0  # TODO: c and e are not supported
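+    # Illustrative, following the CLDR operand definitions: Decimal('1.30')
+    # yields n=Decimal('1.30'), i=1, v=2, w=1, f=30, t=3, c=e=0.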
+    return n, i, v, w, f, t, c, e


 class PluralRule:
@@ -67,10 +100,10 @@ class PluralRule:

     .. _`CLDR rules`: https://www.unicode.org/reports/tr35/tr35-33/tr35-numbers.html#Language_Plural_Rules
     """
-    __slots__ = 'abstract', '_func'

-    def __init__(self, rules: (Mapping[str, str] | Iterable[tuple[str, str]])
-        ) ->None:
+    __slots__ = ('abstract', '_func')
+
+    def __init__(self, rules: Mapping[str, str] | Iterable[tuple[str, str]]) -> None:
         """Initialize the rule instance.

         :param rules: a list of ``(tag, expr)``) tuples with the rules
@@ -84,63 +117,63 @@ class PluralRule:
         self.abstract: list[tuple[str, Any]] = []
         for key, expr in sorted(rules):
             if key not in _plural_tags:
-                raise ValueError(f'unknown tag {key!r}')
+                raise ValueError(f"unknown tag {key!r}")
             elif key in found:
-                raise ValueError(f'tag {key!r} defined twice')
+                raise ValueError(f"tag {key!r} defined twice")
             found.add(key)
             ast = _Parser(expr).ast
             if ast:
                 self.abstract.append((key, ast))

-    def __repr__(self) ->str:
+    def __repr__(self) -> str:
         rules = self.rules
-        args = ', '.join([f'{tag}: {rules[tag]}' for tag in _plural_tags if
-            tag in rules])
-        return f'<{type(self).__name__} {args!r}>'
+        args = ", ".join([f"{tag}: {rules[tag]}" for tag in _plural_tags if tag in rules])
+        return f"<{type(self).__name__} {args!r}>"

     @classmethod
-    def parse(cls, rules: (Mapping[str, str] | Iterable[tuple[str, str]] |
-        PluralRule)) ->PluralRule:
+    def parse(cls, rules: Mapping[str, str] | Iterable[tuple[str, str]] | PluralRule) -> PluralRule:
         """Create a `PluralRule` instance for the given rules.  If the rules
         are a `PluralRule` object, that object is returned.

         :param rules: the rules as list or dict, or a `PluralRule` object
         :raise RuleError: if the expression is malformed
         """
-        pass
+        if isinstance(rules, PluralRule):
+            return rules
+        return cls(rules)

     @property
-    def rules(self) ->Mapping[str, str]:
+    def rules(self) -> Mapping[str, str]:
         """The `PluralRule` as a dict of unicode plural rules.

         >>> rule = PluralRule({'one': 'n is 1'})
         >>> rule.rules
         {'one': 'n is 1'}
         """
-        pass
+        _compile = _UnicodeCompiler().compile
+        return {tag: _compile(ast) for tag, ast in self.abstract}

     @property
-    def tags(self) ->frozenset[str]:
+    def tags(self) -> frozenset[str]:
         """A set of explicitly defined tags in this rule.  The implicit default
         ``'other'`` rule is not part of this set unless there is an explicit
         rule for it.
         """
-        pass
+        return frozenset(i[0] for i in self.abstract)

-    def __getstate__(self) ->list[tuple[str, Any]]:
+    def __getstate__(self) -> list[tuple[str, Any]]:
         return self.abstract

-    def __setstate__(self, abstract: list[tuple[str, Any]]) ->None:
+    def __setstate__(self, abstract: list[tuple[str, Any]]) -> None:
         self.abstract = abstract

-    def __call__(self, n: (float | decimal.Decimal)) ->str:
+    def __call__(self, n: float | decimal.Decimal) -> str:
         if not hasattr(self, '_func'):
             self._func = to_python(self)
         return self._func(n)


-def to_javascript(rule: (Mapping[str, str] | Iterable[tuple[str, str]] |
-    PluralRule)) ->str:
+def to_javascript(rule: Mapping[str, str] | Iterable[tuple[str, str]] | PluralRule) -> str:
     """Convert a list/dict of rules or a `PluralRule` object into a JavaScript
     function.  This function depends on no external library:

@@ -155,11 +188,15 @@ def to_javascript(rule: (Mapping[str, str] | Iterable[tuple[str, str]] |
     :param rule: the rules as list or dict, or a `PluralRule` object
     :raise RuleError: if the expression is malformed
     """
-    pass
+    to_js = _JavaScriptCompiler().compile
+    result = ['(function(n) { return ']
+    for tag, ast in PluralRule.parse(rule).abstract:
+        result.append(f"{to_js(ast)} ? {tag!r} : ")
+    result.append('%r; })' % _fallback_tag)
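+    # Illustrative: for the rule {'one': 'n is 1'} this assembles
+    # "(function(n) { return (n == 1) ? 'one' : 'other'; })".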
+    return ''.join(result)


-def to_python(rule: (Mapping[str, str] | Iterable[tuple[str, str]] |
-    PluralRule)) ->Callable[[float | decimal.Decimal], str]:
+def to_python(rule: Mapping[str, str] | Iterable[tuple[str, str]] | PluralRule) -> Callable[[float | decimal.Decimal], str]:
     """Convert a list/dict of rules or a `PluralRule` object into a regular
     Python function.  This is useful in situations where you need a real
     function and don't care about the actual rule object:
@@ -178,11 +215,28 @@ def to_python(rule: (Mapping[str, str] | Iterable[tuple[str, str]] |
     :param rule: the rules as list or dict, or a `PluralRule` object
     :raise RuleError: if the expression is malformed
     """
-    pass
-
-
-def to_gettext(rule: (Mapping[str, str] | Iterable[tuple[str, str]] |
-    PluralRule)) ->str:
+    namespace = {
+        'IN': in_range_list,
+        'WITHIN': within_range_list,
+        'MOD': cldr_modulo,
+        'extract_operands': extract_operands,
+    }
+    to_python_func = _PythonCompiler().compile
+    result = [
+        'def evaluate(n):',
+        ' n, i, v, w, f, t, c, e = extract_operands(n)',
+    ]
+    for tag, ast in PluralRule.parse(rule).abstract:
+        # the str() call is to coerce the tag to the native string.  It's
+        # a limited ascii restricted set of tags anyways so that is fine.
+        result.append(f" if ({to_python_func(ast)}): return {str(tag)!r}")
+    result.append(f" return {_fallback_tag!r}")
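+    # Illustrative generated source for the rule {'one': 'n is 1'}:
+    #   def evaluate(n):
+    #    n, i, v, w, f, t, c, e = extract_operands(n)
+    #    if ((n == 1)): return 'one'
+    #    return 'other'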
+    code = compile('\n'.join(result), '<rule>', 'exec')
+    eval(code, namespace)
+    return namespace['evaluate']
+
+
+def to_gettext(rule: Mapping[str, str] | Iterable[tuple[str, str]] | PluralRule) -> str:
     """The plural rule as gettext expression.  The gettext expression is
     technically limited to integers and returns indices rather than tags.

@@ -192,11 +246,20 @@ def to_gettext(rule: (Mapping[str, str] | Iterable[tuple[str, str]] |
     :param rule: the rules as list or dict, or a `PluralRule` object
     :raise RuleError: if the expression is malformed
     """
-    pass
+    rule = PluralRule.parse(rule)
+
+    used_tags = rule.tags | {_fallback_tag}
+    _compile = _GettextCompiler().compile
+    _get_index = [tag for tag in _plural_tags if tag in used_tags].index

+    result = [f"nplurals={len(used_tags)}; plural=("]
+    for tag, ast in rule.abstract:
+        result.append(f"{_compile(ast)} ? {_get_index(tag)} : ")
+    result.append(f"{_get_index(_fallback_tag)});")
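+    # Illustrative: for the rule {'one': 'n is 1'} this produces
+    # "nplurals=2; plural=((n == 1) ? 0 : 1);".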
+    return ''.join(result)

-def in_range_list(num: (float | decimal.Decimal), range_list: Iterable[
-    Iterable[float | decimal.Decimal]]) ->bool:
+
+def in_range_list(num: float | decimal.Decimal, range_list: Iterable[Iterable[float | decimal.Decimal]]) -> bool:
     """Integer range list test.  This is the callback for the "in" operator
     of the UTS #35 pluralization rule language:

@@ -213,11 +276,10 @@ def in_range_list(num: (float | decimal.Decimal), range_list: Iterable[
     >>> in_range_list(10, [(1, 4), (6, 8)])
     False
     """
-    pass
+    return num == int(num) and within_range_list(num, range_list)


-def within_range_list(num: (float | decimal.Decimal), range_list: Iterable[
-    Iterable[float | decimal.Decimal]]) ->bool:
+def within_range_list(num: float | decimal.Decimal, range_list: Iterable[Iterable[float | decimal.Decimal]]) -> bool:
     """Float range test.  This is the callback for the "within" operator
     of the UTS #35 pluralization rule language:

@@ -234,10 +296,10 @@ def within_range_list(num: (float | decimal.Decimal), range_list: Iterable[
     >>> within_range_list(10.5, [(1, 4), (20, 30)])
     False
     """
-    pass
+    return any(num >= min_ and num <= max_ for min_, max_ in range_list)


-def cldr_modulo(a: float, b: float) ->float:
+def cldr_modulo(a: float, b: float) -> float:
     """Javaish modulo.  This modulo operator returns the value with the sign
     of the dividend rather than the divisor like Python does:

@@ -248,19 +310,91 @@ def cldr_modulo(a: float, b: float) ->float:
     >>> cldr_modulo(3, 5)
     3
     """
-    pass
+    reverse = 0
+    if a < 0:
+        a *= -1
+        reverse = 1
+    if b < 0:
+        b *= -1
+    rv = a % b
+    if reverse:
+        rv *= -1
+    return rv


 class RuleError(Exception):
     """Raised if a rule is malformed."""


-_VARS = {'n', 'i', 'v', 'w', 'f', 't', 'c', 'e'}
-_RULES: list[tuple[str | None, re.Pattern[str]]] = [(None, re.compile(
-    '\\s+', re.UNICODE)), ('word', re.compile(
-    f"\\b(and|or|is|(?:with)?in|not|mod|[{''.join(_VARS)}])\\b")), ('value',
-    re.compile('\\d+')), ('symbol', re.compile('%|,|!=|=')), ('ellipsis',
-    re.compile('\\.{2,3}|\\u2026', re.UNICODE))]
+_VARS = {
+    'n',  # absolute value of the source number.
+    'i',  # integer digits of n.
+    'v',  # number of visible fraction digits in n, with trailing zeros.*
+    'w',  # number of visible fraction digits in n, without trailing zeros.*
+    'f',  # visible fraction digits in n, with trailing zeros.*
+    't',  # visible fraction digits in n, without trailing zeros.*
+    'c',  # compact decimal exponent value: exponent of the power of 10 used in compact decimal formatting.
+    'e',  # currently, synonym for `c`. however, may be redefined in the future.
+}
+
+_RULES: list[tuple[str | None, re.Pattern[str]]] = [
+    (None, re.compile(r'\s+', re.UNICODE)),
+    ('word', re.compile(fr'\b(and|or|is|(?:with)?in|not|mod|[{"".join(_VARS)}])\b')),
+    ('value', re.compile(r'\d+')),
+    ('symbol', re.compile(r'%|,|!=|=')),
+    ('ellipsis', re.compile(r'\.{2,3}|\u2026', re.UNICODE)),  # U+2026: ELLIPSIS
+]
+
+
+def tokenize_rule(s: str) -> list[tuple[str, str]]:
+    s = s.split('@')[0]
+    result: list[tuple[str, str]] = []
+    pos = 0
+    end = len(s)
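+    # Tokens are collected in source order and reversed at the end so the
+    # parser can pop them off the tail; e.g. (illustrative) 'n is 1' becomes
+    # [('value', '1'), ('word', 'is'), ('word', 'n')].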
+    while pos < end:
+        for tok, rule in _RULES:
+            match = rule.match(s, pos)
+            if match is not None:
+                pos = match.end()
+                if tok:
+                    result.append((tok, match.group()))
+                break
+        else:
+            raise RuleError('malformed CLDR pluralization rule.  '
+                            'Got unexpected %r' % s[pos])
+    return result[::-1]
+
+
+def test_next_token(
+    tokens: list[tuple[str, str]],
+    type_: str,
+    value: str | None = None,
+) -> list[tuple[str, str]] | bool:
+    return tokens and tokens[-1][0] == type_ and \
+        (value is None or tokens[-1][1] == value)
+
+
+def skip_token(tokens: list[tuple[str, str]], type_: str, value: str | None = None):
+    if test_next_token(tokens, type_, value):
+        return tokens.pop()
+
+
+def value_node(value: int) -> tuple[Literal['value'], tuple[int]]:
+    return 'value', (value, )
+
+
+def ident_node(name: str) -> tuple[str, tuple[()]]:
+    return name, ()
+
+
+def range_list_node(
+    range_list: Iterable[Iterable[float | decimal.Decimal]],
+) -> tuple[Literal['range_list'], Iterable[Iterable[float | decimal.Decimal]]]:
+    return 'range_list', range_list
+
+
+def negate(rv: tuple[Any, ...]) -> tuple[Literal['not'], tuple[tuple[Any, ...]]]:
+    return 'not', (rv,)
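+
+# For orientation (illustrative, not from the original source): the parser
+# below builds small (op, args) tuples; 'n is 1' parses to
+# ('is', (('n', ()), ('value', (1,)))), which the compilers dispatch on via
+# compile_<op>().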


 class _Parser:
@@ -300,22 +434,99 @@ class _Parser:
     def __init__(self, string):
         self.tokens = tokenize_rule(string)
         if not self.tokens:
+            # If the pattern is only samples, it's entirely possible
+            # no stream of tokens whatsoever is generated.
             self.ast = None
             return
         self.ast = self.condition()
         if self.tokens:
-            raise RuleError(f'Expected end of rule, got {self.tokens[-1][1]!r}'
-                )
+            raise RuleError(f"Expected end of rule, got {self.tokens[-1][1]!r}")
+
+    def expect(self, type_, value=None, term=None):
+        token = skip_token(self.tokens, type_, value)
+        if token is not None:
+            return token
+        if term is None:
+            term = repr(value is None and type_ or value)
+        if not self.tokens:
+            raise RuleError(f"expected {term} but end of rule reached")
+        raise RuleError(f"expected {term} but got {self.tokens[-1][1]!r}")
+
+    def condition(self):
+        op = self.and_condition()
+        while skip_token(self.tokens, 'word', 'or'):
+            op = 'or', (op, self.and_condition())
+        return op
+
+    def and_condition(self):
+        op = self.relation()
+        while skip_token(self.tokens, 'word', 'and'):
+            op = 'and', (op, self.relation())
+        return op
+
+    def relation(self):
+        left = self.expr()
+        if skip_token(self.tokens, 'word', 'is'):
+            return skip_token(self.tokens, 'word', 'not') and 'isnot' or 'is', \
+                (left, self.value())
+        negated = skip_token(self.tokens, 'word', 'not')
+        method = 'in'
+        if skip_token(self.tokens, 'word', 'within'):
+            method = 'within'
+        else:
+            if not skip_token(self.tokens, 'word', 'in'):
+                if negated:
+                    raise RuleError('Cannot negate operator based rules.')
+                return self.newfangled_relation(left)
+        rv = 'relation', (method, left, self.range_list())
+        return negate(rv) if negated else rv
+
+    def newfangled_relation(self, left):
+        if skip_token(self.tokens, 'symbol', '='):
+            negated = False
+        elif skip_token(self.tokens, 'symbol', '!='):
+            negated = True
+        else:
+            raise RuleError('Expected "=" or "!=" or legacy relation')
+        rv = 'relation', ('in', left, self.range_list())
+        return negate(rv) if negated else rv
+
+    def range_or_value(self):
+        left = self.value()
+        if skip_token(self.tokens, 'ellipsis'):
+            return left, self.value()
+        else:
+            return left, left
+
+    def range_list(self):
+        range_list = [self.range_or_value()]
+        while skip_token(self.tokens, 'symbol', ','):
+            range_list.append(self.range_or_value())
+        return range_list_node(range_list)
+
+    def expr(self):
+        word = skip_token(self.tokens, 'word')
+        if word is None or word[1] not in _VARS:
+            raise RuleError('Expected identifier variable')
+        name = word[1]
+        if skip_token(self.tokens, 'word', 'mod'):
+            return 'mod', ((name, ()), self.value())
+        elif skip_token(self.tokens, 'symbol', '%'):
+            return 'mod', ((name, ()), self.value())
+        return ident_node(name)
+
+    def value(self):
+        return value_node(int(self.expect('value')[1]))


 def _binary_compiler(tmpl):
     """Compiler factory for the `_Compiler`."""
-    pass
+    return lambda self, left, right: tmpl % (self.compile(left), self.compile(right))


 def _unary_compiler(tmpl):
     """Compiler factory for the `_Compiler`."""
-    pass
+    return lambda self, x: tmpl % self.compile(x)


 compile_zero = lambda x: '0'
@@ -325,6 +536,11 @@ class _Compiler:
     """The compilers are able to transform the expressions into multiple
     output formats.
     """
+
+    def compile(self, arg):
+        op, args = arg
+        return getattr(self, f"compile_{op}")(*args)
+
     compile_n = lambda x: 'n'
     compile_i = lambda x: 'i'
     compile_v = lambda x: 'v'
@@ -341,37 +557,85 @@ class _Compiler:
     compile_is = _binary_compiler('(%s == %s)')
     compile_isnot = _binary_compiler('(%s != %s)')

+    def compile_relation(self, method, expr, range_list):
+        raise NotImplementedError()
+

 class _PythonCompiler(_Compiler):
     """Compiles an expression to Python."""
+
     compile_and = _binary_compiler('(%s and %s)')
     compile_or = _binary_compiler('(%s or %s)')
     compile_not = _unary_compiler('(not %s)')
     compile_mod = _binary_compiler('MOD(%s, %s)')

+    def compile_relation(self, method, expr, range_list):
+        ranges = ",".join([f"({self.compile(a)}, {self.compile(b)})" for (a, b) in range_list[1]])
+        return f"{method.upper()}({self.compile(expr)}, [{ranges}])"
+

 class _GettextCompiler(_Compiler):
     """Compile into a gettext plural expression."""
+
     compile_i = _Compiler.compile_n
     compile_v = compile_zero
     compile_w = compile_zero
     compile_f = compile_zero
     compile_t = compile_zero

+    def compile_relation(self, method, expr, range_list):
+        rv = []
+        expr = self.compile(expr)
+        for item in range_list[1]:
+            if item[0] == item[1]:
+                rv.append(f"({expr} == {self.compile(item[0])})")
+            else:
+                min, max = map(self.compile, item)
+                rv.append(f"({expr} >= {min} && {expr} <= {max})")
+        return f"({' || '.join(rv)})"
+

 class _JavaScriptCompiler(_GettextCompiler):
     """Compiles the expression to plain JavaScript."""
+
+    # XXX: presently the JavaScript compiler does not implement any of the
+    # fraction operands and basically only deals with integers.
     compile_i = lambda x: 'parseInt(n, 10)'
     compile_v = compile_zero
     compile_w = compile_zero
     compile_f = compile_zero
     compile_t = compile_zero

+    def compile_relation(self, method, expr, range_list):
+        code = _GettextCompiler.compile_relation(
+            self, method, expr, range_list)
+        if method == 'in':
+            expr = self.compile(expr)
+            code = f"(parseInt({expr}, 10) == {expr} && {code})"
+        return code
+

 class _UnicodeCompiler(_Compiler):
     """Compiles the parsed rule back into a unicode pluralization rule string."""
+
+    # XXX: this currently spits out the old syntax instead of the new
+    # one.  We can change that, but it will break a whole bunch of stuff
+    # for users I suppose.
+
     compile_is = _binary_compiler('%s is %s')
     compile_isnot = _binary_compiler('%s is not %s')
     compile_and = _binary_compiler('%s and %s')
     compile_or = _binary_compiler('%s or %s')
     compile_mod = _binary_compiler('%s mod %s')
+
+    def compile_not(self, relation):
+        return self.compile_relation(*relation[1], negated=True)
+
+    def compile_relation(self, method, expr, range_list, negated=False):
+        ranges = []
+        for item in range_list[1]:
+            if item[0] == item[1]:
+                ranges.append(self.compile(item[0]))
+            else:
+                ranges.append(f"{self.compile(item[0])}..{self.compile(item[1])}")
+        return f"{self.compile(expr)}{' not' if negated else ''} {method} {','.join(ranges)}"
diff --git a/babel/support.py b/babel/support.py
index bd493bb..1774d9d 100644
--- a/babel/support.py
+++ b/babel/support.py
@@ -11,17 +11,28 @@
     :license: BSD, see LICENSE for more details.
 """
 from __future__ import annotations
+
 import decimal
 import gettext
 import locale
 import os
 from collections.abc import Iterator
 from typing import TYPE_CHECKING, Any, Callable, Iterable
+
 from babel.core import Locale
 from babel.dates import format_date, format_datetime, format_time, format_timedelta
-from babel.numbers import format_compact_currency, format_compact_decimal, format_currency, format_decimal, format_percent, format_scientific
+from babel.numbers import (
+    format_compact_currency,
+    format_compact_decimal,
+    format_currency,
+    format_decimal,
+    format_percent,
+    format_scientific,
+)
+
 if TYPE_CHECKING:
     from typing_extensions import Literal
+
     from babel.dates import _PredefinedTimeFormat


@@ -38,9 +49,13 @@ class Format:
     u'1.234'
     """

-    def __init__(self, locale: (Locale | str), tzinfo: (datetime.tzinfo |
-        None)=None, *, numbering_system: (Literal['default'] | str)='latn'
-        ) ->None:
+    def __init__(
+        self,
+        locale: Locale | str,
+        tzinfo: datetime.tzinfo | None = None,
+        *,
+        numbering_system: Literal["default"] | str = "latn",
+    ) -> None:
         """Initialize the formatter.

         :param locale: the locale identifier or `Locale` instance
@@ -52,8 +67,11 @@ class Format:
         self.tzinfo = tzinfo
         self.numbering_system = numbering_system

-    def date(self, date: (datetime.date | None)=None, format: (
-        _PredefinedTimeFormat | str)='medium') ->str:
+    def date(
+        self,
+        date: datetime.date | None = None,
+        format: _PredefinedTimeFormat | str = 'medium',
+    ) -> str:
         """Return a date formatted according to the given pattern.

         >>> from datetime import date
@@ -61,36 +79,46 @@ class Format:
         >>> fmt.date(date(2007, 4, 1))
         u'Apr 1, 2007'
         """
-        pass
+        return format_date(date, format, locale=self.locale)

-    def datetime(self, datetime: (datetime.date | None)=None, format: (
-        _PredefinedTimeFormat | str)='medium') ->str:
+    def datetime(
+        self,
+        datetime: datetime.date | None = None,
+        format: _PredefinedTimeFormat | str = 'medium',
+    ) -> str:
         """Return a date and time formatted according to the given pattern.

         >>> from datetime import datetime
         >>> from babel.dates import get_timezone
         >>> fmt = Format('en_US', tzinfo=get_timezone('US/Eastern'))
         >>> fmt.datetime(datetime(2007, 4, 1, 15, 30))
-        u'Apr 1, 2007, 11:30:00 AM'
+        u'Apr 1, 2007, 11:30:00\u202fAM'
         """
-        pass
+        return format_datetime(datetime, format, tzinfo=self.tzinfo, locale=self.locale)

-    def time(self, time: (datetime.time | datetime.datetime | None)=None,
-        format: (_PredefinedTimeFormat | str)='medium') ->str:
+    def time(
+        self,
+        time: datetime.time | datetime.datetime | None = None,
+        format: _PredefinedTimeFormat | str = 'medium',
+    ) -> str:
         """Return a time formatted according to the given pattern.

         >>> from datetime import datetime
         >>> from babel.dates import get_timezone
         >>> fmt = Format('en_US', tzinfo=get_timezone('US/Eastern'))
         >>> fmt.time(datetime(2007, 4, 1, 15, 30))
-        u'11:30:00 AM'
-        """
-        pass
-
-    def timedelta(self, delta: (datetime.timedelta | int), granularity:
-        Literal['year', 'month', 'week', 'day', 'hour', 'minute', 'second']
-        ='second', threshold: float=0.85, format: Literal['narrow', 'short',
-        'medium', 'long']='long', add_direction: bool=False) ->str:
+        u'11:30:00\u202fAM'
+        """
+        return format_time(time, format, tzinfo=self.tzinfo, locale=self.locale)
+
+    def timedelta(
+        self,
+        delta: datetime.timedelta | int,
+        granularity: Literal["year", "month", "week", "day", "hour", "minute", "second"] = "second",
+        threshold: float = 0.85,
+        format: Literal["narrow", "short", "medium", "long"] = "long",
+        add_direction: bool = False,
+    ) -> str:
         """Return a time delta according to the rules of the given locale.

         >>> from datetime import timedelta
@@ -98,30 +126,35 @@ class Format:
         >>> fmt.timedelta(timedelta(weeks=11))
         u'3 months'
         """
-        pass
+        return format_timedelta(delta, granularity=granularity,
+                                threshold=threshold,
+                                format=format, add_direction=add_direction,
+                                locale=self.locale)

-    def number(self, number: (float | decimal.Decimal | str)) ->str:
+    def number(self, number: float | decimal.Decimal | str) -> str:
         """Return an integer number formatted for the locale.

         >>> fmt = Format('en_US')
         >>> fmt.number(1099)
         u'1,099'
         """
-        pass
+        return format_decimal(number, locale=self.locale, numbering_system=self.numbering_system)

-    def decimal(self, number: (float | decimal.Decimal | str), format: (str |
-        None)=None) ->str:
+    def decimal(self, number: float | decimal.Decimal | str, format: str | None = None) -> str:
         """Return a decimal number formatted for the locale.

         >>> fmt = Format('en_US')
         >>> fmt.decimal(1.2345)
         u'1.234'
         """
-        pass
+        return format_decimal(number, format, locale=self.locale, numbering_system=self.numbering_system)

-    def compact_decimal(self, number: (float | decimal.Decimal | str),
-        format_type: Literal['short', 'long']='short', fraction_digits: int=0
-        ) ->str:
+    def compact_decimal(
+        self,
+        number: float | decimal.Decimal | str,
+        format_type: Literal['short', 'long'] = 'short',
+        fraction_digits: int = 0,
+    ) -> str:
         """Return a number formatted in compact form for the locale.

         >>> fmt = Format('en_US')
@@ -130,39 +163,48 @@ class Format:
         >>> fmt.compact_decimal(1234567, format_type='long', fraction_digits=2)
         '1.23 million'
         """
-        pass
+        return format_compact_decimal(
+            number,
+            format_type=format_type,
+            fraction_digits=fraction_digits,
+            locale=self.locale,
+            numbering_system=self.numbering_system,
+        )

-    def currency(self, number: (float | decimal.Decimal | str), currency: str
-        ) ->str:
+    def currency(self, number: float | decimal.Decimal | str, currency: str) -> str:
         """Return a number in the given currency formatted for the locale.
         """
-        pass
+        return format_currency(number, currency, locale=self.locale, numbering_system=self.numbering_system)

-    def compact_currency(self, number: (float | decimal.Decimal | str),
-        currency: str, format_type: Literal['short']='short',
-        fraction_digits: int=0) ->str:
+    def compact_currency(
+        self,
+        number: float | decimal.Decimal | str,
+        currency: str,
+        format_type: Literal['short'] = 'short',
+        fraction_digits: int = 0,
+    ) -> str:
         """Return a number in the given currency formatted for the locale
         using the compact number format.

         >>> Format('en_US').compact_currency(1234567, "USD", format_type='short', fraction_digits=2)
         '$1.23M'
         """
-        pass
+        return format_compact_currency(number, currency, format_type=format_type, fraction_digits=fraction_digits,
+                                       locale=self.locale, numbering_system=self.numbering_system)

-    def percent(self, number: (float | decimal.Decimal | str), format: (str |
-        None)=None) ->str:
+    def percent(self, number: float | decimal.Decimal | str, format: str | None = None) -> str:
         """Return a number formatted as percentage for the locale.

         >>> fmt = Format('en_US')
         >>> fmt.percent(0.34)
         u'34%'
         """
-        pass
+        return format_percent(number, format, locale=self.locale, numbering_system=self.numbering_system)

-    def scientific(self, number: (float | decimal.Decimal | str)) ->str:
+    def scientific(self, number: float | decimal.Decimal | str) -> str:
         """Return a number formatted using scientific notation for the locale.
         """
-        pass
+        return format_scientific(number, locale=self.locale, numbering_system=self.numbering_system)


 class LazyProxy:
@@ -200,8 +242,8 @@ class LazyProxy:
     Hello, universe!
     Hello, world!
     """
-    __slots__ = ['_func', '_args', '_kwargs', '_value', '_is_cache_enabled',
-        '_attribute_error']
+    __slots__ = ['_func', '_args', '_kwargs', '_value', '_is_cache_enabled', '_attribute_error']
+
     if TYPE_CHECKING:
         _func: Callable[..., Any]
         _args: tuple[Any, ...]
@@ -210,8 +252,8 @@ class LazyProxy:
         _value: Any
         _attribute_error: AttributeError | None

-    def __init__(self, func: Callable[..., Any], *args: Any, enable_cache:
-        bool=True, **kwargs: Any) ->None:
+    def __init__(self, func: Callable[..., Any], *args: Any, enable_cache: bool = True, **kwargs: Any) -> None:
+        # Avoid triggering our own __setattr__ implementation
         object.__setattr__(self, '_func', func)
         object.__setattr__(self, '_args', args)
         object.__setattr__(self, '_kwargs', kwargs)
@@ -219,158 +261,200 @@ class LazyProxy:
         object.__setattr__(self, '_value', None)
         object.__setattr__(self, '_attribute_error', None)

-    def __contains__(self, key: object) ->bool:
+    @property
+    def value(self) -> Any:
+        if self._value is None:
+            try:
+                value = self._func(*self._args, **self._kwargs)
+            except AttributeError as error:
+                object.__setattr__(self, '_attribute_error', error)
+                raise
+
+            if not self._is_cache_enabled:
+                return value
+            object.__setattr__(self, '_value', value)
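+        # Note: object.__setattr__ is used here (and in __init__) because the
+        # proxy overrides __setattr__ to forward attribute writes to the
+        # wrapped value.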
+        return self._value
+
+    def __contains__(self, key: object) -> bool:
         return key in self.value

-    def __bool__(self) ->bool:
+    def __bool__(self) -> bool:
         return bool(self.value)

-    def __dir__(self) ->list[str]:
+    def __dir__(self) -> list[str]:
         return dir(self.value)

-    def __iter__(self) ->Iterator[Any]:
+    def __iter__(self) -> Iterator[Any]:
         return iter(self.value)

-    def __len__(self) ->int:
+    def __len__(self) -> int:
         return len(self.value)

-    def __str__(self) ->str:
+    def __str__(self) -> str:
         return str(self.value)

-    def __add__(self, other: object) ->Any:
+    def __add__(self, other: object) -> Any:
         return self.value + other

-    def __radd__(self, other: object) ->Any:
+    def __radd__(self, other: object) -> Any:
         return other + self.value

-    def __mod__(self, other: object) ->Any:
+    def __mod__(self, other: object) -> Any:
         return self.value % other

-    def __rmod__(self, other: object) ->Any:
+    def __rmod__(self, other: object) -> Any:
         return other % self.value

-    def __mul__(self, other: object) ->Any:
+    def __mul__(self, other: object) -> Any:
         return self.value * other

-    def __rmul__(self, other: object) ->Any:
+    def __rmul__(self, other: object) -> Any:
         return other * self.value

-    def __call__(self, *args: Any, **kwargs: Any) ->Any:
+    def __call__(self, *args: Any, **kwargs: Any) -> Any:
         return self.value(*args, **kwargs)

-    def __lt__(self, other: object) ->bool:
+    def __lt__(self, other: object) -> bool:
         return self.value < other

-    def __le__(self, other: object) ->bool:
+    def __le__(self, other: object) -> bool:
         return self.value <= other

-    def __eq__(self, other: object) ->bool:
+    def __eq__(self, other: object) -> bool:
         return self.value == other

-    def __ne__(self, other: object) ->bool:
+    def __ne__(self, other: object) -> bool:
         return self.value != other

-    def __gt__(self, other: object) ->bool:
+    def __gt__(self, other: object) -> bool:
         return self.value > other

-    def __ge__(self, other: object) ->bool:
+    def __ge__(self, other: object) -> bool:
         return self.value >= other

-    def __delattr__(self, name: str) ->None:
+    def __delattr__(self, name: str) -> None:
         delattr(self.value, name)

-    def __getattr__(self, name: str) ->Any:
+    def __getattr__(self, name: str) -> Any:
         if self._attribute_error is not None:
             raise self._attribute_error
         return getattr(self.value, name)

-    def __setattr__(self, name: str, value: Any) ->None:
+    def __setattr__(self, name: str, value: Any) -> None:
         setattr(self.value, name, value)

-    def __delitem__(self, key: Any) ->None:
+    def __delitem__(self, key: Any) -> None:
         del self.value[key]

-    def __getitem__(self, key: Any) ->Any:
+    def __getitem__(self, key: Any) -> Any:
         return self.value[key]

-    def __setitem__(self, key: Any, value: Any) ->None:
+    def __setitem__(self, key: Any, value: Any) -> None:
         self.value[key] = value

-    def __copy__(self) ->LazyProxy:
-        return LazyProxy(self._func, *self._args, enable_cache=self.
-            _is_cache_enabled, **self._kwargs)
+    def __copy__(self) -> LazyProxy:
+        return LazyProxy(
+            self._func,
+            enable_cache=self._is_cache_enabled,
+            *self._args,  # noqa: B026
+            **self._kwargs,
+        )

-    def __deepcopy__(self, memo: Any) ->LazyProxy:
+    def __deepcopy__(self, memo: Any) -> LazyProxy:
         from copy import deepcopy
-        return LazyProxy(deepcopy(self._func, memo), *deepcopy(self._args,
-            memo), enable_cache=deepcopy(self._is_cache_enabled, memo), **
-            deepcopy(self._kwargs, memo))
+        return LazyProxy(
+            deepcopy(self._func, memo),
+            enable_cache=deepcopy(self._is_cache_enabled, memo),
+            *deepcopy(self._args, memo),  # noqa: B026
+            **deepcopy(self._kwargs, memo),
+        )


 class NullTranslations(gettext.NullTranslations):
+
     if TYPE_CHECKING:
         _info: dict[str, str]
         _fallback: NullTranslations | None
+
     DEFAULT_DOMAIN = None

-    def __init__(self, fp: (gettext._TranslationsReader | None)=None) ->None:
+    def __init__(self, fp: gettext._TranslationsReader | None = None) -> None:
         """Initialize a simple translations class which is not backed by a
         real catalog. Behaves similarly to gettext.NullTranslations but also
         offers Babel's own *gettext methods (e.g. 'dgettext()').

         :param fp: a file-like object (ignored in this class)
         """
+        # These attributes are set by gettext.NullTranslations when a catalog
+        # is parsed (fp != None). Ensure that they are always present because
+        # some *gettext methods (including '.gettext()') rely on the attributes.
         self._catalog: dict[tuple[str, Any] | str, str] = {}
-        self.plural: Callable[[float | decimal.Decimal], int] = lambda n: int(
-            n != 1)
+        self.plural: Callable[[float | decimal.Decimal], int] = lambda n: int(n != 1)
         super().__init__(fp=fp)
         self.files = list(filter(None, [getattr(fp, 'name', None)]))
         self.domain = self.DEFAULT_DOMAIN
         self._domains: dict[str, NullTranslations] = {}

-    def dgettext(self, domain: str, message: str) ->str:
+    def dgettext(self, domain: str, message: str) -> str:
         """Like ``gettext()``, but look the message up in the specified
         domain.
         """
-        pass
+        return self._domains.get(domain, self).gettext(message)

-    def ldgettext(self, domain: str, message: str) ->str:
+    def ldgettext(self, domain: str, message: str) -> str:
         """Like ``lgettext()``, but look the message up in the specified
         domain.
         """
-        pass
+        import warnings
+        warnings.warn(
+            'ldgettext() is deprecated, use dgettext() instead',
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        return self._domains.get(domain, self).lgettext(message)

-    def udgettext(self, domain: str, message: str) ->str:
+    def udgettext(self, domain: str, message: str) -> str:
         """Like ``ugettext()``, but look the message up in the specified
         domain.
         """
-        pass
+        return self._domains.get(domain, self).ugettext(message)
+    # backward compatibility with 0.9
     dugettext = udgettext

-    def dngettext(self, domain: str, singular: str, plural: str, num: int
-        ) ->str:
+    def dngettext(self, domain: str, singular: str, plural: str, num: int) -> str:
         """Like ``ngettext()``, but look the message up in the specified
         domain.
         """
-        pass
+        return self._domains.get(domain, self).ngettext(singular, plural, num)

-    def ldngettext(self, domain: str, singular: str, plural: str, num: int
-        ) ->str:
+    def ldngettext(self, domain: str, singular: str, plural: str, num: int) -> str:
         """Like ``lngettext()``, but look the message up in the specified
         domain.
         """
-        pass
+        import warnings
+        warnings.warn(
+            'ldngettext() is deprecated, use dngettext() instead',
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        return self._domains.get(domain, self).lngettext(singular, plural, num)

-    def udngettext(self, domain: str, singular: str, plural: str, num: int
-        ) ->str:
+    def udngettext(self, domain: str, singular: str, plural: str, num: int) -> str:
         """Like ``ungettext()`` but look the message up in the specified
         domain.
         """
-        pass
+        return self._domains.get(domain, self).ungettext(singular, plural, num)
+    # backward compatibility with 0.9
     dungettext = udngettext
+
+    # Most of the code below, until it gets included in the stdlib, is from:
+    #    https://bugs.python.org/file10036/gettext-pgettext.patch
+    #
+    # The encoding of a msgctxt and a msgid in a .mo file is
+    # msgctxt + "\x04" + msgid (gettext version >= 0.15)
     CONTEXT_ENCODING = '%s\x04%s'

-    def pgettext(self, context: str, message: str) ->(str | object):
+    def pgettext(self, context: str, message: str) -> str | object:
         """Look up the `context` and `message` id in the catalog and return the
         corresponding message string, as an 8-bit string encoded with the
         catalog's charset encoding, if known.  If there is no entry in the
@@ -378,17 +462,31 @@ class NullTranslations(gettext.NullTranslations):
         set, the look up is forwarded to the fallback's ``pgettext()``
         method. Otherwise, the `message` id is returned.
         """
-        pass
+        ctxt_msg_id = self.CONTEXT_ENCODING % (context, message)
+        missing = object()
+        tmsg = self._catalog.get(ctxt_msg_id, missing)
+        if tmsg is missing:
+            if self._fallback:
+                return self._fallback.pgettext(context, message)
+            return message
+        return tmsg

-    def lpgettext(self, context: str, message: str) ->(str | bytes | object):
+    def lpgettext(self, context: str, message: str) -> str | bytes | object:
         """Equivalent to ``pgettext()``, but the translation is returned in the
         preferred system encoding, if no other encoding was explicitly set with
         ``bind_textdomain_codeset()``.
         """
-        pass
-
-    def npgettext(self, context: str, singular: str, plural: str, num: int
-        ) ->str:
+        import warnings
+        warnings.warn(
+            'lpgettext() is deprecated, use pgettext() instead',
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        tmsg = self.pgettext(context, message)
+        encoding = getattr(self, "_output_charset", None) or locale.getpreferredencoding()
+        return tmsg.encode(encoding) if isinstance(tmsg, str) else tmsg
+
+    def npgettext(self, context: str, singular: str, plural: str, num: int) -> str:
         """Do a plural-forms lookup of a message id.  `singular` is used as the
         message id for purposes of lookup in the catalog, while `num` is used to
         determine which plural form to use.  The returned message string is an
@@ -399,27 +497,60 @@ class NullTranslations(gettext.NullTranslations):
         ``npgettext()`` method.  Otherwise, when ``num`` is 1 ``singular`` is
         returned, and ``plural`` is returned in all other cases.
         """
-        pass
-
-    def lnpgettext(self, context: str, singular: str, plural: str, num: int
-        ) ->(str | bytes):
+        ctxt_msg_id = self.CONTEXT_ENCODING % (context, singular)
+        try:
+            tmsg = self._catalog[(ctxt_msg_id, self.plural(num))]
+            return tmsg
+        except KeyError:
+            if self._fallback:
+                return self._fallback.npgettext(context, singular, plural, num)
+            if num == 1:
+                return singular
+            else:
+                return plural
+
+    def lnpgettext(self, context: str, singular: str, plural: str, num: int) -> str | bytes:
         """Equivalent to ``npgettext()``, but the translation is returned in the
         preferred system encoding, if no other encoding was explicitly set with
         ``bind_textdomain_codeset()``.
         """
-        pass
-
-    def upgettext(self, context: str, message: str) ->str:
+        import warnings
+        warnings.warn(
+            'lnpgettext() is deprecated, use npgettext() instead',
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        ctxt_msg_id = self.CONTEXT_ENCODING % (context, singular)
+        try:
+            tmsg = self._catalog[(ctxt_msg_id, self.plural(num))]
+            encoding = getattr(self, "_output_charset", None) or locale.getpreferredencoding()
+            return tmsg.encode(encoding)
+        except KeyError:
+            if self._fallback:
+                return self._fallback.lnpgettext(context, singular, plural, num)
+            if num == 1:
+                return singular
+            else:
+                return plural
+
+    def upgettext(self, context: str, message: str) -> str:
         """Look up the `context` and `message` id in the catalog and return the
         corresponding message string, as a Unicode string.  If there is no entry
         in the catalog for the `message` id and `context`, and a fallback has
         been set, the look up is forwarded to the fallback's ``upgettext()``
         method.  Otherwise, the `message` id is returned.
         """
-        pass
-
-    def unpgettext(self, context: str, singular: str, plural: str, num: int
-        ) ->str:
+        ctxt_message_id = self.CONTEXT_ENCODING % (context, message)
+        missing = object()
+        tmsg = self._catalog.get(ctxt_message_id, missing)
+        if tmsg is missing:
+            if self._fallback:
+                return self._fallback.upgettext(context, message)
+            return str(message)
+        assert isinstance(tmsg, str)
+        return tmsg
+
+    def unpgettext(self, context: str, singular: str, plural: str, num: int) -> str:
         """Do a plural-forms lookup of a message id.  `singular` is used as the
         message id for purposes of lookup in the catalog, while `num` is used to
         determine which plural form to use.  The returned message string is a
@@ -430,62 +561,70 @@ class NullTranslations(gettext.NullTranslations):
         ``unpgettext()`` method.  Otherwise, when `num` is 1 `singular` is
         returned, and `plural` is returned in all other cases.
         """
-        pass
+        ctxt_message_id = self.CONTEXT_ENCODING % (context, singular)
+        try:
+            tmsg = self._catalog[(ctxt_message_id, self.plural(num))]
+        except KeyError:
+            if self._fallback:
+                return self._fallback.unpgettext(context, singular, plural, num)
+            tmsg = str(singular) if num == 1 else str(plural)
+        return tmsg

-    def dpgettext(self, domain: str, context: str, message: str) ->(str |
-        object):
+    def dpgettext(self, domain: str, context: str, message: str) -> str | object:
         """Like `pgettext()`, but look the message up in the specified
         `domain`.
         """
-        pass
+        return self._domains.get(domain, self).pgettext(context, message)

-    def udpgettext(self, domain: str, context: str, message: str) ->str:
+    def udpgettext(self, domain: str, context: str, message: str) -> str:
         """Like `upgettext()`, but look the message up in the specified
         `domain`.
         """
-        pass
+        return self._domains.get(domain, self).upgettext(context, message)
+    # backward compatibility with 0.9
     dupgettext = udpgettext

-    def ldpgettext(self, domain: str, context: str, message: str) ->(str |
-        bytes | object):
+    def ldpgettext(self, domain: str, context: str, message: str) -> str | bytes | object:
         """Equivalent to ``dpgettext()``, but the translation is returned in the
         preferred system encoding, if no other encoding was explicitly set with
         ``bind_textdomain_codeset()``.
         """
-        pass
+        return self._domains.get(domain, self).lpgettext(context, message)

-    def dnpgettext(self, domain: str, context: str, singular: str, plural:
-        str, num: int) ->str:
+    def dnpgettext(self, domain: str, context: str, singular: str, plural: str, num: int) -> str:
         """Like ``npgettext``, but look the message up in the specified
         `domain`.
         """
-        pass
+        return self._domains.get(domain, self).npgettext(context, singular,
+                                                         plural, num)

-    def udnpgettext(self, domain: str, context: str, singular: str, plural:
-        str, num: int) ->str:
+    def udnpgettext(self, domain: str, context: str, singular: str, plural: str, num: int) -> str:
         """Like ``unpgettext``, but look the message up in the specified
         `domain`.
         """
-        pass
+        return self._domains.get(domain, self).unpgettext(context, singular,
+                                                          plural, num)
+    # backward compatibility with 0.9
     dunpgettext = udnpgettext

-    def ldnpgettext(self, domain: str, context: str, singular: str, plural:
-        str, num: int) ->(str | bytes):
+    def ldnpgettext(self, domain: str, context: str, singular: str, plural: str, num: int) -> str | bytes:
         """Equivalent to ``dnpgettext()``, but the translation is returned in
         the preferred system encoding, if no other encoding was explicitly set
         with ``bind_textdomain_codeset()``.
         """
-        pass
+        return self._domains.get(domain, self).lnpgettext(context, singular,
+                                                          plural, num)
+
     ugettext = gettext.NullTranslations.gettext
     ungettext = gettext.NullTranslations.ngettext


 class Translations(NullTranslations, gettext.GNUTranslations):
     """An extended translation catalog class."""
+
     DEFAULT_DOMAIN = 'messages'

-    def __init__(self, fp: (gettext._TranslationsReader | None)=None,
-        domain: (str | None)=None):
+    def __init__(self, fp: gettext._TranslationsReader | None = None, domain: str | None = None):
         """Initialize the translations catalog.

         :param fp: the file-like object the translation should be read from
@@ -493,13 +632,17 @@ class Translations(NullTranslations, gettext.GNUTranslations):
         """
         super().__init__(fp=fp)
         self.domain = domain or self.DEFAULT_DOMAIN
+
     ugettext = gettext.GNUTranslations.gettext
     ungettext = gettext.GNUTranslations.ngettext

     @classmethod
-    def load(cls, dirname: (str | os.PathLike[str] | None)=None, locales: (
-        Iterable[str | Locale] | str | Locale | None)=None, domain: (str |
-        None)=None) ->NullTranslations:
+    def load(
+        cls,
+        dirname: str | os.PathLike[str] | None = None,
+        locales: Iterable[str | Locale] | str | Locale | None = None,
+        domain: str | None = None,
+    ) -> NullTranslations:
         """Load translations from the given directory.

         :param dirname: the directory containing the ``MO`` files
@@ -508,13 +651,19 @@ class Translations(NullTranslations, gettext.GNUTranslations):
                         strings)
         :param domain: the message domain (default: 'messages')
         """
-        pass
+        if not domain:
+            domain = cls.DEFAULT_DOMAIN
+        filename = gettext.find(domain, dirname, _locales_to_names(locales))
+        if not filename:
+            return NullTranslations()
+        with open(filename, 'rb') as fp:
+            return cls(fp=fp, domain=domain)

-    def __repr__(self) ->str:
+    def __repr__(self) -> str:
         version = self._info.get('project-id-version')
         return f'<{type(self).__name__}: "{version}">'

-    def add(self, translations: Translations, merge: bool=True):
+    def add(self, translations: Translations, merge: bool = True):
         """Add the given translations to the catalog.

         If the domain of the translations is different than that of the
@@ -527,7 +676,18 @@ class Translations(NullTranslations, gettext.GNUTranslations):
                       already been added should be merged with the existing
                       translations
         """
-        pass
+        domain = getattr(translations, 'domain', self.DEFAULT_DOMAIN)
+        if merge and domain == self.domain:
+            return self.merge(translations)
+
+        existing = self._domains.get(domain)
+        if merge and isinstance(existing, Translations):
+            existing.merge(translations)
+        else:
+            translations.add_fallback(self)
+            self._domains[domain] = translations
+
+        return self

     def merge(self, translations: Translations):
         """Merge the given translations into the catalog.
@@ -538,15 +698,27 @@ class Translations(NullTranslations, gettext.GNUTranslations):
         :param translations: the `Translations` instance with the messages to
                              merge
         """
-        pass
+        if isinstance(translations, gettext.GNUTranslations):
+            self._catalog.update(translations._catalog)
+            if isinstance(translations, Translations):
+                self.files.extend(translations.files)
+
+        return self


-def _locales_to_names(locales: (Iterable[str | Locale] | str | Locale | None)
-    ) ->(list[str] | None):
+def _locales_to_names(
+    locales: Iterable[str | Locale] | str | Locale | None,
+) -> list[str] | None:
     """Normalize a `locales` argument to a list of locale names.

     :param locales: the list of locales in order of preference (items in
                     this list can be either `Locale` objects or locale
                     strings)
     """
-    pass
+    if locales is None:
+        return None
+    if isinstance(locales, Locale):
+        return [str(locales)]
+    if isinstance(locales, str):
+        return [locales]
+    return [str(locale) for locale in locales]
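
A minimal usage sketch (not part of the patch) for the translation methods restored above. It assumes a ``./locale`` directory that already contains compiled ``de_DE/LC_MESSAGES`` catalogs for the ``messages`` and ``validation`` domains; the message ids and context names are made up for illustration:

    from babel.support import Translations

    # Load the main catalog; load() falls back to a NullTranslations
    # instance when no matching .mo file is found.
    catalog = Translations.load('./locale', locales=['de_DE'], domain='messages')

    # Attach a second domain; add() stores it under its own domain name
    # and installs the main catalog as its fallback.
    catalog.add(Translations.load('./locale', locales=['de_DE'], domain='validation'))

    print(catalog.upgettext('menu', 'Open'))                       # context-aware lookup
    print(catalog.unpgettext('files', 'one file', '%d files', 3))  # plural + context
    print(catalog.udpgettext('validation', 'form', 'Required'))    # domain + context
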
diff --git a/babel/units.py b/babel/units.py
index d6014db..36206d0 100644
--- a/babel/units.py
+++ b/babel/units.py
@@ -1,20 +1,25 @@
 from __future__ import annotations
+
 import decimal
 from typing import TYPE_CHECKING
+
 from babel.core import Locale
 from babel.numbers import LC_NUMERIC, format_decimal
+
 if TYPE_CHECKING:
     from typing_extensions import Literal


 class UnknownUnitError(ValueError):
-
-    def __init__(self, unit: str, locale: Locale) ->None:
-        ValueError.__init__(self, f'{unit} is not a known unit in {locale}')
+    def __init__(self, unit: str, locale: Locale) -> None:
+        ValueError.__init__(self, f"{unit} is not a known unit in {locale}")


-def get_unit_name(measurement_unit: str, length: Literal['short', 'long',
-    'narrow']='long', locale: (Locale | str | None)=LC_NUMERIC) ->(str | None):
+def get_unit_name(
+    measurement_unit: str,
+    length: Literal['short', 'long', 'narrow'] = 'long',
+    locale: Locale | str | None = LC_NUMERIC,
+) -> str | None:
     """
     Get the display name for a measurement unit in the given locale.

@@ -36,11 +41,14 @@ def get_unit_name(measurement_unit: str, length: Literal['short', 'long',
     :param locale: the `Locale` object or locale identifier
     :return: The unit display name, or None.
     """
-    pass
+    locale = Locale.parse(locale)
+    unit = _find_unit_pattern(measurement_unit, locale=locale)
+    if not unit:
+        raise UnknownUnitError(unit=measurement_unit, locale=locale)
+    return locale.unit_display_names.get(unit, {}).get(length)


-def _find_unit_pattern(unit_id: str, locale: (Locale | str | None)=LC_NUMERIC
-    ) ->(str | None):
+def _find_unit_pattern(unit_id: str, locale: Locale | str | None = LC_NUMERIC) -> str | None:
     """
     Expand a unit into a qualified form.

@@ -57,13 +65,25 @@ def _find_unit_pattern(unit_id: str, locale: (Locale | str | None)=LC_NUMERIC
     :param unit_id: the code of a measurement unit.
     :return: A key to the `unit_patterns` mapping, or None.
     """
-    pass
-
-
-def format_unit(value: (str | float | decimal.Decimal), measurement_unit:
-    str, length: Literal['short', 'long', 'narrow']='long', format: (str |
-    None)=None, locale: (Locale | str | None)=LC_NUMERIC, *,
-    numbering_system: (Literal['default'] | str)='latn') ->str:
+    locale = Locale.parse(locale)
+    unit_patterns = locale._data["unit_patterns"]
+    if unit_id in unit_patterns:
+        return unit_id
+    for unit_pattern in sorted(unit_patterns, key=len):
+        if unit_pattern.endswith(unit_id):
+            return unit_pattern
+    return None
+
+
+def format_unit(
+    value: str | float | decimal.Decimal,
+    measurement_unit: str,
+    length: Literal['short', 'long', 'narrow'] = 'long',
+    format: str | None = None,
+    locale: Locale | str | None = LC_NUMERIC,
+    *,
+    numbering_system: Literal["default"] | str = "latn",
+) -> str:
     """Format a value of a given unit.

     Values are formatted according to the locale's usual pluralization rules
@@ -115,11 +135,34 @@ def format_unit(value: (str | float | decimal.Decimal), measurement_unit:
                              The special value "default" will use the default numbering system of the locale.
     :raise `UnsupportedNumberingSystemError`: If the numbering system is not supported by the locale.
     """
-    pass
-
-
-def _find_compound_unit(numerator_unit: str, denominator_unit: str, locale:
-    (Locale | str | None)=LC_NUMERIC) ->(str | None):
+    locale = Locale.parse(locale)
+
+    q_unit = _find_unit_pattern(measurement_unit, locale=locale)
+    if not q_unit:
+        raise UnknownUnitError(unit=measurement_unit, locale=locale)
+    unit_patterns = locale._data["unit_patterns"][q_unit].get(length, {})
+
+    if isinstance(value, str):  # Assume the value is a preformatted singular.
+        formatted_value = value
+        plural_form = "one"
+    else:
+        formatted_value = format_decimal(value, format, locale, numbering_system=numbering_system)
+        plural_form = locale.plural_form(value)
+
+    if plural_form in unit_patterns:
+        return unit_patterns[plural_form].format(formatted_value)
+
+    # Fall back to a somewhat bad representation.
+    # nb: This is marked as no-cover, as the current CLDR seemingly has no way for this to happen.
+    fallback_name = get_unit_name(measurement_unit, length=length, locale=locale)  # pragma: no cover
+    return f"{formatted_value} {fallback_name or measurement_unit}"  # pragma: no cover
+
+
+def _find_compound_unit(
+    numerator_unit: str,
+    denominator_unit: str,
+    locale: Locale | str | None = LC_NUMERIC,
+) -> str | None:
     """
     Find a predefined compound unit pattern.

@@ -143,15 +186,41 @@ def _find_compound_unit(numerator_unit: str, denominator_unit: str, locale:
     :return: A key to the `unit_patterns` mapping, or None.
     :rtype: str|None
     """
-    pass
+    locale = Locale.parse(locale)
+
+    # Qualify the numerator and denominator units.  This will turn possibly partial
+    # units like "kilometer" or "hour" into actual units like "length-kilometer" and
+    # "duration-hour".
+
+    resolved_numerator_unit = _find_unit_pattern(numerator_unit, locale=locale)
+    resolved_denominator_unit = _find_unit_pattern(denominator_unit, locale=locale)
+
+    # If either was not found, we can't possibly build a suitable compound unit either.
+    if not (resolved_numerator_unit and resolved_denominator_unit):
+        return None
+
+    # Since compound units are named "speed-kilometer-per-hour", we'll have to slice off
+    # the quantities (i.e. "length", "duration") from both qualified units.
+
+    bare_numerator_unit = resolved_numerator_unit.split("-", 1)[-1]
+    bare_denominator_unit = resolved_denominator_unit.split("-", 1)[-1]
+
+    # Now we can try and rebuild a compound unit specifier, then qualify it:
+
+    return _find_unit_pattern(f"{bare_numerator_unit}-per-{bare_denominator_unit}", locale=locale)


-def format_compound_unit(numerator_value: (str | float | decimal.Decimal),
-    numerator_unit: (str | None)=None, denominator_value: (str | float |
-    decimal.Decimal)=1, denominator_unit: (str | None)=None, length:
-    Literal['short', 'long', 'narrow']='long', format: (str | None)=None,
-    locale: (Locale | str | None)=LC_NUMERIC, *, numbering_system: (Literal
-    ['default'] | str)='latn') ->(str | None):
+def format_compound_unit(
+    numerator_value: str | float | decimal.Decimal,
+    numerator_unit: str | None = None,
+    denominator_value: str | float | decimal.Decimal = 1,
+    denominator_unit: str | None = None,
+    length: Literal["short", "long", "narrow"] = "long",
+    format: str | None = None,
+    locale: Locale | str | None = LC_NUMERIC,
+    *,
+    numbering_system: Literal["default"] | str = "latn",
+) -> str | None:
     """
     Format a compound number value, i.e. "kilometers per hour" or similar.

@@ -202,4 +271,73 @@ def format_compound_unit(numerator_value: (str | float | decimal.Decimal),
     :return: A formatted compound value.
     :raise `UnsupportedNumberingSystemError`: If the numbering system is not supported by the locale.
     """
-    pass
+    locale = Locale.parse(locale)
+
+    # Look for a specific compound unit first...
+
+    if numerator_unit and denominator_unit and denominator_value == 1:
+        compound_unit = _find_compound_unit(numerator_unit, denominator_unit, locale=locale)
+        if compound_unit:
+            return format_unit(
+                numerator_value,
+                compound_unit,
+                length=length,
+                format=format,
+                locale=locale,
+                numbering_system=numbering_system,
+            )
+
+    # ... failing that, construct one "by hand".
+
+    if isinstance(numerator_value, str):  # Numerator is preformatted
+        formatted_numerator = numerator_value
+    elif numerator_unit:  # Numerator has unit
+        formatted_numerator = format_unit(
+            numerator_value,
+            numerator_unit,
+            length=length,
+            format=format,
+            locale=locale,
+            numbering_system=numbering_system,
+        )
+    else:  # Unitless numerator
+        formatted_numerator = format_decimal(
+            numerator_value,
+            format=format,
+            locale=locale,
+            numbering_system=numbering_system,
+        )
+
+    if isinstance(denominator_value, str):  # Denominator is preformatted
+        formatted_denominator = denominator_value
+    elif denominator_unit:  # Denominator has unit
+        if denominator_value == 1:  # support perUnitPatterns when the denominator is 1
+            denominator_unit = _find_unit_pattern(denominator_unit, locale=locale)
+            per_pattern = locale._data["unit_patterns"].get(denominator_unit, {}).get(length, {}).get("per")
+            if per_pattern:
+                return per_pattern.format(formatted_numerator)
+            # See TR-35's per-unit pattern algorithm, point 3.2.
+            # For denominator 1, we replace the value to be formatted with the empty string;
+            # this will make `format_unit` return " second" instead of "1 second".
+            denominator_value = ""
+
+        formatted_denominator = format_unit(
+            denominator_value,
+            measurement_unit=(denominator_unit or ""),
+            length=length,
+            format=format,
+            locale=locale,
+            numbering_system=numbering_system,
+        ).strip()
+    else:  # Bare denominator
+        formatted_denominator = format_decimal(
+            denominator_value,
+            format=format,
+            locale=locale,
+            numbering_system=numbering_system,
+        )
+
+    # TODO: this doesn't support "compound_variations" (or "prefix"), and will fall back to the "x/y" representation
+    per_pattern = locale._data["compound_unit_patterns"].get("per", {}).get(length, {}).get("compound", "{0}/{1}")
+
+    return per_pattern.format(formatted_numerator, formatted_denominator)
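
A short sketch (not part of the patch) exercising the unit helpers above; the locale identifiers are arbitrary and the exact output strings depend on the CLDR data shipped with Babel:

    from babel.units import format_unit, format_compound_unit, get_unit_name

    # Bare unit ids are expanded through _find_unit_pattern(), so
    # "kilometer" resolves to the qualified "length-kilometer".
    print(get_unit_name('kilometer', length='long', locale='en_US'))   # e.g. 'kilometers'
    print(format_unit(12, 'length-kilometer', locale='en_US'))         # e.g. '12 kilometers'
    print(format_unit(1, 'duration-hour', length='short', locale='de_DE'))

    # With a denominator value of 1, a predefined compound pattern such as
    # "speed-kilometer-per-hour" is preferred over the generic "{0}/{1}" form.
    print(format_compound_unit(110, 'kilometer', denominator_unit='hour',
                               length='short', locale='en_US'))
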
diff --git a/babel/util.py b/babel/util.py
index cec00b5..093197f 100644
--- a/babel/util.py
+++ b/babel/util.py
@@ -8,6 +8,7 @@
     :license: BSD, see LICENSE for more details.
 """
 from __future__ import annotations
+
 import codecs
 import collections
 import datetime
@@ -16,12 +17,15 @@ import re
 import textwrap
 from collections.abc import Generator, Iterable
 from typing import IO, Any, TypeVar
+
 from babel import dates, localtime
+
 missing = object()
-_T = TypeVar('_T')
+
+_T = TypeVar("_T")


-def distinct(iterable: Iterable[_T]) ->Generator[_T, None, None]:
+def distinct(iterable: Iterable[_T]) -> Generator[_T, None, None]:
     """Yield all items in an iterable collection that are distinct.

     Unlike when using sets for a similar effect, the original ordering of the
@@ -34,14 +38,19 @@ def distinct(iterable: Iterable[_T]) ->Generator[_T, None, None]:

     :param iterable: the iterable collection providing the data
     """
-    pass
+    seen = set()
+    for item in iter(iterable):
+        if item not in seen:
+            yield item
+            seen.add(item)


+# Regexp to match python magic encoding line
 PYTHON_MAGIC_COMMENT_re = re.compile(
-    b'[ \\t\\f]* \\# .* coding[=:][ \\t]*([-\\w.]+)', re.VERBOSE)
+    br'[ \t\f]* \# .* coding[=:][ \t]*([-\w.]+)', re.VERBOSE)


-def parse_encoding(fp: IO[bytes]) ->(str | None):
+def parse_encoding(fp: IO[bytes]) -> str | None:
     """Deduce the encoding of a source file from magic comment.

     It does this in the same way as the `Python interpreter`__
@@ -52,21 +61,80 @@ def parse_encoding(fp: IO[bytes]) ->(str | None):

     (From Jeff Dairiki)
     """
-    pass
+    pos = fp.tell()
+    fp.seek(0)
+    try:
+        line1 = fp.readline()
+        has_bom = line1.startswith(codecs.BOM_UTF8)
+        if has_bom:
+            line1 = line1[len(codecs.BOM_UTF8):]
+
+        m = PYTHON_MAGIC_COMMENT_re.match(line1)
+        if not m:
+            try:
+                import ast
+                ast.parse(line1.decode('latin-1'))
+            except (ImportError, SyntaxError, UnicodeEncodeError):
+                # Either it's a real syntax error, in which case the source is
+                # not valid python source, or line2 is a continuation of line1,
+                # in which case we don't want to scan line2 for a magic
+                # comment.
+                pass
+            else:
+                line2 = fp.readline()
+                m = PYTHON_MAGIC_COMMENT_re.match(line2)
+
+        if has_bom:
+            if m:
+                magic_comment_encoding = m.group(1).decode('latin-1')
+                if magic_comment_encoding != 'utf-8':
+                    raise SyntaxError(f"encoding problem: {magic_comment_encoding} with BOM")
+            return 'utf-8'
+        elif m:
+            return m.group(1).decode('latin-1')
+        else:
+            return None
+    finally:
+        fp.seek(pos)


 PYTHON_FUTURE_IMPORT_re = re.compile(
-    'from\\s+__future__\\s+import\\s+\\(*(.+)\\)*')
+    r'from\s+__future__\s+import\s+\(*(.+)\)*')


-def parse_future_flags(fp: IO[bytes], encoding: str='latin-1') ->int:
+def parse_future_flags(fp: IO[bytes], encoding: str = 'latin-1') -> int:
     """Parse the compiler flags by :mod:`__future__` from the given Python
     code.
     """
-    pass
-
-
-def pathmatch(pattern: str, filename: str) ->bool:
+    import __future__
+    pos = fp.tell()
+    fp.seek(0)
+    flags = 0
+    try:
+        body = fp.read().decode(encoding)
+
+        # Fix up the source to be (hopefully) parsable by regexpen.
+        # This will likely do untoward things if the source code itself is broken.
+
+        # (1) Fix `import (\n...` to be `import (...`.
+        body = re.sub(r'import\s*\([\r\n]+', 'import (', body)
+        # (2) Join line-ending commas with the next line.
+        body = re.sub(r',\s*[\r\n]+', ', ', body)
+        # (3) Remove backslash line continuations.
+        body = re.sub(r'\\\s*[\r\n]+', ' ', body)
+
+        for m in PYTHON_FUTURE_IMPORT_re.finditer(body):
+            names = [x.strip().strip('()') for x in m.group(1).split(',')]
+            for name in names:
+                feature = getattr(__future__, name, None)
+                if feature:
+                    flags |= feature.compiler_flag
+    finally:
+        fp.seek(pos)
+    return flags
+
+
+def pathmatch(pattern: str, filename: str) -> bool:
     """Extended pathname pattern matching.

     This function is similar to what is provided by the ``fnmatch`` module in
@@ -104,16 +172,41 @@ def pathmatch(pattern: str, filename: str) ->bool:
     :param pattern: the glob pattern
     :param filename: the path name of the file to match against
     """
-    pass
+    symbols = {
+        '?': '[^/]',
+        '?/': '[^/]/',
+        '*': '[^/]+',
+        '*/': '[^/]+/',
+        '**/': '(?:.+/)*?',
+        '**': '(?:.+/)*?[^/]+',
+    }
+
+    if pattern.startswith('^'):
+        buf = ['^']
+        pattern = pattern[1:]
+    elif pattern.startswith('./'):
+        buf = ['^']
+        pattern = pattern[2:]
+    else:
+        buf = []
+
+    for idx, part in enumerate(re.split('([?*]+/?)', pattern)):
+        if idx % 2:
+            buf.append(symbols[part])
+        elif part:
+            buf.append(re.escape(part))
+    match = re.match(f"{''.join(buf)}$", filename.replace(os.sep, "/"))
+    return match is not None


 class TextWrapper(textwrap.TextWrapper):
     wordsep_re = re.compile(
-        '(\\s+|(?<=[\\w\\!\\"\\\'\\&\\.\\,\\?])-{2,}(?=\\w))')
+        r'(\s+|'                                  # any whitespace
+        r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))',   # em-dash
+    )


-def wraptext(text: str, width: int=70, initial_indent: str='',
-    subsequent_indent: str='') ->list[str]:
+def wraptext(text: str, width: int = 70, initial_indent: str = '', subsequent_indent: str = '') -> list[str]:
     """Simple wrapper around the ``textwrap.wrap`` function in the standard
     library. This version does not wrap lines on hyphens in words.

@@ -124,28 +217,45 @@ def wraptext(text: str, width: int=70, initial_indent: str='',
     :param subsequent_indent: string that will be prepended to all lines save
                               the first of wrapped output
     """
-    pass
+    wrapper = TextWrapper(width=width, initial_indent=initial_indent,
+                          subsequent_indent=subsequent_indent,
+                          break_long_words=False)
+    return wrapper.wrap(text)


+# TODO (Babel 3.x): Remove this re-export
 odict = collections.OrderedDict


 class FixedOffsetTimezone(datetime.tzinfo):
     """Fixed offset in minutes east from UTC."""

-    def __init__(self, offset: float, name: (str | None)=None) ->None:
+    def __init__(self, offset: float, name: str | None = None) -> None:
+
         self._offset = datetime.timedelta(minutes=offset)
         if name is None:
             name = 'Etc/GMT%+d' % offset
         self.zone = name

-    def __str__(self) ->str:
+    def __str__(self) -> str:
         return self.zone

-    def __repr__(self) ->str:
+    def __repr__(self) -> str:
         return f'<FixedOffset "{self.zone}" {self._offset}>'

+    def utcoffset(self, dt: datetime.datetime) -> datetime.timedelta:
+        return self._offset

+    def tzname(self, dt: datetime.datetime) -> str:
+        return self.zone
+
+    def dst(self, dt: datetime.datetime) -> datetime.timedelta:
+        return ZERO
+
+
+# Export the localtime functionality here because that's
+# where it was in the past.
+# TODO(3.0): remove these aliases
 UTC = dates.UTC
 LOCALTZ = dates.LOCALTZ
 get_localzone = localtime.get_localzone
@@ -153,3 +263,7 @@ STDOFFSET = localtime.STDOFFSET
 DSTOFFSET = localtime.DSTOFFSET
 DSTDIFF = localtime.DSTDIFF
 ZERO = localtime.ZERO
+
+
+def _cmp(a: Any, b: Any):
+    return (a > b) - (a < b)
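
And a corresponding sketch (not part of the patch) for the utility helpers, with made-up inputs; the commented results follow from the implementations shown above:

    from io import BytesIO
    from babel.util import distinct, parse_encoding, pathmatch, wraptext

    print(list(distinct([2, 1, 2, 3, 1])))                          # [2, 1, 3]
    print(parse_encoding(BytesIO(b'# -*- coding: latin-1 -*-\n')))  # 'latin-1'
    print(pathmatch('**.py', 'foo/bar/baz.py'))                     # True
    print(pathmatch('./foo/**.py', 'blah/foo/bar/baz.py'))          # False
    print(wraptext('Lorem ipsum dolor sit amet, consectetur '
                   'adipiscing elit.', width=30))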