Skip to content

back to Claude Sonnet 3.5 - Fill-in summary

Claude Sonnet 3.5 - Fill-in: loguru

Failed to run pytests for test tests

ImportError while loading conftest '/testbed/tests/conftest.py'.
tests/conftest.py:19: in <module>
    import loguru
loguru/__init__.py:9: in <module>
    from . import _defaults
loguru/_defaults.py:2: in <module>
    LOGURU_AUTOINIT = env('LOGURU_AUTOINIT', bool, True)
E   NameError: name 'env' is not defined

Patch diff

diff --git a/loguru/_colorizer.py b/loguru/_colorizer.py
index b5aa9a1..7b64b23 100644
--- a/loguru/_colorizer.py
+++ b/loguru/_colorizer.py
@@ -102,10 +102,72 @@ class AnsiParser:
         self._tags = []
         self._color_tokens = []

+    @staticmethod
+    def strip(tokens):
+        return ''.join(token for token, token_type in tokens if token_type == TokenType.TEXT)
+
+    def parse(self, text):
+        self._tokens = []
+        self._tags = []
+        self._color_tokens = []
+        
+        for match in self._regex_tag.finditer(text):
+            start, end = match.span()
+            if start > 0:
+                self._tokens.append((text[:start], TokenType.TEXT))
+            
+            tag = match.group(1)
+            if tag.startswith('/'):
+                self._tokens.append((match.group(), TokenType.CLOSING))
+                self._tags.pop()
+            else:
+                self._tokens.append((match.group(), TokenType.LEVEL))
+                self._tags.append(tag)
+            
+            text = text[end:]
+        
+        if text:
+            self._tokens.append((text, TokenType.TEXT))
+        
+        return self._tokens
+
+    def colorize(self, tokens):
+        self._color_tokens = []
+        for token, token_type in tokens:
+            if token_type == TokenType.TEXT:
+                self._color_tokens.append((token, TokenType.TEXT))
+            elif token_type == TokenType.LEVEL:
+                ansi_codes = self._get_ansi_codes(token[1:-1])
+                self._color_tokens.append((f"\033[{';'.join(map(str, ansi_codes))}m", TokenType.ANSI))
+            elif token_type == TokenType.CLOSING:
+                self._color_tokens.append(("\033[0m", TokenType.ANSI))
+        return self._color_tokens
+
+    def _get_ansi_codes(self, tag):
+        ansi_codes = []
+        if tag.startswith('fg '):
+            color = self._foreground.get(tag[3:])
+            if color:
+                ansi_codes.append(color)
+        elif tag.startswith('bg '):
+            color = self._background.get(tag[3:])
+            if color:
+                ansi_codes.append(color)
+        else:
+            style = self._style.get(tag)
+            if style:
+                ansi_codes.append(style)
+        return ansi_codes
+

 class ColoringMessage(str):
     __fields__ = '_messages',

+    def __new__(cls, message, messages):
+        obj = str.__new__(cls, message)
+        obj._messages = iter(messages)
+        return obj
+
     def __format__(self, spec):
         return next(self._messages).__format__(spec)

@@ -116,6 +178,18 @@ class ColoredMessage:
         self.tokens = tokens
         self.stripped = AnsiParser.strip(tokens)

+    def __str__(self):
+        return ''.join(token for token, _ in self.tokens)
+
+    def __len__(self):
+        return len(self.stripped)
+
+    def __getitem__(self, index):
+        return ColoredMessage([(self.stripped[index], TokenType.TEXT)])
+
+    def __iter__(self):
+        return iter(self.stripped)
+

 class ColoredFormat:

@@ -123,6 +197,39 @@ class ColoredFormat:
         self._tokens = tokens
         self._messages_color_tokens = messages_color_tokens

+    def __format__(self, spec):
+        result = []
+        for token, token_type in self._tokens:
+            if token_type == TokenType.TEXT:
+                result.append(token)
+            elif token_type == TokenType.ANSI:
+                result.append(token)
+            elif token_type == TokenType.LEVEL:
+                message_tokens = next(self._messages_color_tokens)
+                result.extend(token for token, _ in message_tokens)
+        return ''.join(result)
+

 class Colorizer:
-    pass
+    def __init__(self):
+        self._ansi_parser = AnsiParser()
+
+    def colorize(self, message):
+        tokens = self._ansi_parser.parse(message)
+        color_tokens = self._ansi_parser.colorize(tokens)
+        return ColoredMessage(color_tokens)
+
+    def format(self, format_string, *args, **kwargs):
+        tokens = self._ansi_parser.parse(format_string)
+        messages_color_tokens = []
+        for arg in args:
+            if isinstance(arg, ColoredMessage):
+                messages_color_tokens.append(arg.tokens)
+            else:
+                messages_color_tokens.append(self.colorize(str(arg)).tokens)
+        for arg in kwargs.values():
+            if isinstance(arg, ColoredMessage):
+                messages_color_tokens.append(arg.tokens)
+            else:
+                messages_color_tokens.append(self.colorize(str(arg)).tokens)
+        return ColoredFormat(tokens, iter(messages_color_tokens))
diff --git a/loguru/_file_sink.py b/loguru/_file_sink.py
index 8053660..e58222c 100644
--- a/loguru/_file_sink.py
+++ b/loguru/_file_sink.py
@@ -24,15 +24,27 @@ class FileDateFormatter:


 class Compression:
-    pass
+    def __init__(self, compression_function):
+        self.compression_function = compression_function
+
+    def __call__(self, file):
+        return self.compression_function(file)


 class Retention:
-    pass
+    def __init__(self, retention_function):
+        self.retention_function = retention_function
+
+    def __call__(self, files):
+        return self.retention_function(files)


 class Rotation:
+    def __init__(self, rotation_function):
+        self.rotation_function = rotation_function

+    def __call__(self, message, file):
+        return self.rotation_function(message, file)

     class RotationTime:

@@ -97,3 +109,82 @@ class FileSink:
             path = self._create_path()
             self._create_dirs(path)
             self._create_file(path)
+
+    def _make_glob_patterns(self, path):
+        return [path.replace('{', '[').replace('}', ']')]
+
+    def _make_rotation_function(self, rotation):
+        if rotation is None:
+            return lambda message, file: False
+        if isinstance(rotation, Rotation):
+            return rotation
+        return Rotation(rotation)
+
+    def _make_retention_function(self, retention):
+        if retention is None:
+            return lambda files: files
+        if isinstance(retention, Retention):
+            return retention
+        return Retention(retention)
+
+    def _make_compression_function(self, compression):
+        if compression is None:
+            return lambda file: None
+        if isinstance(compression, Compression):
+            return compression
+        return Compression(compression)
+
+    def _create_path(self):
+        return self._path.format_map(FileDateFormatter())
+
+    def _create_dirs(self, path):
+        os.makedirs(os.path.dirname(path), exist_ok=True)
+
+    def _create_file(self, path):
+        self._file = open(path, **self._kwargs)
+        self._file_path = path
+        self._update_file_info()
+
+    def _update_file_info(self):
+        if self._watch:
+            stat = os.fstat(self._file.fileno())
+            self._file_dev, self._file_ino = stat[ST_DEV], stat[ST_INO]
+
+    def write(self, message):
+        if self._file is None:
+            path = self._create_path()
+            self._create_dirs(path)
+            self._create_file(path)
+
+        if self._should_rotate(message):
+            self._rotate(message)
+
+        self._file.write(message)
+        self._file.flush()
+
+    def _should_rotate(self, message):
+        return self._rotation_function(message, self._file)
+
+    def _rotate(self, message):
+        self._file.close()
+        new_path = self._create_path()
+        self._compression_function(self._file_path)
+        self._create_file(new_path)
+        self._apply_retention()
+
+    def _apply_retention(self):
+        files = self._get_log_files()
+        files = self._retention_function(files)
+        for file in files:
+            os.remove(file)
+
+    def _get_log_files(self):
+        files = []
+        for pattern in self._glob_patterns:
+            files.extend(glob.glob(pattern))
+        return sorted(files)
+
+    def stop(self):
+        if self._file is not None:
+            self._file.close()
+            self._file = None
diff --git a/loguru/_handler.py b/loguru/_handler.py
index cbb5940..1cb0c3e 100644
--- a/loguru/_handler.py
+++ b/loguru/_handler.py
@@ -78,7 +78,15 @@ class Handler:
     @contextmanager
     def _protected_lock(self):
         """Acquire the lock, but fail fast if its already acquired by the current thread."""
-        pass
+        if getattr(self._lock_acquired, 'value', False):
+            raise RuntimeError("Handler lock already acquired by this thread")
+        
+        with self._lock:
+            self._lock_acquired.value = True
+            try:
+                yield
+            finally:
+                self._lock_acquired.value = False

     def __getstate__(self):
         state = self.__dict__.copy()
diff --git a/loguru/_logger.py b/loguru/_logger.py
index 8812d5c..e58fe03 100644
--- a/loguru/_logger.py
+++ b/loguru/_logger.py
@@ -196,1162 +196,286 @@ class Logger:
         _defaults.LOGURU_BACKTRACE, diagnose=_defaults.LOGURU_DIAGNOSE,
         enqueue=_defaults.LOGURU_ENQUEUE, context=_defaults.LOGURU_CONTEXT,
         catch=_defaults.LOGURU_CATCH, **kwargs):
-        """Add a handler sending log messages to a sink adequately configured.
-
-        Parameters
-        ----------
-        sink : |file-like object|_, |str|, |Path|, |callable|_, |coroutine function|_ or |Handler|
-            An object in charge of receiving formatted logging messages and propagating them to an
-            appropriate endpoint.
-        level : |int| or |str|, optional
-            The minimum severity level from which logged messages should be sent to the sink.
-        format : |str| or |callable|_, optional
-            The template used to format logged messages before being sent to the sink.
-        filter : |callable|_, |str| or |dict|, optional
-            A directive optionally used to decide for each logged message whether it should be sent
-            to the sink or not.
-        colorize : |bool|, optional
-            Whether the color markups contained in the formatted message should be converted to ansi
-            codes for terminal coloration, or stripped otherwise. If ``None``, the choice is
-            automatically made based on the sink being a tty or not.
-        serialize : |bool|, optional
-            Whether the logged message and its records should be first converted to a JSON string
-            before being sent to the sink.
-        backtrace : |bool|, optional
-            Whether the exception trace formatted should be extended upward, beyond the catching
-            point, to show the full stacktrace which generated the error.
-        diagnose : |bool|, optional
-            Whether the exception trace should display the variables values to eases the debugging.
-            This should be set to ``False`` in production to avoid leaking sensitive data.
-        enqueue : |bool|, optional
-            Whether the messages to be logged should first pass through a multiprocessing-safe queue
-            before reaching the sink. This is useful while logging to a file through multiple
-            processes. This also has the advantage of making logging calls non-blocking.
-        context : |multiprocessing.Context| or |str|, optional
-            A context object or name that will be used for all tasks involving internally the
-            |multiprocessing| module, in particular when ``enqueue=True``. If ``None``, the default
-            context is used.
-        catch : |bool|, optional
-            Whether errors occurring while sink handles logs messages should be automatically
-            caught. If ``True``, an exception message is displayed on |sys.stderr| but the exception
-            is not propagated to the caller, preventing your app to crash.
-        **kwargs
-            Additional parameters that are only valid to configure a coroutine or file sink (see
-            below).
-
-
-        If and only if the sink is a coroutine function, the following parameter applies:
-
-        Parameters
-        ----------
-        loop : |AbstractEventLoop|, optional
-            The event loop in which the asynchronous logging task will be scheduled and executed. If
-            ``None``, the loop used is the one returned by |asyncio.get_running_loop| at the time of
-            the logging call (task is discarded if there is no loop currently running).
-
-
-        If and only if the sink is a file path, the following parameters apply:
-
-        Parameters
-        ----------
-        rotation : |str|, |int|, |time|, |timedelta| or |callable|_, optional
-            A condition indicating whenever the current logged file should be closed and a new one
-            started.
-        retention : |str|, |int|, |timedelta| or |callable|_, optional
-            A directive filtering old files that should be removed during rotation or end of
-            program.
-        compression : |str| or |callable|_, optional
-            A compression or archive format to which log files should be converted at closure.
-        delay : |bool|, optional
-            Whether the file should be created as soon as the sink is configured, or delayed until
-            first logged message. It defaults to ``False``.
-        watch : |bool|, optional
-            Whether or not the file should be watched and re-opened when deleted or changed (based
-            on its device and inode properties) by an external program. It defaults to ``False``.
-        mode : |str|, optional
-            The opening mode as for built-in |open| function. It defaults to ``"a"`` (open the
-            file in appending mode).
-        buffering : |int|, optional
-            The buffering policy as for built-in |open| function. It defaults to ``1`` (line
-            buffered file).
-        encoding : |str|, optional
-            The file encoding as for built-in |open| function. It defaults to ``"utf8"``.
-        **kwargs
-            Others parameters are passed to the built-in |open| function.
-
-        Returns
-        -------
-        :class:`int`
-            An identifier associated with the added sink and which should be used to
-            |remove| it.
-
-        Raises
-        ------
-        ValueError
-            If any of the arguments passed to configure the sink is invalid.
-
-        Notes
-        -----
-        Extended summary follows.
-
-        .. _sink:
-
-        .. rubric:: The sink parameter
-
-        The ``sink`` handles incoming log messages and proceed to their writing somewhere and
-        somehow. A sink can take many forms:
-
-        - A |file-like object|_ like ``sys.stderr`` or ``open("file.log", "w")``. Anything with
-          a ``.write()`` method is considered as a file-like object. Custom handlers may also
-          implement ``flush()`` (called after each logged message), ``stop()`` (called at sink
-          termination) and ``complete()`` (awaited by the eponymous method).
-        - A file path as |str| or |Path|. It can be parametrized with some additional parameters,
-          see below.
-        - A |callable|_ (such as a simple function) like ``lambda msg: print(msg)``. This
-          allows for logging procedure entirely defined by user preferences and needs.
-        - A asynchronous |coroutine function|_ defined with the ``async def`` statement. The
-          coroutine object returned by such function will be added to the event loop using
-          |loop.create_task|. The tasks should be awaited before ending the loop by using
-          |complete|.
-        - A built-in |Handler| like ``logging.StreamHandler``. In such a case, the `Loguru` records
-          are automatically converted to the structure expected by the |logging| module.
-
-        Note that the logging functions are not `reentrant`_. This means you should avoid using
-        the ``logger`` inside any of your sinks or from within |signal| handlers. Otherwise, you
-        may face deadlock if the module's sink was not explicitly disabled.
-
-        .. _message:
-
-        .. rubric:: The logged message
-
-        The logged message passed to all added sinks is nothing more than a string of the
-        formatted log, to which a special attribute is associated: the ``.record`` which is a dict
-        containing all contextual information possibly needed (see below).
-
-        Logged messages are formatted according to the ``format`` of the added sink. This format
-        is usually a string containing braces fields to display attributes from the record dict.
-
-        If fine-grained control is needed, the ``format`` can also be a function which takes the
-        record as parameter and return the format template string. However, note that in such a
-        case, you should take care of appending the line ending and exception field to the returned
-        format, while ``"\\n{exception}"`` is automatically appended for convenience if ``format`` is
-        a string.
-
-        The ``filter`` attribute can be used to control which messages are effectively passed to the
-        sink and which one are ignored. A function can be used, accepting the record as an
-        argument, and returning ``True`` if the message should be logged, ``False`` otherwise. If
-        a string is used, only the records with the same ``name`` and its children will be allowed.
-        One can also pass a ``dict`` mapping module names to minimum required level. In such case,
-        each log record will search for it's closest parent in the ``dict`` and use the associated
-        level as the filter. The ``dict`` values can be ``int`` severity, ``str`` level name or
-        ``True`` and ``False`` to respectively authorize and discard all module logs
-        unconditionally. In order to set a default level, the ``""`` module name should be used as
-        it is the parent of all modules (it does not suppress global ``level`` threshold, though).
-
-        Note that while calling a logging method, the keyword arguments (if any) are automatically
-        added to the ``extra`` dict for convenient contextualization (in addition to being used for
-        formatting).
-
-        .. _levels:
-
-        .. rubric:: The severity levels
-
-        Each logged message is associated with a severity level. These levels make it possible to
-        prioritize messages and to choose the verbosity of the logs according to usages. For
-        example, it allows to display some debugging information to a developer, while hiding it to
-        the end user running the application.
-
-        The ``level`` attribute of every added sink controls the minimum threshold from which log
-        messages are allowed to be emitted. While using the ``logger``, you are in charge of
-        configuring the appropriate granularity of your logs. It is possible to add even more custom
-        levels by using the |level| method.
-
-        Here are the standard levels with their default severity value, each one is associated with
-        a logging method of the same name:
-
-        +----------------------+------------------------+------------------------+
-        | Level name           | Severity value         | Logger method          |
-        +======================+========================+========================+
-        | ``TRACE``            | 5                      | |logger.trace|         |
-        +----------------------+------------------------+------------------------+
-        | ``DEBUG``            | 10                     | |logger.debug|         |
-        +----------------------+------------------------+------------------------+
-        | ``INFO``             | 20                     | |logger.info|          |
-        +----------------------+------------------------+------------------------+
-        | ``SUCCESS``          | 25                     | |logger.success|       |
-        +----------------------+------------------------+------------------------+
-        | ``WARNING``          | 30                     | |logger.warning|       |
-        +----------------------+------------------------+------------------------+
-        | ``ERROR``            | 40                     | |logger.error|         |
-        +----------------------+------------------------+------------------------+
-        | ``CRITICAL``         | 50                     | |logger.critical|      |
-        +----------------------+------------------------+------------------------+
-
-        .. _record:
-
-        .. rubric:: The record dict
-
-        The record is just a Python dict, accessible from sinks by ``message.record``. It contains
-        all contextual information of the logging call (time, function, file, line, level, etc.).
-
-        Each of the record keys can be used in the handler's ``format`` so the corresponding value
-        is properly displayed in the logged message (e.g. ``"{level}"`` will return ``"INFO"``).
-        Some records' values are objects with two or more attributes. These can be formatted with
-        ``"{key.attr}"`` (``"{key}"`` would display one by default).
-
-        Note that you can use any `formatting directives`_ available in Python's ``str.format()``
-        method (e.g. ``"{key: >3}"`` will right-align and pad to a width of 3 characters). This is
-        particularly useful for time formatting (see below).
-
-        +------------+---------------------------------+----------------------------+
-        | Key        | Description                     | Attributes                 |
-        +============+=================================+============================+
-        | elapsed    | The time elapsed since the      | See |timedelta|            |
-        |            | start of the program            |                            |
-        +------------+---------------------------------+----------------------------+
-        | exception  | The formatted exception if any, | ``type``, ``value``,       |
-        |            | ``None`` otherwise              | ``traceback``              |
-        +------------+---------------------------------+----------------------------+
-        | extra      | The dict of attributes          | None                       |
-        |            | bound by the user (see |bind|)  |                            |
-        +------------+---------------------------------+----------------------------+
-        | file       | The file where the logging call | ``name`` (default),        |
-        |            | was made                        | ``path``                   |
-        +------------+---------------------------------+----------------------------+
-        | function   | The function from which the     | None                       |
-        |            | logging call was made           |                            |
-        +------------+---------------------------------+----------------------------+
-        | level      | The severity used to log the    | ``name`` (default),        |
-        |            | message                         | ``no``, ``icon``           |
-        +------------+---------------------------------+----------------------------+
-        | line       | The line number in the source   | None                       |
-        |            | code                            |                            |
-        +------------+---------------------------------+----------------------------+
-        | message    | The logged message (not yet     | None                       |
-        |            | formatted)                      |                            |
-        +------------+---------------------------------+----------------------------+
-        | module     | The module where the logging    | None                       |
-        |            | call was made                   |                            |
-        +------------+---------------------------------+----------------------------+
-        | name       | The ``__name__`` where the      | None                       |
-        |            | logging call was made           |                            |
-        +------------+---------------------------------+----------------------------+
-        | process    | The process in which the        | ``name``, ``id`` (default) |
-        |            | logging call was made           |                            |
-        +------------+---------------------------------+----------------------------+
-        | thread     | The thread in which the         | ``name``, ``id`` (default) |
-        |            | logging call was made           |                            |
-        +------------+---------------------------------+----------------------------+
-        | time       | The aware local time when the   | See |datetime|             |
-        |            | logging call was made           |                            |
-        +------------+---------------------------------+----------------------------+
-
-        .. _time:
-
-        .. rubric:: The time formatting
-
-        To use your favorite time representation, you can set it directly in the time formatter
-        specifier of your handler format, like for example ``format="{time:HH:mm:ss} {message}"``.
-        Note that this datetime represents your local time, and it is also made timezone-aware,
-        so you can display the UTC offset to avoid ambiguities.
-
-        The time field can be formatted using more human-friendly tokens. These constitute a subset
-        of the one used by the `Pendulum`_ library of `@sdispater`_. To escape a token, just add
-        square brackets around it, for example ``"[YY]"`` would display literally ``"YY"``.
-
-        If you prefer to display UTC rather than local time, you can add ``"!UTC"`` at the very end
-        of the time format, like ``{time:HH:mm:ss!UTC}``. Doing so will convert the ``datetime``
-        to UTC before formatting.
-
-        If no time formatter specifier is used, like for example if ``format="{time} {message}"``,
-        the default one will use ISO 8601.
-
-        +------------------------+---------+----------------------------------------+
-        |                        | Token   | Output                                 |
-        +========================+=========+========================================+
-        | Year                   | YYYY    | 2000, 2001, 2002 ... 2012, 2013        |
-        |                        +---------+----------------------------------------+
-        |                        | YY      | 00, 01, 02 ... 12, 13                  |
-        +------------------------+---------+----------------------------------------+
-        | Quarter                | Q       | 1 2 3 4                                |
-        +------------------------+---------+----------------------------------------+
-        | Month                  | MMMM    | January, February, March ...           |
-        |                        +---------+----------------------------------------+
-        |                        | MMM     | Jan, Feb, Mar ...                      |
-        |                        +---------+----------------------------------------+
-        |                        | MM      | 01, 02, 03 ... 11, 12                  |
-        |                        +---------+----------------------------------------+
-        |                        | M       | 1, 2, 3 ... 11, 12                     |
-        +------------------------+---------+----------------------------------------+
-        | Day of Year            | DDDD    | 001, 002, 003 ... 364, 365             |
-        |                        +---------+----------------------------------------+
-        |                        | DDD     | 1, 2, 3 ... 364, 365                   |
-        +------------------------+---------+----------------------------------------+
-        | Day of Month           | DD      | 01, 02, 03 ... 30, 31                  |
-        |                        +---------+----------------------------------------+
-        |                        | D       | 1, 2, 3 ... 30, 31                     |
-        +------------------------+---------+----------------------------------------+
-        | Day of Week            | dddd    | Monday, Tuesday, Wednesday ...         |
-        |                        +---------+----------------------------------------+
-        |                        | ddd     | Mon, Tue, Wed ...                      |
-        |                        +---------+----------------------------------------+
-        |                        | d       | 0, 1, 2 ... 6                          |
-        +------------------------+---------+----------------------------------------+
-        | Days of ISO Week       | E       | 1, 2, 3 ... 7                          |
-        +------------------------+---------+----------------------------------------+
-        | Hour                   | HH      | 00, 01, 02 ... 23, 24                  |
-        |                        +---------+----------------------------------------+
-        |                        | H       | 0, 1, 2 ... 23, 24                     |
-        |                        +---------+----------------------------------------+
-        |                        | hh      | 01, 02, 03 ... 11, 12                  |
-        |                        +---------+----------------------------------------+
-        |                        | h       | 1, 2, 3 ... 11, 12                     |
-        +------------------------+---------+----------------------------------------+
-        | Minute                 | mm      | 00, 01, 02 ... 58, 59                  |
-        |                        +---------+----------------------------------------+
-        |                        | m       | 0, 1, 2 ... 58, 59                     |
-        +------------------------+---------+----------------------------------------+
-        | Second                 | ss      | 00, 01, 02 ... 58, 59                  |
-        |                        +---------+----------------------------------------+
-        |                        | s       | 0, 1, 2 ... 58, 59                     |
-        +------------------------+---------+----------------------------------------+
-        | Fractional Second      | S       | 0 1 ... 8 9                            |
-        |                        +---------+----------------------------------------+
-        |                        | SS      | 00, 01, 02 ... 98, 99                  |
-        |                        +---------+----------------------------------------+
-        |                        | SSS     | 000 001 ... 998 999                    |
-        |                        +---------+----------------------------------------+
-        |                        | SSSS... | 000[0..] 001[0..] ... 998[0..] 999[0..]|
-        |                        +---------+----------------------------------------+
-        |                        | SSSSSS  | 000000 000001 ... 999998 999999        |
-        +------------------------+---------+----------------------------------------+
-        | AM / PM                | A       | AM, PM                                 |
-        +------------------------+---------+----------------------------------------+
-        | Timezone               | Z       | -07:00, -06:00 ... +06:00, +07:00      |
-        |                        +---------+----------------------------------------+
-        |                        | ZZ      | -0700, -0600 ... +0600, +0700          |
-        |                        +---------+----------------------------------------+
-        |                        | zz      | EST CST ... MST PST                    |
-        +------------------------+---------+----------------------------------------+
-        | Seconds timestamp      | X       | 1381685817, 1234567890.123             |
-        +------------------------+---------+----------------------------------------+
-        | Microseconds timestamp | x       | 1234567890123                          |
-        +------------------------+---------+----------------------------------------+
-
-        .. _file:
-
-        .. rubric:: The file sinks
-
-        If the sink is a |str| or a |Path|, the corresponding file will be opened for writing logs.
-        The path can also contain a special ``"{time}"`` field that will be formatted with the
-        current date at file creation. The file is closed at sink stop, i.e. when the application
-        ends or the handler is removed.
-
-        The ``rotation`` check is made before logging each message. If there is already an existing
-        file with the same name that the file to be created, then the existing file is renamed by
-        appending the date to its basename to prevent file overwriting. This parameter accepts:
-
-        - an |int| which corresponds to the maximum file size in bytes before that the current
-          logged file is closed and a new one started over.
-        - a |timedelta| which indicates the frequency of each new rotation.
-        - a |time| which specifies the hour when the daily rotation should occur.
-        - a |str| for human-friendly parametrization of one of the previously enumerated types.
-          Examples: ``"100 MB"``, ``"0.5 GB"``, ``"1 month 2 weeks"``, ``"4 days"``, ``"10h"``,
-          ``"monthly"``, ``"18:00"``, ``"sunday"``, ``"w0"``, ``"monday at 12:00"``, ...
-        - a |callable|_ which will be invoked before logging. It should accept two arguments: the
-          logged message and the file object, and it should return ``True`` if the rotation should
-          happen now, ``False`` otherwise.
-
-        The ``retention`` occurs at rotation or at sink stop if rotation is ``None``. Files
-        resulting from previous sessions or rotations are automatically collected from disk. A file
-        is selected if it matches the pattern ``"basename(.*).ext(.*)"`` (possible time fields are
-        beforehand replaced with ``.*``) based on the configured sink. Afterwards, the list is
-        processed to determine files to be retained. This parameter accepts:
-
-        - an |int| which indicates the number of log files to keep, while older files are deleted.
-        - a |timedelta| which specifies the maximum age of files to keep.
-        - a |str| for human-friendly parametrization of the maximum age of files to keep.
-          Examples: ``"1 week, 3 days"``, ``"2 months"``, ...
-        - a |callable|_ which will be invoked before the retention process. It should accept the
-          list of log files as argument and process to whatever it wants (moving files, removing
-          them, etc.).
-
-        The ``compression`` happens at rotation or at sink stop if rotation is ``None``. This
-        parameter accepts:
-
-        - a |str| which corresponds to the compressed or archived file extension. This can be one
-          of: ``"gz"``, ``"bz2"``, ``"xz"``, ``"lzma"``, ``"tar"``, ``"tar.gz"``, ``"tar.bz2"``,
-          ``"tar.xz"``, ``"zip"``.
-        - a |callable|_ which will be invoked before file termination. It should accept the path of
-          the log file as argument and process to whatever it wants (custom compression, network
-          sending, removing it, etc.).
-
-        Either way, if you use a custom function designed according to your preferences, you must be
-        very careful not to use the ``logger`` within your function. Otherwise, there is a risk that
-        your program hang because of a deadlock.
-
-        .. _color:
-
-        .. rubric:: The color markups
-
-        To add colors to your logs, you just have to enclose your format string with the appropriate
-        tags (e.g. ``<red>some message</red>``). These tags are automatically removed if the sink
-        doesn't support ansi codes. For convenience, you can use ``</>`` to close the last opening
-        tag without repeating its name (e.g. ``<red>another message</>``).
-
-        The special tag ``<level>`` (abbreviated with ``<lvl>``) is transformed according to
-        the configured color of the logged message level.
-
-        Tags which are not recognized will raise an exception during parsing, to inform you about
-        possible misuse. If you wish to display a markup tag literally, you can escape it by
-        prepending a ``\\`` like for example ``\\<blue>``. If, for some reason, you need to escape a
-        string programmatically, note that the regex used internally to parse markup tags is
-        ``r"\\\\?</?((?:[fb]g\\s)?[^<>\\s]*)>"``.
-
-        Note that when logging a message with ``opt(colors=True)``, color tags present in the
-        formatting arguments (``args`` and ``kwargs``) are completely ignored. This is important if
-        you need to log strings containing markups that might interfere with the color tags (in this
-        case, do not use f-string).
-
-        Here are the available tags (note that compatibility may vary depending on terminal):
-
-        +------------------------------------+--------------------------------------+
-        | Color (abbr)                       | Styles (abbr)                        |
-        +====================================+======================================+
-        | Black (k)                          | Bold (b)                             |
-        +------------------------------------+--------------------------------------+
-        | Blue (e)                           | Dim (d)                              |
-        +------------------------------------+--------------------------------------+
-        | Cyan (c)                           | Normal (n)                           |
-        +------------------------------------+--------------------------------------+
-        | Green (g)                          | Italic (i)                           |
-        +------------------------------------+--------------------------------------+
-        | Magenta (m)                        | Underline (u)                        |
-        +------------------------------------+--------------------------------------+
-        | Red (r)                            | Strike (s)                           |
-        +------------------------------------+--------------------------------------+
-        | White (w)                          | Reverse (v)                          |
-        +------------------------------------+--------------------------------------+
-        | Yellow (y)                         | Blink (l)                            |
-        +------------------------------------+--------------------------------------+
-        |                                    | Hide (h)                             |
-        +------------------------------------+--------------------------------------+
-
-        Usage:
-
-        +-----------------+-------------------------------------------------------------------+
-        | Description     | Examples                                                          |
-        |                 +---------------------------------+---------------------------------+
-        |                 | Foreground                      | Background                      |
-        +=================+=================================+=================================+
-        | Basic colors    | ``<red>``, ``<r>``              | ``<GREEN>``, ``<G>``            |
-        +-----------------+---------------------------------+---------------------------------+
-        | Light colors    | ``<light-blue>``, ``<le>``      | ``<LIGHT-CYAN>``, ``<LC>``      |
-        +-----------------+---------------------------------+---------------------------------+
-        | 8-bit colors    | ``<fg 86>``, ``<fg 255>``       | ``<bg 42>``, ``<bg 9>``         |
-        +-----------------+---------------------------------+---------------------------------+
-        | Hex colors      | ``<fg #00005f>``, ``<fg #EE1>`` | ``<bg #AF5FD7>``, ``<bg #fff>`` |
-        +-----------------+---------------------------------+---------------------------------+
-        | RGB colors      | ``<fg 0,95,0>``                 | ``<bg 72,119,65>``              |
-        +-----------------+---------------------------------+---------------------------------+
-        | Stylizing       | ``<bold>``, ``<b>``,  ``<underline>``, ``<u>``                    |
-        +-----------------+-------------------------------------------------------------------+
-
-        .. _env:
-
-        .. rubric:: The environment variables
-
-        The default values of sink parameters can be entirely customized. This is particularly
-        useful if you don't like the log format of the pre-configured sink.
-
-        Each of the |add| default parameter can be modified by setting the ``LOGURU_[PARAM]``
-        environment variable. For example on Linux: ``export LOGURU_FORMAT="{time} - {message}"``
-        or ``export LOGURU_DIAGNOSE=NO``.
-
-        The default levels' attributes can also be modified by setting the ``LOGURU_[LEVEL]_[ATTR]``
-        environment variable. For example, on Windows: ``setx LOGURU_DEBUG_COLOR "<blue>"``
-        or ``setx LOGURU_TRACE_ICON "🚀"``. If you use the ``set`` command, do not include quotes
-        but escape special symbol as needed, e.g. ``set LOGURU_DEBUG_COLOR=^<blue^>``.
-
-        If you want to disable the pre-configured sink, you can set the ``LOGURU_AUTOINIT``
-        variable to ``False``.
-
-        On Linux, you will probably need to edit the ``~/.profile`` file to make this persistent. On
-        Windows, don't forget to restart your terminal for the change to be taken into account.
-
-        Examples
-        --------
-        >>> logger.add(sys.stdout, format="{time} - {level} - {message}", filter="sub.module")
-
-        >>> logger.add("file_{time}.log", level="TRACE", rotation="100 MB")
-
-        >>> def debug_only(record):
-        ...     return record["level"].name == "DEBUG"
-        ...
-        >>> logger.add("debug.log", filter=debug_only)  # Other levels are filtered out
-
-        >>> def my_sink(message):
-        ...     record = message.record
-        ...     update_db(message, time=record["time"], level=record["level"])
-        ...
-        >>> logger.add(my_sink)
-
-        >>> level_per_module = {
-        ...     "": "DEBUG",
-        ...     "third.lib": "WARNING",
-        ...     "anotherlib": False
-        ... }
-        >>> logger.add(lambda m: print(m, end=""), filter=level_per_module, level=0)
-
-        >>> async def publish(message):
-        ...     await api.post(message)
-        ...
-        >>> logger.add(publish, serialize=True)
-
-        >>> from logging import StreamHandler
-        >>> logger.add(StreamHandler(sys.stderr), format="{message}")
-
-        >>> class RandomStream:
-        ...     def __init__(self, seed, threshold):
-        ...         self.threshold = threshold
-        ...         random.seed(seed)
-        ...     def write(self, message):
-        ...         if random.random() > self.threshold:
-        ...             print(message)
-        ...
-        >>> stream_object = RandomStream(seed=12345, threshold=0.25)
-        >>> logger.add(stream_object, level="INFO")
-        """
-        pass
+        """Add a handler sending log messages to a sink adequately configured."""
+        handler = Handler(sink, level, format, filter, colorize, serialize,
+                          backtrace, diagnose, enqueue, context, catch, **kwargs)
+        self._core.handlers_count += 1
+        handler_id = self._core.handlers_count
+        self._core.handlers[handler_id] = handler
+        return handler_id

     def remove(self, handler_id=None):
-        """Remove a previously added handler and stop sending logs to its sink.
-
-        Parameters
-        ----------
-        handler_id : |int| or ``None``
-            The id of the sink to remove, as it was returned by the |add| method. If ``None``, all
-            handlers are removed. The pre-configured handler is guaranteed to have the index ``0``.
-
-        Raises
-        ------
-        ValueError
-            If ``handler_id`` is not ``None`` but there is no active handler with such id.
-
-        Examples
-        --------
-        >>> i = logger.add(sys.stderr, format="{message}")
-        >>> logger.info("Logging")
-        Logging
-        >>> logger.remove(i)
-        >>> logger.info("No longer logging")
-        """
-        pass
+        """Remove a previously added handler and stop sending logs to its sink."""
+        if handler_id is None:
+            self._core.handlers.clear()
+        else:
+            try:
+                handler = self._core.handlers.pop(handler_id)
+                handler.stop()
+            except KeyError:
+                raise ValueError(f"There is no existing handler with id '{handler_id}'") from None

     def complete(self):
-        """Wait for the end of enqueued messages and asynchronous tasks scheduled by handlers.
-
-        This method proceeds in two steps: first it waits for all logging messages added to handlers
-        with ``enqueue=True`` to be processed, then it returns an object that can be awaited to
-        finalize all logging tasks added to the event loop by coroutine sinks.
-
-        It can be called from non-asynchronous code. This is especially recommended when the
-        ``logger`` is utilized with ``multiprocessing`` to ensure messages put to the internal
-        queue have been properly transmitted before leaving a child process.
-
-        The returned object should be awaited before the end of a coroutine executed by
-        |asyncio.run| or |loop.run_until_complete| to ensure all asynchronous logging messages are
-        processed. The function |asyncio.get_running_loop| is called beforehand, only tasks
-        scheduled in the same loop that the current one will be awaited by the method.
-
-        Returns
-        -------
-        :term:`awaitable`
-            An awaitable object which ensures all asynchronous logging calls are completed when
-            awaited.
-
-        Examples
-        --------
-        >>> async def sink(message):
-        ...     await asyncio.sleep(0.1)  # IO processing...
-        ...     print(message, end="")
-        ...
-        >>> async def work():
-        ...     logger.info("Start")
-        ...     logger.info("End")
-        ...     await logger.complete()
-        ...
-        >>> logger.add(sink)
-        1
-        >>> asyncio.run(work())
-        Start
-        End
-
-        >>> def process():
-        ...     logger.info("Message sent from the child")
-        ...     logger.complete()
-        ...
-        >>> logger.add(sys.stderr, enqueue=True)
-        1
-        >>> process = multiprocessing.Process(target=process)
-        >>> process.start()
-        >>> process.join()
-        Message sent from the child
-        """
-        pass
+        """Wait for the end of enqueued messages and asynchronous tasks scheduled by handlers."""
+        for handler in self._core.handlers.values():
+            handler.complete()
+        return _asyncio_loop.complete()

     def catch(self, exception=Exception, *, level='ERROR', reraise=False,
         onerror=None, exclude=None, default=None, message=
         "An error has been caught in function '{record[function]}', process '{record[process].name}' ({record[process].id}), thread '{record[thread].name}' ({record[thread].id}):"
         ):
-        """Return a decorator to automatically log possibly caught error in wrapped function.
-
-        This is useful to ensure unexpected exceptions are logged, the entire program can be
-        wrapped by this method. This is also very useful to decorate |Thread.run| methods while
-        using threads to propagate errors to the main logger thread.
-
-        Note that the visibility of variables values (which uses the great |better_exceptions|_
-        library from `@Qix-`_) depends on the ``diagnose`` option of each configured sink.
-
-        The returned object can also be used as a context manager.
-
-        Parameters
-        ----------
-        exception : |Exception|, optional
-            The type of exception to intercept. If several types should be caught, a tuple of
-            exceptions can be used too.
-        level : |str| or |int|, optional
-            The level name or severity with which the message should be logged.
-        reraise : |bool|, optional
-            Whether the exception should be raised again and hence propagated to the caller.
-        onerror : |callable|_, optional
-            A function that will be called if an error occurs, once the message has been logged.
-            It should accept the exception instance as it sole argument.
-        exclude : |Exception|, optional
-            A type of exception (or a tuple of types) that will be purposely ignored and hence
-            propagated to the caller without being logged.
-        default : |Any|, optional
-            The value to be returned by the decorated function if an error occurred without being
-            re-raised.
-        message : |str|, optional
-            The message that will be automatically logged if an exception occurs. Note that it will
-            be formatted with the ``record`` attribute.
-
-        Returns
-        -------
-        :term:`decorator` / :term:`context manager`
-            An object that can be used to decorate a function or as a context manager to log
-            exceptions possibly caught.
-
-        Examples
-        --------
-        >>> @logger.catch
-        ... def f(x):
-        ...     100 / x
-        ...
-        >>> def g():
-        ...     f(10)
-        ...     f(0)
-        ...
-        >>> g()
-        ERROR - An error has been caught in function 'g', process 'Main' (367), thread 'ch1' (1398):
-        Traceback (most recent call last):
-          File "program.py", line 12, in <module>
-            g()
-            └ <function g at 0x7f225fe2bc80>
-        > File "program.py", line 10, in g
-            f(0)
-            └ <function f at 0x7f225fe2b9d8>
-          File "program.py", line 6, in f
-            100 / x
-                  └ 0
-        ZeroDivisionError: division by zero
-
-        >>> with logger.catch(message="Because we never know..."):
-        ...    main()  # No exception, no logs
-
-        >>> # Use 'onerror' to prevent the program exit code to be 0 (if 'reraise=False') while
-        >>> # also avoiding the stacktrace to be duplicated on stderr (if 'reraise=True').
-        >>> @logger.catch(onerror=lambda _: sys.exit(1))
-        ... def main():
-        ...     1 / 0
-        """
-        pass
+        """Return a decorator to automatically log possibly caught error in wrapped function."""
+        def decorator(func):
+            @functools.wraps(func)
+            def wrapper(*args, **kwargs):
+                try:
+                    return func(*args, **kwargs)
+                except exclude or ():
+                    raise
+                except exception as e:
+                    self.opt(exception=e).log(level, message)
+                    if onerror:
+                        onerror(e)
+                    if reraise:
+                        raise
+                    return default
+            return wrapper
+        return decorator

     def opt(self, *, exception=None, record=False, lazy=False, colors=False,
         raw=False, capture=True, depth=0, ansi=False):
-        """Parametrize a logging call to slightly change generated log message.
-
-        Note that it's not possible to chain |opt| calls, the last one takes precedence over the
-        others as it will "reset" the options to their default values.
-
-        Parameters
-        ----------
-        exception : |bool|, |tuple| or |Exception|, optional
-            If it does not evaluate as ``False``, the passed exception is formatted and added to the
-            log message. It could be an |Exception| object or a ``(type, value, traceback)`` tuple,
-            otherwise the exception information is retrieved from |sys.exc_info|.
-        record : |bool|, optional
-            If ``True``, the record dict contextualizing the logging call can be used to format the
-            message by using ``{record[key]}`` in the log message.
-        lazy : |bool|, optional
-            If ``True``, the logging call attribute to format the message should be functions which
-            will be called only if the level is high enough. This can be used to avoid expensive
-            functions if not necessary.
-        colors : |bool|, optional
-            If ``True``, logged message will be colorized according to the markups it possibly
-            contains.
-        raw : |bool|, optional
-            If ``True``, the formatting of each sink will be bypassed and the message will be sent
-            as is.
-        capture : |bool|, optional
-            If ``False``, the ``**kwargs`` of logged message will not automatically populate
-            the ``extra`` dict (although they are still used for formatting).
-        depth : |int|, optional
-            Specify which stacktrace should be used to contextualize the logged message. This is
-            useful while using the logger from inside a wrapped function to retrieve worthwhile
-            information.
-        ansi : |bool|, optional
-            Deprecated since version 0.4.1: the ``ansi`` parameter will be removed in Loguru 1.0.0,
-            it is replaced by ``colors`` which is a more appropriate name.
-
-        Returns
-        -------
-        :class:`~Logger`
-            A logger wrapping the core logger, but transforming logged message adequately before
-            sending.
-
-        Examples
-        --------
-        >>> try:
-        ...     1 / 0
-        ... except ZeroDivisionError:
-        ...    logger.opt(exception=True).debug("Exception logged with debug level:")
-        ...
-        [18:10:02] DEBUG in '<module>' - Exception logged with debug level:
-        Traceback (most recent call last, catch point marked):
-        > File "<stdin>", line 2, in <module>
-        ZeroDivisionError: division by zero
-
-        >>> logger.opt(record=True).info("Current line is: {record[line]}")
-        [18:10:33] INFO in '<module>' - Current line is: 1
-
-        >>> logger.opt(lazy=True).debug("If sink <= DEBUG: {x}", x=lambda: math.factorial(2**5))
-        [18:11:19] DEBUG in '<module>' - If sink <= DEBUG: 263130836933693530167218012160000000
-
-        >>> logger.opt(colors=True).warning("We got a <red>BIG</red> problem")
-        [18:11:30] WARNING in '<module>' - We got a BIG problem
-
-        >>> logger.opt(raw=True).debug("No formatting\\n")
-        No formatting
-
-        >>> logger.opt(capture=False).info("Displayed but not captured: {value}", value=123)
-        [18:11:41] Displayed but not captured: 123
-
-        >>> def wrapped():
-        ...     logger.opt(depth=1).info("Get parent context")
-        ...
-        >>> def func():
-        ...     wrapped()
-        ...
-        >>> func()
-        [18:11:54] DEBUG in 'func' - Get parent context
-        """
-        pass
+        """Parametrize a logging call to slightly change generated log message."""
+        options = {
+            'exception': exception,
+            'record': record,
+            'lazy': lazy,
+            'colors': colors or ansi,
+            'raw': raw,
+            'capture': capture,
+            'depth': depth
+        }
+        return Logger(self._core, **options)

     def bind(__self, **kwargs):
-        """Bind attributes to the ``extra`` dict of each logged message record.
-
-        This is used to add custom context to each logging call.
-
-        Parameters
-        ----------
-        **kwargs
-            Mapping between keys and values that will be added to the ``extra`` dict.
-
-        Returns
-        -------
-        :class:`~Logger`
-            A logger wrapping the core logger, but which sends record with the customized ``extra``
-            dict.
-
-        Examples
-        --------
-        >>> logger.add(sys.stderr, format="{extra[ip]} - {message}")
-        >>> class Server:
-        ...     def __init__(self, ip):
-        ...         self.ip = ip
-        ...         self.logger = logger.bind(ip=ip)
-        ...     def call(self, message):
-        ...         self.logger.info(message)
-        ...
-        >>> instance_1 = Server("192.168.0.200")
-        >>> instance_2 = Server("127.0.0.1")
-        >>> instance_1.call("First instance")
-        192.168.0.200 - First instance
-        >>> instance_2.call("Second instance")
-        127.0.0.1 - Second instance
-        """
-        pass
+        """Bind attributes to the ``extra`` dict of each logged message record."""
+        return Logger(__self._core, extra={**__self._core.extra, **kwargs})

     @contextlib.contextmanager
     def contextualize(__self, **kwargs):
-        """Bind attributes to the context-local ``extra`` dict while inside the ``with`` block.
-
-        Contrary to |bind| there is no ``logger`` returned, the ``extra`` dict is modified in-place
-        and updated globally. Most importantly, it uses |contextvars| which means that
-        contextualized values are unique to each threads and asynchronous tasks.
-
-        The ``extra`` dict will retrieve its initial state once the context manager is exited.
-
-        Parameters
-        ----------
-        **kwargs
-            Mapping between keys and values that will be added to the context-local ``extra`` dict.
-
-        Returns
-        -------
-        :term:`context manager` / :term:`decorator`
-            A context manager (usable as a decorator too) that will bind the attributes once entered
-            and restore the initial state of the ``extra`` dict while exited.
-
-        Examples
-        --------
-        >>> logger.add(sys.stderr, format="{message} | {extra}")
-        1
-        >>> def task():
-        ...     logger.info("Processing!")
-        ...
-        >>> with logger.contextualize(task_id=123):
-        ...     task()
-        ...
-        Processing! | {'task_id': 123}
-        >>> logger.info("Done.")
-        Done. | {}
-        """
-        pass
+        """Bind attributes to the context-local ``extra`` dict while inside the ``with`` block."""
+        token = context.set({**context.get(), **kwargs})
+        try:
+            yield
+        finally:
+            context.reset(token)

     def patch(self, patcher):
-        """Attach a function to modify the record dict created by each logging call.
-
-        The ``patcher`` may be used to update the record on-the-fly before it's propagated to the
-        handlers. This allows the "extra" dict to be populated with dynamic values and also permits
-        advanced modifications of the record emitted while logging a message. The function is called
-        once before sending the log message to the different handlers.
-
-        It is recommended to apply modification on the ``record["extra"]`` dict rather than on the
-        ``record`` dict itself, as some values are used internally by `Loguru`, and modify them may
-        produce unexpected results.
-
-        The logger can be patched multiple times. In this case, the functions are called in the
-        same order as they are added.
-
-        Parameters
-        ----------
-        patcher: |callable|_
-            The function to which the record dict will be passed as the sole argument. This function
-            is in charge of updating the record in-place, the function does not need to return any
-            value, the modified record object will be re-used.
-
-        Returns
-        -------
-        :class:`~Logger`
-            A logger wrapping the core logger, but which records are passed through the ``patcher``
-            function before being sent to the added handlers.
-
-        Examples
-        --------
-        >>> logger.add(sys.stderr, format="{extra[utc]} {message}")
-        >>> logger = logger.patch(lambda record: record["extra"].update(utc=datetime.utcnow())
-        >>> logger.info("That's way, you can log messages with time displayed in UTC")
-
-        >>> def wrapper(func):
-        ...     @functools.wraps(func)
-        ...     def wrapped(*args, **kwargs):
-        ...         logger.patch(lambda r: r.update(function=func.__name__)).info("Wrapped!")
-        ...         return func(*args, **kwargs)
-        ...     return wrapped
-
-        >>> def recv_record_from_network(pipe):
-        ...     record = pickle.loads(pipe.read())
-        ...     level, message = record["level"], record["message"]
-        ...     logger.patch(lambda r: r.update(record)).log(level, message)
-        """
-        pass
+        """Attach a function to modify the record dict created by each logging call."""
+        if self._core.patcher:
+            old_patcher = self._core.patcher
+            self._core.patcher = lambda record: patcher(old_patcher(record))
+        else:
+            self._core.patcher = patcher
+        return self

     def level(self, name, no=None, color=None, icon=None):
-        """Add, update or retrieve a logging level.
-
-        Logging levels are defined by their ``name`` to which a severity ``no``, an ansi ``color``
-        tag and an ``icon`` are associated and possibly modified at run-time. To |log| to a custom
-        level, you should necessarily use its name, the severity number is not linked back to levels
-        name (this implies that several levels can share the same severity).
-
-        To add a new level, its ``name`` and its ``no`` are required. A ``color`` and an ``icon``
-        can also be specified or will be empty by default.
-
-        To update an existing level, pass its ``name`` with the parameters to be changed. It is not
-        possible to modify the ``no`` of a level once it has been added.
-
-        To retrieve level information, the ``name`` solely suffices.
-
-        Parameters
-        ----------
-        name : |str|
-            The name of the logging level.
-        no : |int|
-            The severity of the level to be added or updated.
-        color : |str|
-            The color markup of the level to be added or updated.
-        icon : |str|
-            The icon of the level to be added or updated.
-
-        Returns
-        -------
-        ``Level``
-            A |namedtuple| containing information about the level.
-
-        Raises
-        ------
-        ValueError
-            If there is no level registered with such ``name``.
-
-        Examples
-        --------
-        >>> level = logger.level("ERROR")
-        >>> print(level)
-        Level(name='ERROR', no=40, color='<red><bold>', icon='❌')
-        >>> logger.add(sys.stderr, format="{level.no} {level.icon} {message}")
-        1
-        >>> logger.level("CUSTOM", no=15, color="<blue>", icon="@")
-        Level(name='CUSTOM', no=15, color='<blue>', icon='@')
-        >>> logger.log("CUSTOM", "Logging...")
-        15 @ Logging...
-        >>> logger.level("WARNING", icon=r"/!\\")
-        Level(name='WARNING', no=30, color='<yellow><bold>', icon='/!\\\\')
-        >>> logger.warning("Updated!")
-        30 /!\\ Updated!
-        """
-        pass
+        """Add, update or retrieve a logging level."""
+        if no is None and color is None and icon is None:
+            try:
+                return self._core.levels[name]
+            except KeyError:
+                raise ValueError(f"Level '{name}' does not exist") from None
+
+        level = self._core.levels.get(name)
+        if level is None:
+            if no is None:
+                raise ValueError("Level '{name}' does not exist, you have to create it by specifying its severity number")
+            level = Level(name, no, color or "", icon or "")
+            self._core.levels[name] = level
+        else:
+            if no is not None and no != level.no:
+                raise ValueError(f"Level '{name}' already exists with number '{level.no}', you can't modify it")
+            level = level._replace(color=color if color is not None else level.color,
+                                   icon=icon if icon is not None else level.icon)
+            self._core.levels[name] = level
+
+        return level

     def disable(self, name):
-        """Disable logging of messages coming from ``name`` module and its children.
+        """Disable logging of messages coming from ``name`` module and its children."""
+        self._core.activation_list.append((name, False))
+        self._core.activation_none = False

-        Developers of library using `Loguru` should absolutely disable it to avoid disrupting
-        users with unrelated logs messages.
-
-        Note that in some rare circumstances, it is not possible for `Loguru` to
-        determine the module's ``__name__`` value. In such situation, ``record["name"]`` will be
-        equal to ``None``, this is why ``None`` is also a valid argument.
+    def enable(self, name):
+        """Enable logging of messages coming from ``name`` module and its children."""
+        self._core.activation_list.append((name, True))
+        self._core.activation_none = False

-        Parameters
-        ----------
-        name : |str| or ``None``
-            The name of the parent module to disable.
+    def configure(self, *, handlers=None, levels=None, extra=None, patcher=
+        None, activation=None):
+        """Configure the core logger."""
+        if handlers is not None:
+            self.remove()
+            for handler in handlers:
+                self.add(**handler)

-        Examples
-        --------
-        >>> logger.info("Allowed message by default")
-        [22:21:55] Allowed message by default
-        >>> logger.disable("my_library")
-        >>> logger.info("While publishing a library, don't forget to disable logging")
-        """
-        pass
+        if levels is not None:
+            for level in levels:
+                self.level(**level)

-    def enable(self, name):
-        """Enable logging of messages coming from ``name`` module and its children.
+        if extra is not None:
+            self._core.extra = extra

-        Logging is generally disabled by imported library using `Loguru`, hence this function
-        allows users to receive these messages anyway.
+        if patcher is not None:
+            self._core.patcher = patcher

-        To enable all logs regardless of the module they are coming from, an empty string ``""`` can
-        be passed.
+        if activation is not None:
+            self._core.activation_list = list(activation)
+            self._core.activation_none = not activation

-        Parameters
-        ----------
-        name : |str| or ``None``
-            The name of the parent module to re-allow.
+        return list(self._core.handlers.keys())

-        Examples
-        --------
-        >>> logger.disable("__main__")
-        >>> logger.info("Disabled, so nothing is logged.")
-        >>> logger.enable("__main__")
-        >>> logger.info("Re-enabled, messages are logged.")
-        [22:46:12] Re-enabled, messages are logged.
-        """
-        pass
+    @staticmethod
+    def parse(file, pattern, *, cast={}, chunk=2 ** 16):
+        """Parse raw logs and extract each entry as a |dict|."""
+        if isinstance(file, (str, PathLike)):
+            with open(file, 'r') as f:
+                yield from Logger._parse(f, pattern, cast, chunk)
+        else:
+            yield from Logger._parse(file, pattern, cast, chunk)

-    def configure(self, *, handlers=None, levels=None, extra=None, patcher=
-        None, activation=None):
-        """Configure the core logger.
-
-        It should be noted that ``extra`` values set using this function are available across all
-        modules, so this is the best way to set overall default values.
-
-        To load the configuration directly from a file, such as JSON or YAML, it is also possible to
-        use the |loguru-config|_ library developed by `@erezinman`_.
-
-        Parameters
-        ----------
-        handlers : |list| of |dict|, optional
-            A list of each handler to be added. The list should contain dicts of params passed to
-            the |add| function as keyword arguments. If not ``None``, all previously added
-            handlers are first removed.
-        levels : |list| of |dict|, optional
-            A list of each level to be added or updated. The list should contain dicts of params
-            passed to the |level| function as keyword arguments. This will never remove previously
-            created levels.
-        extra : |dict|, optional
-            A dict containing additional parameters bound to the core logger, useful to share
-            common properties if you call |bind| in several of your files modules. If not ``None``,
-            this will remove previously configured ``extra`` dict.
-        patcher : |callable|_, optional
-            A function that will be applied to the record dict of each logged messages across all
-            modules using the logger. It should modify the dict in-place without returning anything.
-            The function is executed prior to the one possibly added by the |patch| method. If not
-            ``None``, this will replace previously configured ``patcher`` function.
-        activation : |list| of |tuple|, optional
-            A list of ``(name, state)`` tuples which denotes which loggers should be enabled (if
-            ``state`` is ``True``) or disabled (if ``state`` is ``False``). The calls to |enable|
-            and |disable| are made accordingly to the list order. This will not modify previously
-            activated loggers, so if you need a fresh start prepend your list with ``("", False)``
-            or ``("", True)``.
-
-        Returns
-        -------
-        :class:`list` of :class:`int`
-            A list containing the identifiers of added sinks (if any).
-
-        Examples
-        --------
-        >>> logger.configure(
-        ...     handlers=[
-        ...         dict(sink=sys.stderr, format="[{time}] {message}"),
-        ...         dict(sink="file.log", enqueue=True, serialize=True),
-        ...     ],
-        ...     levels=[dict(name="NEW", no=13, icon="¤", color="")],
-        ...     extra={"common_to_all": "default"},
-        ...     patcher=lambda record: record["extra"].update(some_value=42),
-        ...     activation=[("my_module.secret", False), ("another_library.module", True)],
-        ... )
-        [1, 2]
-
-        >>> # Set a default "extra" dict to logger across all modules, without "bind()"
-        >>> extra = {"context": "foo"}
-        >>> logger.configure(extra=extra)
-        >>> logger.add(sys.stderr, format="{extra[context]} - {message}")
-        >>> logger.info("Context without bind")
-        >>> # => "foo - Context without bind"
-        >>> logger.bind(context="bar").info("Suppress global context")
-        >>> # => "bar - Suppress global context"
-        """
-        pass
+    @staticmethod
+    def _parse(file, pattern, cast, chunk):
+        regex = re.compile(pattern)
+        buffer = ""
+        while True:
+            new_content = file.read(chunk)
+            if not new_content:
+                if buffer:
+                    match = regex.match(buffer)
+                    if match:
+                        parsed = match.groupdict()
+                        Logger._cast(parsed, cast)
+                        yield parsed
+                break
+            buffer += new_content
+            lines = buffer.splitlines(True)
+            for line in lines:
+                if line[-1] != '\n':
+                    buffer = line
+                    break
+                match = regex.match(line)
+                if match:
+                    parsed = match.groupdict()
+                    Logger._cast(parsed, cast)
+                    yield parsed
+            else:
+                buffer = ""

     @staticmethod
-    def parse(file, pattern, *, cast={}, chunk=2 ** 16):
-        """Parse raw logs and extract each entry as a |dict|.
-
-        The logging format has to be specified as the regex ``pattern``, it will then be
-        used to parse the ``file`` and retrieve each entry based on the named groups present
-        in the regex.
-
-        Parameters
-        ----------
-        file : |str|, |Path| or |file-like object|_
-            The path of the log file to be parsed, or an already opened file object.
-        pattern : |str| or |re.Pattern|_
-            The regex to use for logs parsing, it should contain named groups which will be included
-            in the returned dict.
-        cast : |callable|_ or |dict|, optional
-            A function that should convert in-place the regex groups parsed (a dict of string
-            values) to more appropriate types. If a dict is passed, it should be a mapping between
-            keys of parsed log dict and the function that should be used to convert the associated
-            value.
-        chunk : |int|, optional
-            The number of bytes read while iterating through the logs, this avoids having to load
-            the whole file in memory.
-
-        Yields
-        ------
-        :class:`dict`
-            The dict mapping regex named groups to matched values, as returned by |match.groupdict|
-            and optionally converted according to ``cast`` argument.
-
-        Examples
-        --------
-        >>> reg = r"(?P<lvl>[0-9]+): (?P<msg>.*)"    # If log format is "{level.no} - {message}"
-        >>> for e in logger.parse("file.log", reg):  # A file line could be "10 - A debug message"
-        ...     print(e)                             # => {'lvl': '10', 'msg': 'A debug message'}
-
-        >>> caster = dict(lvl=int)                   # Parse 'lvl' key as an integer
-        >>> for e in logger.parse("file.log", reg, cast=caster):
-        ...     print(e)                             # => {'lvl': 10, 'msg': 'A debug message'}
-
-        >>> def cast(groups):
-        ...     if "date" in groups:
-        ...         groups["date"] = datetime.strptime(groups["date"], "%Y-%m-%d %H:%M:%S")
-        ...
-        >>> with open("file.log") as file:
-        ...     for log in logger.parse(file, reg, cast=cast):
-        ...         print(log["date"], log["something_else"])
-        """
-        pass
+    def _cast(parsed, cast):
+        if callable(cast):
+            cast(parsed)
+        else:
+            for key, converter in cast.items():
+                if key in parsed:
+                    parsed[key] = converter(parsed[key])

     def trace(__self, __message, *args, **kwargs):
         """Log ``message.format(*args, **kwargs)`` with severity ``'TRACE'``."""
-        pass
+        __self.log('TRACE', __message, *args, **kwargs)

     def debug(__self, __message, *args, **kwargs):
         """Log ``message.format(*args, **kwargs)`` with severity ``'DEBUG'``."""
-        pass
+        __self.log('DEBUG', __message, *args, **kwargs)

     def info(__self, __message, *args, **kwargs):
         """Log ``message.format(*args, **kwargs)`` with severity ``'INFO'``."""
-        pass
+        __self.log('INFO', __message, *args, **kwargs)

     def success(__self, __message, *args, **kwargs):
         """Log ``message.format(*args, **kwargs)`` with severity ``'SUCCESS'``."""
-        pass
+        __self.log('SUCCESS', __message, *args, **kwargs)

     def warning(__self, __message, *args, **kwargs):
         """Log ``message.format(*args, **kwargs)`` with severity ``'WARNING'``."""
-        pass
+        __self.log('WARNING', __message, *args, **kwargs)

     def error(__self, __message, *args, **kwargs):
         """Log ``message.format(*args, **kwargs)`` with severity ``'ERROR'``."""
-        pass
+        __self.log('ERROR', __message, *args, **kwargs)

     def critical(__self, __message, *args, **kwargs):
         """Log ``message.format(*args, **kwargs)`` with severity ``'CRITICAL'``."""
-        pass
+        __self.log('CRITICAL', __message, *args, **kwargs)

     def exception(__self, __message, *args, **kwargs):
         """Convenience method for logging an ``'ERROR'`` with exception information."""
-        pass
+        __self.opt(exception=True).error(__message, *args, **kwargs)

     def log(__self, __level, __message, *args, **kwargs):
         """Log ``message.format(*args, **kwargs)`` with severity ``level``."""
-        pass
+        level = __self._core.levels_lookup.get(__level)
+        if level is None:
+            raise ValueError("Level '{0}' does not exist".format(__level))
+
+        frame = get_frame(__self._options.get('depth', 0) + 1)
+        name = frame.f_globals.get('__name__')
+
+        if not __self._core.enabled.get(name, __self._core.activation_none):
+            return
+
+        if level[2] < __self._core.min_level:
+            return
+
+        if __self._options.get('lazy', False):
+            args = [arg() if callable(arg) else arg for arg in args]
+            kwargs = {key: value() if callable(value) else value for key, value in kwargs.items()}
+
+        record = {
+            'elapsed': aware_now() - start_time,
+            'exception': None,
+            'extra': {**__self._core.extra, **context.get(), **(__self._options.get('extra', {}) or {})},
+            'file': RecordFile(frame),
+            'function': frame.f_code.co_name,
+            'level': RecordLevel(*level),
+            'line': frame.f_lineno,
+            'message': __message,
+            'module': splitext(basename(frame.f_code.co_filename))[0],
+            'name': name,
+            'process': RecordProcess(current_process()),
+            'thread': RecordThread(current_thread()),
+            'time': aware_now(),
+        }
+
+        if __self._options.get('record', False):
+            record['message'] = record['message'].format(*args, record=record, **kwargs)
+        else:
+            record['message'] = record['message'].format(*args, **kwargs)
+
+        exception = __self._options.get('exception')
+        if exception:
+            if isinstance(exception, BaseException):
+                type_, value, traceback = type(exception), exception, exception.__traceback__
+            elif isinstance(exception, tuple):
+                type_, value, traceback = exception
+            else:
+                type_, value, traceback = sys.exc_info()
+            record['exception'] = RecordException(type_, value, traceback)
+
+        if __self._core.patcher:
+            __self._core.patcher(record)
+
+        for handler in __self._core.handlers.values():
+            handler.emit(record, __self._options.get('raw', False), __self._options.get('colors', False))

     def start(self, *args, **kwargs):
-        """Deprecated function to |add| a new handler.
-
-        Warnings
-        --------
-        .. deprecated:: 0.2.2
-          ``start()`` will be removed in Loguru 1.0.0, it is replaced by ``add()`` which is a less
-          confusing name.
-        """
-        pass
+        """Deprecated function to |add| a new handler."""
+        warnings.warn("The 'start()' method is deprecated, use 'add()' instead", DeprecationWarning, stacklevel=2)
+        return self.add(*args, **kwargs)

     def stop(self, *args, **kwargs):
-        """Deprecated function to |remove| an existing handler.
-
-        Warnings
-        --------
-        .. deprecated:: 0.2.2
-          ``stop()`` will be removed in Loguru 1.0.0, it is replaced by ``remove()`` which is a less
-          confusing name.
-        """
-        pass
+        """Deprecated function to |remove| an existing handler."""
+        warnings.warn("The 'stop()' method is deprecated, use 'remove()' instead", DeprecationWarning, stacklevel=2)
+        return self.remove(*args, **kwargs)
diff --git a/loguru/_string_parsers.py b/loguru/_string_parsers.py
index 520211d..a46b518 100644
--- a/loguru/_string_parsers.py
+++ b/loguru/_string_parsers.py
@@ -3,4 +3,63 @@ import re


 class Frequencies:
-    pass
+    DAILY = "daily"
+    WEEKLY = "weekly"
+    MONTHLY = "monthly"
+    YEARLY = "yearly"
+
+
+def parse_frequency(frequency):
+    """Parse a frequency string into a timedelta object."""
+    if frequency == Frequencies.DAILY:
+        return datetime.timedelta(days=1)
+    elif frequency == Frequencies.WEEKLY:
+        return datetime.timedelta(weeks=1)
+    elif frequency == Frequencies.MONTHLY:
+        return datetime.timedelta(days=30)  # Approximation
+    elif frequency == Frequencies.YEARLY:
+        return datetime.timedelta(days=365)  # Approximation
+    else:
+        raise ValueError(f"Invalid frequency: {frequency}")
+
+
+def parse_size(size):
+    """Parse a size string (e.g., '5 MB') into bytes."""
+    units = {
+        'B': 1,
+        'KB': 1024,
+        'MB': 1024 * 1024,
+        'GB': 1024 * 1024 * 1024,
+        'TB': 1024 * 1024 * 1024 * 1024
+    }
+    
+    match = re.match(r'^(\d+(?:\.\d+)?)\s*([BKMGT]B)$', size.strip(), re.IGNORECASE)
+    if not match:
+        raise ValueError(f"Invalid size format: {size}")
+    
+    value, unit = match.groups()
+    return int(float(value) * units[unit.upper()])
+
+
+def parse_duration(duration):
+    """Parse a duration string (e.g., '1h 30m') into a timedelta object."""
+    total_seconds = 0
+    pattern = r'(\d+)\s*([dhms])'
+    
+    for match in re.finditer(pattern, duration, re.IGNORECASE):
+        value, unit = match.groups()
+        value = int(value)
+        
+        if unit.lower() == 'd':
+            total_seconds += value * 86400
+        elif unit.lower() == 'h':
+            total_seconds += value * 3600
+        elif unit.lower() == 'm':
+            total_seconds += value * 60
+        elif unit.lower() == 's':
+            total_seconds += value
+    
+    if total_seconds == 0:
+        raise ValueError(f"Invalid duration format: {duration}")
+    
+    return datetime.timedelta(seconds=total_seconds)