Reference (Gold): seaborn

Pytest Summary for tests

status count
passed 2362
skipped 11
xfailed 6
failed 1
total 2380
collected 2380

Failed pytests:

test_scales.py::TestNominal::test_color_numeric_int_float_mix
[gw4] linux -- Python 3.12.6 /testbed/.venv/bin/python3

test_plot.py::TestScaling::test_log_scale_name
[gw1] linux -- Python 3.12.6 /testbed/.venv/bin/python3

test_plot.py::TestLayerAddition::test_stat_default
[gw0] linux -- Python 3.12.6 /testbed/.venv/bin/python3

test_plot.py::TestScaling::test_identity_mapping_color_strings
[gw1] linux -- Python 3.12.6 /testbed/.venv/bin/python3

test_plot.py::TestScaling::test_undefined_variable_raises
[gw1] linux -- Python 3.12.6 /testbed/.venv/bin/python3

test_plot.py::TestScaling::test_categorical_as_datetime
[gw0] linux -- Python 3.12.6 /testbed/.venv/bin/python3

test_distributions.py::TestKDEPlotBivariate::test_weights
[gw1] linux -- Python 3.12.6 /testbed/.venv/bin/python3
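
The node IDs above can be passed straight to pytest to reproduce a failure in isolation (the [gwN] tags only record which pytest-xdist worker ran the test). A minimal sketch follows; the working directory and any directory prefix for the test modules are assumptions about the local checkout, not part of the report:

import pytest

# Node IDs copied from the report above; depending on where the test
# modules live, a directory prefix (e.g. tests/) may be needed.
failing = [
    "test_scales.py::TestNominal::test_color_numeric_int_float_mix",
    "test_plot.py::TestScaling::test_log_scale_name",
]

# -rA adds a one-line summary for every selected test to the output.
raise SystemExit(pytest.main(["-rA", *failing]))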

Patch diff

diff --git a/seaborn/_base.py b/seaborn/_base.py
index d7a46b61..0b435231 100644
--- a/seaborn/_base.py
+++ b/seaborn/_base.py
@@ -6,30 +6,76 @@ from collections import UserString
 from collections.abc import Iterable, Sequence, Mapping
 from numbers import Number
 from datetime import datetime
+
 import numpy as np
 import pandas as pd
 import matplotlib as mpl
+
 from seaborn._core.data import PlotData
-from seaborn.palettes import QUAL_PALETTES, color_palette
-from seaborn.utils import _check_argument, _version_predates, desaturate, locator_to_legend_entries, get_color_cycle, remove_na
+from seaborn.palettes import (
+    QUAL_PALETTES,
+    color_palette,
+)
+from seaborn.utils import (
+    _check_argument,
+    _version_predates,
+    desaturate,
+    locator_to_legend_entries,
+    get_color_cycle,
+    remove_na,
+)


 class SemanticMapping:
     """Base class for mapping data values to plot attributes."""
+
+    # -- Default attributes that all SemanticMapping subclasses must set
+
+    # Whether the mapping is numeric, categorical, or datetime
     map_type: str | None = None
+
+    # Ordered list of unique values in the input data
     levels = None
+
+    # A mapping from the data values to corresponding plot attributes
     lookup_table = None

     def __init__(self, plotter):
+
+        # TODO Putting this here so we can continue to use a lot of the
+        # logic that's built into the library, but the idea of this class
+        # is to move towards semantic mappings that are agnostic about the
+        # kind of plot they're going to be used to draw.
+        # Fully achieving that is going to take some thinking.
         self.plotter = plotter

     def _check_list_length(self, levels, values, variable):
         """Input check when values are provided as a list."""
-        pass
+        # Copied from _core/properties; eventually will be replaced by that module.
+        message = ""
+        if len(levels) > len(values):
+            message = " ".join([
+                f"\nThe {variable} list has fewer values ({len(values)})",
+                f"than needed ({len(levels)}) and will cycle, which may",
+                "produce an uninterpretable plot."
+            ])
+            values = [x for _, x in zip(levels, itertools.cycle(values))]
+
+        elif len(values) > len(levels):
+            message = " ".join([
+                f"The {variable} list has more values ({len(values)})",
+                f"than needed ({len(levels)}), which may not be intended.",
+            ])
+            values = values[:len(levels)]
+
+        if message:
+            warnings.warn(message, UserWarning, stacklevel=6)
+
+        return values

     def _lookup_single(self, key):
         """Apply the mapping to a single data value."""
-        pass
+        return self.lookup_table[key]

     def __call__(self, key, *args, **kwargs):
         """Get the attribute(s) values for the data key."""
@@ -41,12 +87,18 @@ class SemanticMapping:

 class HueMapping(SemanticMapping):
     """Mapping that sets artist colors according to data values."""
+    # A specification of the colors that should appear in the plot
     palette = None
+
+    # An object that normalizes data values to [0, 1] range for color mapping
     norm = None
+
+    # A continuous colormap object for interpolating in a numeric context
     cmap = None

-    def __init__(self, plotter, palette=None, order=None, norm=None,
-        saturation=1):
+    def __init__(
+        self, plotter, palette=None, order=None, norm=None, saturation=1,
+    ):
         """Map the levels of the `hue` variable to distinct colors.

         Parameters
@@ -55,34 +107,61 @@ class HueMapping(SemanticMapping):

         """
         super().__init__(plotter)
-        data = plotter.plot_data.get('hue', pd.Series(dtype=float))
+
+        data = plotter.plot_data.get("hue", pd.Series(dtype=float))
+
         if isinstance(palette, np.ndarray):
             msg = (
-                'Numpy array is not a supported type for `palette`. Please convert your palette to a list. This will become an error in v0.14'
-                )
+                "Numpy array is not a supported type for `palette`. "
+                "Please convert your palette to a list. "
+                "This will become an error in v0.14"
+            )
             warnings.warn(msg, stacklevel=4)
             palette = palette.tolist()
+
         if data.isna().all():
             if palette is not None:
-                msg = (
-                    'Ignoring `palette` because no `hue` variable has been assigned.'
-                    )
+                msg = "Ignoring `palette` because no `hue` variable has been assigned."
                 warnings.warn(msg, stacklevel=4)
         else:
-            map_type = self.infer_map_type(palette, norm, plotter.
-                input_format, plotter.var_types['hue'])
-            if map_type == 'numeric':
+
+            map_type = self.infer_map_type(
+                palette, norm, plotter.input_format, plotter.var_types["hue"]
+            )
+
+            # Our goal is to end up with a dictionary mapping every unique
+            # value in `data` to a color. We will also keep track of the
+            # metadata about this mapping we will need for, e.g., a legend
+
+            # --- Option 1: numeric mapping with a matplotlib colormap
+
+            if map_type == "numeric":
+
                 data = pd.to_numeric(data)
-                levels, lookup_table, norm, cmap = self.numeric_mapping(data,
-                    palette, norm)
-            elif map_type == 'categorical':
+                levels, lookup_table, norm, cmap = self.numeric_mapping(
+                    data, palette, norm,
+                )
+
+            # --- Option 2: categorical mapping using seaborn palette
+
+            elif map_type == "categorical":
+
                 cmap = norm = None
-                levels, lookup_table = self.categorical_mapping(data,
-                    palette, order)
+                levels, lookup_table = self.categorical_mapping(
+                    data, palette, order,
+                )
+
+            # --- Option 3: datetime mapping
+
             else:
+                # TODO this needs actual implementation
                 cmap = norm = None
-                levels, lookup_table = self.categorical_mapping(list(data),
-                    palette, order)
+                levels, lookup_table = self.categorical_mapping(
+                    # Casting data to list to handle differences in the way
+                    # pandas and numpy represent datetime64 data
+                    list(data), palette, order,
+                )
+
             self.saturation = saturation
             self.map_type = map_type
             self.lookup_table = lookup_table
@@ -93,26 +172,137 @@ class HueMapping(SemanticMapping):

     def _lookup_single(self, key):
         """Get the color for a single value, using colormap to interpolate."""
-        pass
+        try:
+            # Use a value that's in the original data vector
+            value = self.lookup_table[key]
+        except KeyError:
+
+            if self.norm is None:
+                # Currently we only get here in scatterplot with hue_order,
+                # because scatterplot does not consider hue a grouping variable
+                # So unused hue levels are in the data, but not the lookup table
+                return (0, 0, 0, 0)
+
+            # Use the colormap to interpolate between existing datapoints
+            # (e.g. in the context of making a continuous legend)
+            try:
+                normed = self.norm(key)
+            except TypeError as err:
+                if np.isnan(key):
+                    value = (0, 0, 0, 0)
+                else:
+                    raise err
+            else:
+                if np.ma.is_masked(normed):
+                    normed = np.nan
+                value = self.cmap(normed)
+
+        if self.saturation < 1:
+            value = desaturate(value, self.saturation)
+
+        return value

     def infer_map_type(self, palette, norm, input_format, var_type):
         """Determine how to implement the mapping."""
-        pass
+        if palette in QUAL_PALETTES:
+            map_type = "categorical"
+        elif norm is not None:
+            map_type = "numeric"
+        elif isinstance(palette, (dict, list)):
+            map_type = "categorical"
+        elif input_format == "wide":
+            map_type = "categorical"
+        else:
+            map_type = var_type
+
+        return map_type

     def categorical_mapping(self, data, palette, order):
         """Determine colors when the hue mapping is categorical."""
-        pass
+        # -- Identify the order and name of the levels
+
+        levels = categorical_order(data, order)
+        n_colors = len(levels)
+
+        # -- Identify the set of colors to use
+
+        if isinstance(palette, dict):
+
+            missing = set(levels) - set(palette)
+            if any(missing):
+                err = "The palette dictionary is missing keys: {}"
+                raise ValueError(err.format(missing))
+
+            lookup_table = palette
+
+        else:
+
+            if palette is None:
+                if n_colors <= len(get_color_cycle()):
+                    colors = color_palette(None, n_colors)
+                else:
+                    colors = color_palette("husl", n_colors)
+            elif isinstance(palette, list):
+                colors = self._check_list_length(levels, palette, "palette")
+            else:
+                colors = color_palette(palette, n_colors)
+
+            lookup_table = dict(zip(levels, colors))
+
+        return levels, lookup_table

     def numeric_mapping(self, data, palette, norm):
         """Determine colors when the hue variable is quantitative."""
-        pass
+        if isinstance(palette, dict):
+
+            # The presence of a norm object overrides a dictionary of hues
+            # in specifying a numeric mapping, so we need to process it here.
+            levels = list(sorted(palette))
+            colors = [palette[k] for k in sorted(palette)]
+            cmap = mpl.colors.ListedColormap(colors)
+            lookup_table = palette.copy()
+
+        else:
+
+            # The levels are the sorted unique values in the data
+            levels = list(np.sort(remove_na(data.unique())))
+
+            # --- Sort out the colormap to use from the palette argument
+
+            # Default numeric palette is our default cubehelix palette
+            # TODO do we want to do something complicated to ensure contrast?
+            palette = "ch:" if palette is None else palette
+
+            if isinstance(palette, mpl.colors.Colormap):
+                cmap = palette
+            else:
+                cmap = color_palette(palette, as_cmap=True)
+
+            # Now sort out the data normalization
+            if norm is None:
+                norm = mpl.colors.Normalize()
+            elif isinstance(norm, tuple):
+                norm = mpl.colors.Normalize(*norm)
+            elif not isinstance(norm, mpl.colors.Normalize):
+                err = "``hue_norm`` must be None, tuple, or Normalize object."
+                raise ValueError(err)
+
+            if not norm.scaled():
+                norm(np.asarray(data.dropna()))
+
+            lookup_table = dict(zip(levels, cmap(norm(levels))))
+
+        return levels, lookup_table, norm, cmap


 class SizeMapping(SemanticMapping):
     """Mapping that sets artist sizes according to data values."""
+    # An object that normalizes data values to [0, 1] range
     norm = None

-    def __init__(self, plotter, sizes=None, order=None, norm=None):
+    def __init__(
+        self, plotter, sizes=None, order=None, norm=None,
+    ):
         """Map the levels of the `size` variable to distinct values.

         Parameters
@@ -121,21 +311,44 @@ class SizeMapping(SemanticMapping):

         """
         super().__init__(plotter)
-        data = plotter.plot_data.get('size', pd.Series(dtype=float))
+
+        data = plotter.plot_data.get("size", pd.Series(dtype=float))
+
         if data.notna().any():
-            map_type = self.infer_map_type(norm, sizes, plotter.var_types[
-                'size'])
-            if map_type == 'numeric':
+
+            map_type = self.infer_map_type(
+                norm, sizes, plotter.var_types["size"]
+            )
+
+            # --- Option 1: numeric mapping
+
+            if map_type == "numeric":
+
                 levels, lookup_table, norm, size_range = self.numeric_mapping(
-                    data, sizes, norm)
-            elif map_type == 'categorical':
-                levels, lookup_table = self.categorical_mapping(data, sizes,
-                    order)
+                    data, sizes, norm,
+                )
+
+            # --- Option 2: categorical mapping
+
+            elif map_type == "categorical":
+
+                levels, lookup_table = self.categorical_mapping(
+                    data, sizes, order,
+                )
                 size_range = None
+
+            # --- Option 3: datetime mapping
+
+            # TODO this needs an actual implementation
             else:
-                levels, lookup_table = self.categorical_mapping(list(data),
-                    sizes, order)
+
+                levels, lookup_table = self.categorical_mapping(
+                    # Casting data to list to handle differences in the way
+                    # pandas and numpy represent datetime64 data
+                    list(data), sizes, order,
+                )
                 size_range = None
+
             self.map_type = map_type
             self.levels = levels
             self.norm = norm
@@ -143,10 +356,171 @@ class SizeMapping(SemanticMapping):
             self.size_range = size_range
             self.lookup_table = lookup_table

+    def infer_map_type(self, norm, sizes, var_type):
+
+        if norm is not None:
+            map_type = "numeric"
+        elif isinstance(sizes, (dict, list)):
+            map_type = "categorical"
+        else:
+            map_type = var_type
+
+        return map_type
+
+    def _lookup_single(self, key):
+
+        try:
+            value = self.lookup_table[key]
+        except KeyError:
+            normed = self.norm(key)
+            if np.ma.is_masked(normed):
+                normed = np.nan
+            value = self.size_range[0] + normed * np.ptp(self.size_range)
+        return value
+
+    def categorical_mapping(self, data, sizes, order):
+
+        levels = categorical_order(data, order)
+
+        if isinstance(sizes, dict):
+
+            # Dict inputs map existing data values to the size attribute
+            missing = set(levels) - set(sizes)
+            if any(missing):
+                err = f"Missing sizes for the following levels: {missing}"
+                raise ValueError(err)
+            lookup_table = sizes.copy()
+
+        elif isinstance(sizes, list):
+
+            # List inputs give size values in the same order as the levels
+            sizes = self._check_list_length(levels, sizes, "sizes")
+            lookup_table = dict(zip(levels, sizes))
+
+        else:
+
+            if isinstance(sizes, tuple):
+
+                # Tuple input sets the min, max size values
+                if len(sizes) != 2:
+                    err = "A `sizes` tuple must have only 2 values"
+                    raise ValueError(err)
+
+            elif sizes is not None:
+
+                err = f"Value for `sizes` not understood: {sizes}"
+                raise ValueError(err)
+
+            else:
+
+                # Otherwise, we need to get the min, max size values from
+                # the plotter object we are attached to.
+
+                # TODO this is going to cause us trouble later, because we
+                # want to restructure things so that the plotter is generic
+                # across the visual representation of the data. But at this
+                # point, we don't know the visual representation. Likely we
+                # want to change the logic of this Mapping so that it gives
+                # points on a normalized range that then gets un-normalized
+                # when we know what we're drawing. But given the way the
+                # package works now, this way is cleanest.
+                sizes = self.plotter._default_size_range
+
+            # For categorical sizes, use regularly-spaced linear steps
+            # between the minimum and maximum sizes. Then reverse the
+            # ramp so that the largest value is used for the first entry
+            # in size_order, etc. This is because "ordered" categories
+            # are often thought to go in decreasing priority.
+            sizes = np.linspace(*sizes, len(levels))[::-1]
+            lookup_table = dict(zip(levels, sizes))
+
+        return levels, lookup_table
+
+    def numeric_mapping(self, data, sizes, norm):
+
+        if isinstance(sizes, dict):
+            # The presence of a norm object overrides a dictionary of sizes
+            # in specifying a numeric mapping, so we need to process the
+            # dictionary here
+            levels = list(np.sort(list(sizes)))
+            size_values = sizes.values()
+            size_range = min(size_values), max(size_values)
+
+        else:
+
+            # The levels here will be the unique values in the data
+            levels = list(np.sort(remove_na(data.unique())))
+
+            if isinstance(sizes, tuple):
+
+                # For numeric inputs, the size can be parametrized by
+                # the minimum and maximum artist values to map to. The
+                # norm object that gets set up next specifies how to
+                # do the mapping.
+
+                if len(sizes) != 2:
+                    err = "A `sizes` tuple must have only 2 values"
+                    raise ValueError(err)
+
+                size_range = sizes
+
+            elif sizes is not None:
+
+                err = f"Value for `sizes` not understood: {sizes}"
+                raise ValueError(err)
+
+            else:
+
+                # When not provided, we get the size range from the plotter
+                # object we are attached to. See the note in the categorical
+                # method about how this is suboptimal for future development.
+                size_range = self.plotter._default_size_range
+
+        # Now that we know the minimum and maximum sizes that will get drawn,
+        # we need to map the data values that we have into that range. We will
+        # use a matplotlib Normalize class, which is typically used for numeric
+        # color mapping but works fine here too. It takes data values and maps
+        # them into a [0, 1] interval, potentially nonlinearly.
+
+        if norm is None:
+            # Default is a linear function between the min and max data values
+            norm = mpl.colors.Normalize()
+        elif isinstance(norm, tuple):
+            # It is also possible to give different limits in data space
+            norm = mpl.colors.Normalize(*norm)
+        elif not isinstance(norm, mpl.colors.Normalize):
+            err = f"Value for size `norm` parameter not understood: {norm}"
+            raise ValueError(err)
+        else:
+            # If provided with Normalize object, copy it so we can modify
+            norm = copy(norm)
+
+        # Set the mapping so all output values are in [0, 1]
+        norm.clip = True
+
+        # If the input range is not set, use the full range of the data
+        if not norm.scaled():
+            norm(levels)
+
+        # Map from data values to [0, 1] range
+        sizes_scaled = norm(levels)
+
+        # Now map from the scaled range into the artist units
+        if isinstance(sizes, dict):
+            lookup_table = sizes
+        else:
+            lo, hi = size_range
+            sizes = lo + sizes_scaled * (hi - lo)
+            lookup_table = dict(zip(levels, sizes))
+
+        return levels, lookup_table, norm, size_range
+

 class StyleMapping(SemanticMapping):
     """Mapping that sets artist style according to data values."""
-    map_type = 'categorical'
+
+    # Style mapping is always treated as categorical
+    map_type = "categorical"

     def __init__(self, plotter, markers=None, dashes=None, order=None):
         """Map the levels of the `style` variable to distinct values.
@@ -157,15 +531,26 @@ class StyleMapping(SemanticMapping):

         """
         super().__init__(plotter)
-        data = plotter.plot_data.get('style', pd.Series(dtype=float))
+
+        data = plotter.plot_data.get("style", pd.Series(dtype=float))
+
         if data.notna().any():
-            if variable_type(data) == 'datetime':
+
+            # Cast to list to handle numpy/pandas datetime quirks
+            if variable_type(data) == "datetime":
                 data = list(data)
+
+            # Find ordered unique values
             levels = categorical_order(data, order)
-            markers = self._map_attributes(markers, levels, unique_markers(
-                len(levels)), 'markers')
-            dashes = self._map_attributes(dashes, levels, unique_dashes(len
-                (levels)), 'dashes')
+
+            markers = self._map_attributes(
+                markers, levels, unique_markers(len(levels)), "markers",
+            )
+            dashes = self._map_attributes(
+                dashes, levels, unique_dashes(len(levels)), "dashes",
+            )
+
+            # Build the paths matplotlib will use to draw the markers
             paths = {}
             filled_markers = []
             for k, m in markers.items():
@@ -173,48 +558,92 @@ class StyleMapping(SemanticMapping):
                     m = mpl.markers.MarkerStyle(m)
                 paths[k] = m.get_path().transformed(m.get_transform())
                 filled_markers.append(m.is_filled())
+
+            # Mixture of filled and unfilled markers will show line art markers
+            # in the edge color, which defaults to white. This can be handled,
+            # but there would be additional complexity with specifying the
+            # weight of the line art markers without overwhelming the filled
+            # ones with the edges. So for now, we will disallow mixtures.
             if any(filled_markers) and not all(filled_markers):
-                err = 'Filled and line art markers cannot be mixed'
+                err = "Filled and line art markers cannot be mixed"
                 raise ValueError(err)
+
             lookup_table = {}
             for key in levels:
                 lookup_table[key] = {}
                 if markers:
-                    lookup_table[key]['marker'] = markers[key]
-                    lookup_table[key]['path'] = paths[key]
+                    lookup_table[key]["marker"] = markers[key]
+                    lookup_table[key]["path"] = paths[key]
                 if dashes:
-                    lookup_table[key]['dashes'] = dashes[key]
+                    lookup_table[key]["dashes"] = dashes[key]
+
             self.levels = levels
             self.lookup_table = lookup_table

     def _lookup_single(self, key, attr=None):
         """Get attribute(s) for a given data point."""
-        pass
+        if attr is None:
+            value = self.lookup_table[key]
+        else:
+            value = self.lookup_table[key][attr]
+        return value

     def _map_attributes(self, arg, levels, defaults, attr):
         """Handle the specification for a given style attribute."""
-        pass
+        if arg is True:
+            lookup_table = dict(zip(levels, defaults))
+        elif isinstance(arg, dict):
+            missing = set(levels) - set(arg)
+            if missing:
+                err = f"These `{attr}` levels are missing values: {missing}"
+                raise ValueError(err)
+            lookup_table = arg
+        elif isinstance(arg, Sequence):
+            arg = self._check_list_length(levels, arg, attr)
+            lookup_table = dict(zip(levels, arg))
+        elif arg:
+            err = f"This `{attr}` argument was not understood: {arg}"
+            raise ValueError(err)
+        else:
+            lookup_table = {}
+
+        return lookup_table
+
+
+# =========================================================================== #


 class VectorPlotter:
     """Base class for objects underlying *plot functions."""
-    wide_structure = {'x': '@index', 'y': '@values', 'hue': '@columns',
-        'style': '@columns'}
-    flat_structure = {'x': '@index', 'y': '@values'}
-    _default_size_range = 1, 2
+
+    wide_structure = {
+        "x": "@index", "y": "@values", "hue": "@columns", "style": "@columns",
+    }
+    flat_structure = {"x": "@index", "y": "@values"}
+
+    _default_size_range = 1, 2  # Unused but needed in tests, ugh

     def __init__(self, data=None, variables={}):
+
         self._var_levels = {}
-        self._var_ordered = {'x': False, 'y': False}
+        # var_ordered is relevant only for categorical axis variables, and may
+        # be better handled by an internal axis information object that tracks
+        # such information and is set up by the scale_* methods. The analogous
+        # information for numeric axes would be information about log scales.
+        self._var_ordered = {"x": False, "y": False}  # alt., used DefaultDict
         self.assign_variables(data, variables)
-        for var in ['hue', 'size', 'style']:
+
+        # TODO Lots of tests assume that these are called to initialize the
+        # mappings to default values on class initialization. I'd prefer to
+        # move away from that and only have a mapping when explicitly called.
+        for var in ["hue", "size", "style"]:
             if var in variables:
-                getattr(self, f'map_{var}')()
+                getattr(self, f"map_{var}")()

     @property
     def has_xy_data(self):
         """Return True at least one of x or y is defined."""
-        pass
+        return bool({"x", "y"} & set(self.variables))

     @property
     def var_levels(self):
@@ -229,11 +658,39 @@ class VectorPlotter:
         tracking plot variables.

         """
-        pass
+        for var in self.variables:
+            if (map_obj := getattr(self, f"_{var}_map", None)) is not None:
+                self._var_levels[var] = map_obj.levels
+        return self._var_levels

     def assign_variables(self, data=None, variables={}):
         """Define plot variables, optionally using lookup from `data`."""
-        pass
+        x = variables.get("x", None)
+        y = variables.get("y", None)
+
+        if x is None and y is None:
+            self.input_format = "wide"
+            frame, names = self._assign_variables_wideform(data, **variables)
+        else:
+            # When dealing with long-form input, use the newer PlotData
+            # object (internal but introduced for the objects interface)
+            # to centralize / standardize data consumption logic.
+            self.input_format = "long"
+            plot_data = PlotData(data, variables)
+            frame = plot_data.frame
+            names = plot_data.names
+
+        self.plot_data = frame
+        self.variables = names
+        self.var_types = {
+            v: variable_type(
+                frame[v],
+                boolean_type="numeric" if v in "xy" else "categorical"
+            )
+            for v in names
+        }
+
+        return self

     def _assign_variables_wideform(self, data=None, **kwargs):
         """Define plot variables given wide-form data.
@@ -257,10 +714,143 @@ class VectorPlotter:
             the inputs (or None when no name can be determined).

         """
-        pass
+        # Raise if semantic or other variables are assigned in wide-form mode
+        assigned = [k for k, v in kwargs.items() if v is not None]
+        if any(assigned):
+            s = "s" if len(assigned) > 1 else ""
+            err = f"The following variable{s} cannot be assigned with wide-form data: "
+            err += ", ".join(f"`{v}`" for v in assigned)
+            raise ValueError(err)
+
+        # Determine if the data object actually has any data in it
+        empty = data is None or not len(data)
+
+        # Then, determine if we have "flat" data (a single vector)
+        if isinstance(data, dict):
+            values = data.values()
+        else:
+            values = np.atleast_1d(np.asarray(data, dtype=object))
+        flat = not any(
+            isinstance(v, Iterable) and not isinstance(v, (str, bytes))
+            for v in values
+        )
+
+        if empty:
+
+            # Make an object with the structure of plot_data, but empty
+            plot_data = pd.DataFrame()
+            variables = {}
+
+        elif flat:
+
+            # Handle flat data by converting to pandas Series and using the
+            # index and/or values to define x and/or y
+            # (Could be accomplished with a more general to_series() interface)
+            flat_data = pd.Series(data).copy()
+            names = {
+                "@values": flat_data.name,
+                "@index": flat_data.index.name
+            }
+
+            plot_data = {}
+            variables = {}
+
+            for var in ["x", "y"]:
+                if var in self.flat_structure:
+                    attr = self.flat_structure[var]
+                    plot_data[var] = getattr(flat_data, attr[1:])
+                    variables[var] = names[self.flat_structure[var]]

-    def iter_data(self, grouping_vars=None, *, reverse=False,
-        from_comp_data=False, by_facet=True, allow_empty=False, dropna=True):
+            plot_data = pd.DataFrame(plot_data)
+
+        else:
+
+            # Otherwise assume we have some collection of vectors.
+
+            # Handle Python sequences such that entries end up in the columns,
+            # not in the rows, of the intermediate wide DataFrame.
+            # One way to accomplish this is to convert to a dict of Series.
+            if isinstance(data, Sequence):
+                data_dict = {}
+                for i, var in enumerate(data):
+                    key = getattr(var, "name", i)
+                    # TODO is there a safer/more generic way to ensure Series?
+                    # sort of like np.asarray, but for pandas?
+                    data_dict[key] = pd.Series(var)
+
+                data = data_dict
+
+            # Pandas requires that dict values either be Series objects
+            # or all have the same length, but we want to allow "ragged" inputs
+            if isinstance(data, Mapping):
+                data = {key: pd.Series(val) for key, val in data.items()}
+
+            # Otherwise, delegate to the pandas DataFrame constructor
+            # This is where we'd prefer to use a general interface that says
+            # "give me this data as a pandas DataFrame", so we can accept
+            # DataFrame objects from other libraries
+            wide_data = pd.DataFrame(data, copy=True)
+
+            # At this point we should reduce the dataframe to numeric cols
+            numeric_cols = [
+                k for k, v in wide_data.items() if variable_type(v) == "numeric"
+            ]
+            wide_data = wide_data[numeric_cols]
+
+            # Now melt the data to long form
+            melt_kws = {"var_name": "@columns", "value_name": "@values"}
+            use_index = "@index" in self.wide_structure.values()
+            if use_index:
+                melt_kws["id_vars"] = "@index"
+                try:
+                    orig_categories = wide_data.columns.categories
+                    orig_ordered = wide_data.columns.ordered
+                    wide_data.columns = wide_data.columns.add_categories("@index")
+                except AttributeError:
+                    category_columns = False
+                else:
+                    category_columns = True
+                wide_data["@index"] = wide_data.index.to_series()
+
+            plot_data = wide_data.melt(**melt_kws)
+
+            if use_index and category_columns:
+                plot_data["@columns"] = pd.Categorical(plot_data["@columns"],
+                                                       orig_categories,
+                                                       orig_ordered)
+
+            # Assign names corresponding to plot semantics
+            for var, attr in self.wide_structure.items():
+                plot_data[var] = plot_data[attr]
+
+            # Define the variable names
+            variables = {}
+            for var, attr in self.wide_structure.items():
+                obj = getattr(wide_data, attr[1:])
+                variables[var] = getattr(obj, "name", None)
+
+            # Remove redundant columns from plot_data
+            plot_data = plot_data[list(variables)]
+
+        return plot_data, variables
+
+    def map_hue(self, palette=None, order=None, norm=None, saturation=1):
+        mapping = HueMapping(self, palette, order, norm, saturation)
+        self._hue_map = mapping
+
+    def map_size(self, sizes=None, order=None, norm=None):
+        mapping = SizeMapping(self, sizes, order, norm)
+        self._size_map = mapping
+
+    def map_style(self, markers=None, dashes=None, order=None):
+        mapping = StyleMapping(self, markers, dashes, order)
+        self._style_map = mapping
+
+    def iter_data(
+        self, grouping_vars=None, *,
+        reverse=False, from_comp_data=False,
+        by_facet=True, allow_empty=False, dropna=True,
+    ):
         """Generator for getting subsets of data defined by semantic variables.

         Also injects "col" and "row" into grouping semantics.
@@ -289,18 +879,158 @@ class VectorPlotter:
             Subset of ``plot_data`` for this combination of semantic values.

         """
-        pass
+        # TODO should this default to using all (non x/y?) semantics?
+        # or define grouping vars somewhere?
+        if grouping_vars is None:
+            grouping_vars = []
+        elif isinstance(grouping_vars, str):
+            grouping_vars = [grouping_vars]
+        elif isinstance(grouping_vars, tuple):
+            grouping_vars = list(grouping_vars)
+
+        # Always insert faceting variables
+        if by_facet:
+            facet_vars = {"col", "row"}
+            grouping_vars.extend(
+                facet_vars & set(self.variables) - set(grouping_vars)
+            )
+
+        # Reduce to the semantics used in this plot
+        grouping_vars = [var for var in grouping_vars if var in self.variables]
+
+        if from_comp_data:
+            data = self.comp_data
+        else:
+            data = self.plot_data
+
+        if dropna:
+            data = data.dropna()
+
+        levels = self.var_levels.copy()
+        if from_comp_data:
+            for axis in {"x", "y"} & set(grouping_vars):
+                converter = self.converters[axis].iloc[0]
+                if self.var_types[axis] == "categorical":
+                    if self._var_ordered[axis]:
+                        # If the axis is ordered, then the axes in a possible
+                        # facet grid are by definition "shared", or there is a
+                        # single axis with a unique cat -> idx mapping.
+                        # So we can just take the first converter object.
+                        levels[axis] = converter.convert_units(levels[axis])
+                    else:
+                        # Otherwise, the mappings may not be unique, but we can
+                        # use the unique set of index values in comp_data.
+                        levels[axis] = np.sort(data[axis].unique())
+                else:
+                    transform = converter.get_transform().transform
+                    levels[axis] = transform(converter.convert_units(levels[axis]))
+
+        if grouping_vars:
+
+            grouped_data = data.groupby(
+                grouping_vars, sort=False, as_index=False, observed=False,
+            )
+
+            grouping_keys = []
+            for var in grouping_vars:
+                key = levels.get(var)
+                grouping_keys.append([] if key is None else key)
+
+            iter_keys = itertools.product(*grouping_keys)
+            if reverse:
+                iter_keys = reversed(list(iter_keys))
+
+            for key in iter_keys:
+
+                pd_key = (
+                    key[0] if len(key) == 1 and _version_predates(pd, "2.2.0") else key
+                )
+                try:
+                    data_subset = grouped_data.get_group(pd_key)
+                except KeyError:
+                    # XXX we are adding this to allow backwards compatibility
+                    # with the empty artists that old categorical plots would
+                    # add (before 0.12), which we may decide to break, in which
+                    # case this option could be removed
+                    data_subset = data.loc[[]]
+
+                if data_subset.empty and not allow_empty:
+                    continue
+
+                sub_vars = dict(zip(grouping_vars, key))
+
+                yield sub_vars, data_subset.copy()
+
+        else:
+
+            yield {}, data.copy()

     @property
     def comp_data(self):
         """Dataframe with numeric x and y, after unit conversion and log scaling."""
-        pass
+        if not hasattr(self, "ax"):
+            # Probably a good idea, but will need a bunch of tests updated
+            # Most of these tests should just use the external interface
+            # Then this can be re-enabled.
+            # raise AttributeError("No Axes attached to plotter")
+            return self.plot_data
+
+        if not hasattr(self, "_comp_data"):
+
+            comp_data = (
+                self.plot_data
+                .copy(deep=False)
+                .drop(["x", "y"], axis=1, errors="ignore")
+            )
+
+            for var in "yx":
+                if var not in self.variables:
+                    continue
+
+                parts = []
+                grouped = self.plot_data[var].groupby(self.converters[var], sort=False)
+                for converter, orig in grouped:
+                    orig = orig.mask(orig.isin([np.inf, -np.inf]), np.nan)
+                    orig = orig.dropna()
+                    if var in self.var_levels:
+                        # TODO this should happen in some centralized location
+                        # it is similar to GH2419, but more complicated because
+                        # supporting `order` in categorical plots is tricky
+                        orig = orig[orig.isin(self.var_levels[var])]
+                    comp = pd.to_numeric(converter.convert_units(orig)).astype(float)
+                    transform = converter.get_transform().transform
+                    parts.append(pd.Series(transform(comp), orig.index, name=orig.name))
+                if parts:
+                    comp_col = pd.concat(parts)
+                else:
+                    comp_col = pd.Series(dtype=float, name=var)
+                comp_data.insert(0, var, comp_col)
+
+            self._comp_data = comp_data
+
+        return self._comp_data

     def _get_axes(self, sub_vars):
         """Return an Axes object based on existence of row/col variables."""
-        pass
-
-    def _attach(self, obj, allowed_types=None, log_scale=None):
+        row = sub_vars.get("row", None)
+        col = sub_vars.get("col", None)
+        if row is not None and col is not None:
+            return self.facets.axes_dict[(row, col)]
+        elif row is not None:
+            return self.facets.axes_dict[row]
+        elif col is not None:
+            return self.facets.axes_dict[col]
+        elif self.ax is None:
+            return self.facets.ax
+        else:
+            return self.ax
+
+    def _attach(
+        self,
+        obj,
+        allowed_types=None,
+        log_scale=None,
+    ):
         """Associate the plotter with an Axes manager and initialize its units.

         Parameters
@@ -316,25 +1046,314 @@ class VectorPlotter:
             arguments for the x and y axes.

         """
-        pass
+        from .axisgrid import FacetGrid
+        if isinstance(obj, FacetGrid):
+            self.ax = None
+            self.facets = obj
+            ax_list = obj.axes.flatten()
+            if obj.col_names is not None:
+                self.var_levels["col"] = obj.col_names
+            if obj.row_names is not None:
+                self.var_levels["row"] = obj.row_names
+        else:
+            self.ax = obj
+            self.facets = None
+            ax_list = [obj]
+
+        # Identify which "axis" variables we have defined
+        axis_variables = set("xy").intersection(self.variables)
+
+        # -- Verify the types of our x and y variables here.
+        # This doesn't really make complete sense being here, but it's a fine
+        # place for it, given the current system.
+        # (Note that for some plots, there might be more complicated restrictions)
+        # e.g. the categorical plots have their own check that is specific to the
+        # non-categorical axis.
+        if allowed_types is None:
+            allowed_types = ["numeric", "datetime", "categorical"]
+        elif isinstance(allowed_types, str):
+            allowed_types = [allowed_types]
+
+        for var in axis_variables:
+            var_type = self.var_types[var]
+            if var_type not in allowed_types:
+                err = (
+                    f"The {var} variable is {var_type}, but one of "
+                    f"{allowed_types} is required"
+                )
+                raise TypeError(err)
+
+        # -- Get axis objects for each row in plot_data for type conversions and scaling
+
+        facet_dim = {"x": "col", "y": "row"}
+
+        self.converters = {}
+        for var in axis_variables:
+            other_var = {"x": "y", "y": "x"}[var]
+
+            converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)
+            share_state = getattr(self.facets, f"_share{var}", True)
+
+            # Simplest cases are that we have a single axes, all axes are shared,
+            # or sharing is only on the orthogonal facet dimension. In these cases,
+            # all datapoints get converted the same way, so use the first axis
+            if share_state is True or share_state == facet_dim[other_var]:
+                converter.loc[:] = getattr(ax_list[0], f"{var}axis")
+
+            else:
+
+                # Next simplest case is when no axes are shared, and we can
+                # use the axis objects within each facet
+                if share_state is False:
+                    for axes_vars, axes_data in self.iter_data():
+                        ax = self._get_axes(axes_vars)
+                        converter.loc[axes_data.index] = getattr(ax, f"{var}axis")
+
+                # In the more complicated case, the axes are shared within each
+                # "file" of the facetgrid. In that case, we need to subset the data
+                # for that file and assign it the first axis in the slice of the grid
+                else:
+
+                    names = getattr(self.facets, f"{share_state}_names")
+                    for i, level in enumerate(names):
+                        idx = (i, 0) if share_state == "row" else (0, i)
+                        axis = getattr(self.facets.axes[idx], f"{var}axis")
+                        converter.loc[self.plot_data[share_state] == level] = axis
+
+            # Store the converter vector, which we use elsewhere (e.g comp_data)
+            self.converters[var] = converter
+
+            # Now actually update the matplotlib objects to do the conversion we want
+            grouped = self.plot_data[var].groupby(self.converters[var], sort=False)
+            for converter, seed_data in grouped:
+                if self.var_types[var] == "categorical":
+                    if self._var_ordered[var]:
+                        order = self.var_levels[var]
+                    else:
+                        order = None
+                    seed_data = categorical_order(seed_data, order)
+                converter.update_units(seed_data)
+
+        # -- Set numerical axis scales
+
+        # First unpack the log_scale argument
+        if log_scale is None:
+            scalex = scaley = False
+        else:
+            # Allow single value or x, y tuple
+            try:
+                scalex, scaley = log_scale
+            except TypeError:
+                scalex = log_scale if self.var_types.get("x") == "numeric" else False
+                scaley = log_scale if self.var_types.get("y") == "numeric" else False
+
+        # Now use it
+        for axis, scale in zip("xy", (scalex, scaley)):
+            if scale:
+                for ax in ax_list:
+                    set_scale = getattr(ax, f"set_{axis}scale")
+                    if scale is True:
+                        set_scale("log", nonpositive="mask")
+                    else:
+                        set_scale("log", base=scale, nonpositive="mask")
+
+        # For categorical y, we want the "first" level to be at the top of the axis
+        if self.var_types.get("y", None) == "categorical":
+            for ax in ax_list:
+                ax.yaxis.set_inverted(True)
+
+        # TODO -- Add axes labels

     def _get_scale_transforms(self, axis):
         """Return a function implementing the scale transform (or its inverse)."""
-        pass
+        if self.ax is None:
+            axis_list = [getattr(ax, f"{axis}axis") for ax in self.facets.axes.flat]
+            scales = {axis.get_scale() for axis in axis_list}
+            if len(scales) > 1:
+                # It is a simplifying assumption that faceted axes will always have
+                # the same scale (even if they are unshared and have distinct limits).
+                # Nothing in the seaborn API allows you to create a FacetGrid with
+                # a mixture of scales, although it's possible via matplotlib.
+                # This is constraining, but no more so than previous behavior that
+                # only (properly) handled log scales, and there are some places where
+                # it would be much too complicated to use axes-specific transforms.
+                err = "Cannot determine transform with mixed scales on faceted axes."
+                raise RuntimeError(err)
+            transform_obj = axis_list[0].get_transform()
+        else:
+            # This case is more straightforward
+            transform_obj = getattr(self.ax, f"{axis}axis").get_transform()

-    def _add_axis_labels(self, ax, default_x='', default_y=''):
-        """Add axis labels if not present, set visibility to match ticklabels."""
-        pass
+        return transform_obj.transform, transform_obj.inverted().transform

-    def add_legend_data(self, ax, func, common_kws=None, attrs=None,
-        semantic_kws=None):
+    def _add_axis_labels(self, ax, default_x="", default_y=""):
+        """Add axis labels if not present, set visibility to match ticklabels."""
+        # TODO ax could default to None and use attached axes if present
+        # but what to do about the case of facets? Currently using FacetGrid's
+        # set_axis_labels method, which doesn't add labels to the interior even
+        # when the axes are not shared. Maybe that makes sense?
+        if not ax.get_xlabel():
+            x_visible = any(t.get_visible() for t in ax.get_xticklabels())
+            ax.set_xlabel(self.variables.get("x", default_x), visible=x_visible)
+        if not ax.get_ylabel():
+            y_visible = any(t.get_visible() for t in ax.get_yticklabels())
+            ax.set_ylabel(self.variables.get("y", default_y), visible=y_visible)
+
+    def add_legend_data(
+        self, ax, func, common_kws=None, attrs=None, semantic_kws=None,
+    ):
         """Add labeled artists to represent the different plot semantics."""
-        pass
-
-    def _update_legend_data(self, update, var, verbosity, title, title_kws,
-        attr_names, other_props):
+        verbosity = self.legend
+        if isinstance(verbosity, str) and verbosity not in ["auto", "brief", "full"]:
+            err = "`legend` must be 'auto', 'brief', 'full', or a boolean."
+            raise ValueError(err)
+        elif verbosity is True:
+            verbosity = "auto"
+
+        keys = []
+        legend_kws = {}
+        common_kws = {} if common_kws is None else common_kws.copy()
+        semantic_kws = {} if semantic_kws is None else semantic_kws.copy()
+
+        # Assign a legend title if there is only going to be one sub-legend,
+        # otherwise, subtitles will be inserted into the texts list with an
+        # invisible handle (which is a hack)
+        titles = {
+            title for title in
+            (self.variables.get(v, None) for v in ["hue", "size", "style"])
+            if title is not None
+        }
+        title = "" if len(titles) != 1 else titles.pop()
+        title_kws = dict(
+            visible=False, color="w", s=0, linewidth=0, marker="", dashes=""
+        )
+
+        def update(var_name, val_name, **kws):
+
+            key = var_name, val_name
+            if key in legend_kws:
+                legend_kws[key].update(**kws)
+            else:
+                keys.append(key)
+                legend_kws[key] = dict(**kws)
+
+        if attrs is None:
+            attrs = {"hue": "color", "size": ["linewidth", "s"], "style": None}
+        for var, names in attrs.items():
+            self._update_legend_data(
+                update, var, verbosity, title, title_kws, names, semantic_kws.get(var),
+            )
+
+        legend_data = {}
+        legend_order = []
+
+        # Don't allow color=None so we can set a neutral color for size/style legends
+        if common_kws.get("color", False) is None:
+            common_kws.pop("color")
+
+        for key in keys:
+
+            _, label = key
+            kws = legend_kws[key]
+            level_kws = {}
+            use_attrs = [
+                *self._legend_attributes,
+                *common_kws,
+                *[attr for var_attrs in semantic_kws.values() for attr in var_attrs],
+            ]
+            for attr in use_attrs:
+                if attr in kws:
+                    level_kws[attr] = kws[attr]
+            artist = func(label=label, **{"color": ".2", **common_kws, **level_kws})
+            if _version_predates(mpl, "3.5.0"):
+                if isinstance(artist, mpl.lines.Line2D):
+                    ax.add_line(artist)
+                elif isinstance(artist, mpl.patches.Patch):
+                    ax.add_patch(artist)
+                elif isinstance(artist, mpl.collections.Collection):
+                    ax.add_collection(artist)
+            else:
+                ax.add_artist(artist)
+            legend_data[key] = artist
+            legend_order.append(key)
+
+        self.legend_title = title
+        self.legend_data = legend_data
+        self.legend_order = legend_order
+
+    def _update_legend_data(
+        self,
+        update,
+        var,
+        verbosity,
+        title,
+        title_kws,
+        attr_names,
+        other_props,
+    ):
         """Generate legend tick values and formatted labels."""
-        pass
+        brief_ticks = 6
+        mapper = getattr(self, f"_{var}_map", None)
+        if mapper is None:
+            return
+
+        brief = mapper.map_type == "numeric" and (
+            verbosity == "brief"
+            or (verbosity == "auto" and len(mapper.levels) > brief_ticks)
+        )
+        if brief:
+            if isinstance(mapper.norm, mpl.colors.LogNorm):
+                locator = mpl.ticker.LogLocator(numticks=brief_ticks)
+            else:
+                locator = mpl.ticker.MaxNLocator(nbins=brief_ticks)
+            limits = min(mapper.levels), max(mapper.levels)
+            levels, formatted_levels = locator_to_legend_entries(
+                locator, limits, self.plot_data[var].infer_objects().dtype
+            )
+        elif mapper.levels is None:
+            levels = formatted_levels = []
+        else:
+            levels = formatted_levels = mapper.levels
+
+        if not title and self.variables.get(var, None) is not None:
+            update((self.variables[var], "title"), self.variables[var], **title_kws)
+
+        other_props = {} if other_props is None else other_props
+
+        for level, formatted_level in zip(levels, formatted_levels):
+            if level is not None:
+                attr = mapper(level)
+                if isinstance(attr_names, list):
+                    attr = {name: attr for name in attr_names}
+                elif attr_names is not None:
+                    attr = {attr_names: attr}
+                attr.update({k: v[level] for k, v in other_props.items() if level in v})
+                update(self.variables[var], formatted_level, **attr)
+
+    # XXX If the scale_* methods are going to modify the plot_data structure, they
+    # can't be called twice. That means that if they are called twice, they should
+    # raise. Alternatively, we could store an original version of plot_data and each
+    # time they are called they operate on the store, not the current state.
+
+    def scale_native(self, axis, *args, **kwargs):
+
+        # Default, defer to matplotlib
+
+        raise NotImplementedError
+
+    def scale_numeric(self, axis, *args, **kwargs):
+
+        # Feels needed for completeness; what should it do?
+        # Perhaps handle log scaling? Set the ticker/formatter/limits?
+
+        raise NotImplementedError
+
+    def scale_datetime(self, axis, *args, **kwargs):
+
+        # Use pd.to_datetime to convert strings or numbers to datetime objects
+        # Note, use day-resolution for numeric->datetime to match matplotlib
+
+        raise NotImplementedError

     def scale_categorical(self, axis, order=None, formatter=None):
         """
@@ -354,7 +1373,80 @@ class VectorPlotter:
         self

         """
-        pass
+        # This method both modifies the internal representation of the data
+        # (converting it to string) and sets some attributes on self. It might be
+        # a good idea to have a separate object attached to self that contains the
+        # information in those attributes (i.e. whether to enforce variable order
+        # across facets, the order to use) similar to the SemanticMapping objects
+        # we have for semantic variables. That object could also hold the converter
+        # objects that get used, if we can decouple those from an existing axis
+        # (cf. https://github.com/matplotlib/matplotlib/issues/19229).
+        # There are some interactions with faceting information that would need
+        # to be thought through, since the converters to use depend on facets.
+        # If we go that route, these methods could become "borrowed" methods similar
+        # to what happens with the alternate semantic mapper constructors, although
+        # that approach is kind of fussy and confusing.
+
+        # TODO this method could also set the grid state? Since we like to have no
+        # grid on the categorical axis by default. Again, a case where we'll need to
+        # store information until we use it, so best to have a way to collect the
+        # attributes that this method sets.
+
+        # TODO if we are going to set visual properties of the axes with these methods,
+        # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis
+
+        # TODO another, and distinct idea, is to expose a cut= param here
+
+        _check_argument("axis", ["x", "y"], axis)
+
+        # Categorical plots can be "univariate" in which case they get an anonymous
+        # category label on the opposite axis.
+        if axis not in self.variables:
+            self.variables[axis] = None
+            self.var_types[axis] = "categorical"
+            self.plot_data[axis] = ""
+
+        # If the "categorical" variable has a numeric type, sort the rows so that
+        # the default result from categorical_order has those values sorted after
+        # they have been coerced to strings. The reason for this is so that later
+        # we can get facet-wise orders that are correct.
+        # XXX Should this also sort datetimes?
+        # It feels more consistent, but technically it would be a default change.
+        # If so, categorical_order should also be changed to behave that way.
+        if self.var_types[axis] == "numeric":
+            self.plot_data = self.plot_data.sort_values(axis, kind="mergesort")
+
+        # Now get a reference to the categorical data vector and remove na values
+        cat_data = self.plot_data[axis].dropna()
+
+        # Get the initial categorical order, which we do before string
+        # conversion to respect the original types of the order list.
+        # Track whether the order is given explicitly so that we can know
+        # whether or not to use the order constructed here downstream
+        self._var_ordered[axis] = order is not None or cat_data.dtype.name == "category"
+        order = pd.Index(categorical_order(cat_data, order), name=axis)
+
+        # Then convert data to strings. This is because in matplotlib,
+        # "categorical" data really mean "string" data, so doing this artists
+        # will be drawn on the categorical axis with a fixed scale.
+        # TODO implement formatter here; check that it returns strings?
+        if formatter is not None:
+            cat_data = cat_data.map(formatter)
+            order = order.map(formatter)
+        else:
+            cat_data = cat_data.astype(str)
+            order = order.astype(str)
+
+        # Update the levels list with the type-converted order variable
+        self.var_levels[axis] = order
+
+        # Now ensure that seaborn will use categorical rules internally
+        self.var_types[axis] = "categorical"
+
+        # Put the string-typed categorical vector back into the plot_data structure
+        self.plot_data[axis] = cat_data
+
+        return self


 class VariableType(UserString):
@@ -365,7 +1457,8 @@ class VariableType(UserString):
     them. If that changes, they should be more verbose.

     """
-    allowed = 'numeric', 'datetime', 'categorical'
+    # TODO we can replace this with typing.Literal on Python 3.8+
+    allowed = "numeric", "datetime", "categorical"

     def __init__(self, data):
         assert data in self.allowed, data
@@ -376,7 +1469,7 @@ class VariableType(UserString):
         return self.data == other


-def variable_type(vector, boolean_type='numeric'):
+def variable_type(vector, boolean_type="numeric"):
     """
     Determine whether a vector contains numeric, categorical, or datetime data.

@@ -399,7 +1492,73 @@ def variable_type(vector, boolean_type='numeric'):
     var_type : 'numeric', 'categorical', or 'datetime'
         Name identifying the type of data in the vector.
     """
-    pass
+    vector = pd.Series(vector)
+
+    # If a categorical dtype is set, infer categorical
+    if isinstance(vector.dtype, pd.CategoricalDtype):
+        return VariableType("categorical")
+
+    # Special-case all-na data, which is always "numeric"
+    if pd.isna(vector).all():
+        return VariableType("numeric")
+
+    # At this point, drop nans to simplify further type inference
+    vector = vector.dropna()
+
+    # Special-case binary/boolean data, allow caller to determine
+    # This triggers a numpy warning when vector has strings/objects
+    # https://github.com/numpy/numpy/issues/6784
+    # Because we reduce with .all(), we are agnostic about whether the
+    # comparison returns a scalar or vector, so we will ignore the warning.
+    # It triggers a separate DeprecationWarning when the vector has datetimes:
+    # https://github.com/numpy/numpy/issues/13548
+    # This is considered a bug by numpy and will likely go away.
+    with warnings.catch_warnings():
+        warnings.simplefilter(
+            action='ignore', category=(FutureWarning, DeprecationWarning)
+        )
+        try:
+            if np.isin(vector, [0, 1]).all():
+                return VariableType(boolean_type)
+        except TypeError:
+            # .isin comparison is not guaranteed to be possible under NumPy
+            # casting rules, depending on the (unknown) dtype of 'vector'
+            pass
+
+    # Defer to positive pandas tests
+    if pd.api.types.is_numeric_dtype(vector):
+        return VariableType("numeric")
+
+    if pd.api.types.is_datetime64_dtype(vector):
+        return VariableType("datetime")
+
+    # --- If we get to here, we need to check the entries
+
+    # Check for a collection where everything is a number
+
+    def all_numeric(x):
+        for x_i in x:
+            if not isinstance(x_i, Number):
+                return False
+        return True
+
+    if all_numeric(vector):
+        return VariableType("numeric")
+
+    # Check for a collection where everything is a datetime
+
+    def all_datetime(x):
+        for x_i in x:
+            if not isinstance(x_i, (datetime, np.datetime64)):
+                return False
+        return True
+
+    if all_datetime(vector):
+        return VariableType("datetime")
+
+    # Otherwise, our final fallback is to consider things categorical
+
+    return VariableType("categorical")


 def infer_orient(x=None, y=None, orient=None, require_numeric=True):
@@ -430,7 +1589,59 @@ def infer_orient(x=None, y=None, orient=None, require_numeric=True):
     TypeError: When dependent variable is not numeric, with `require_numeric`

     """
-    pass
+
+    x_type = None if x is None else variable_type(x)
+    y_type = None if y is None else variable_type(y)
+
+    nonnumeric_dv_error = "{} orientation requires numeric `{}` variable."
+    single_var_warning = "{} orientation ignored with only `{}` specified."
+
+    if x is None:
+        if str(orient).startswith("h"):
+            warnings.warn(single_var_warning.format("Horizontal", "y"))
+        if require_numeric and y_type != "numeric":
+            raise TypeError(nonnumeric_dv_error.format("Vertical", "y"))
+        return "x"
+
+    elif y is None:
+        if str(orient).startswith("v"):
+            warnings.warn(single_var_warning.format("Vertical", "x"))
+        if require_numeric and x_type != "numeric":
+            raise TypeError(nonnumeric_dv_error.format("Horizontal", "x"))
+        return "y"
+
+    elif str(orient).startswith("v") or orient == "x":
+        if require_numeric and y_type != "numeric":
+            raise TypeError(nonnumeric_dv_error.format("Vertical", "y"))
+        return "x"
+
+    elif str(orient).startswith("h") or orient == "y":
+        if require_numeric and x_type != "numeric":
+            raise TypeError(nonnumeric_dv_error.format("Horizontal", "x"))
+        return "y"
+
+    elif orient is not None:
+        err = (
+            "`orient` must start with 'v' or 'h' or be None, "
+            f"but `{repr(orient)}` was passed."
+        )
+        raise ValueError(err)
+
+    elif x_type != "categorical" and y_type == "categorical":
+        return "y"
+
+    elif x_type != "numeric" and y_type == "numeric":
+        return "x"
+
+    elif x_type == "numeric" and y_type != "numeric":
+        return "y"
+
+    elif require_numeric and "numeric" not in (x_type, y_type):
+        err = "Neither the `x` nor `y` variable appears to be numeric."
+        raise TypeError(err)
+
+    else:
+        return "x"


 def unique_dashes(n):
@@ -450,7 +1661,38 @@ def unique_dashes(n):
         dashes.

     """
-    pass
+    # Start with dash specs that are well distinguishable
+    dashes = [
+        "",
+        (4, 1.5),
+        (1, 1),
+        (3, 1.25, 1.5, 1.25),
+        (5, 1, 1, 1),
+    ]
+
+    # Now programmatically build as many as we need
+    p = 3
+    while len(dashes) < n:
+
+        # Take combinations of long and short dashes
+        a = itertools.combinations_with_replacement([3, 1.25], p)
+        b = itertools.combinations_with_replacement([4, 1], p)
+
+        # Interleave the combinations, reversing one of the streams
+        segment_list = itertools.chain(*zip(
+            list(a)[1:-1][::-1],
+            list(b)[1:-1]
+        ))
+
+        # Now insert the gaps
+        for segments in segment_list:
+            gap = min(segments)
+            spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))
+            dashes.append(spec)
+
+        p += 1
+
+    return dashes[:n]


 def unique_markers(n):
@@ -468,7 +1710,35 @@ def unique_markers(n):
         All markers will be filled.

     """
-    pass
+    # Start with marker specs that are well distinguishable
+    markers = [
+        "o",
+        "X",
+        (4, 0, 45),
+        "P",
+        (4, 0, 0),
+        (4, 1, 0),
+        "^",
+        (4, 1, 45),
+        "v",
+    ]
+
+    # Now generate more from regular polygons of increasing order
+    s = 5
+    while len(markers) < n:
+        a = 360 / (s + 1) / 2
+        markers.extend([
+            (s + 1, 1, a),
+            (s + 1, 0, a),
+            (s, 1, 0),
+            (s, 0, 0),
+        ])
+        s += 1
+
+    # Convert to MarkerStyle object, using only exactly what we need
+    # markers = [mpl.markers.MarkerStyle(m) for m in markers[:n]]
+
+    return markers[:n]


 def categorical_order(vector, order=None):
@@ -490,4 +1760,18 @@ def categorical_order(vector, order=None):
         Ordered list of category levels not including null values.

     """
-    pass
+    if order is None:
+        if hasattr(vector, "categories"):
+            order = vector.categories
+        else:
+            try:
+                order = vector.cat.categories
+            except (TypeError, AttributeError):
+
+                order = pd.Series(vector).unique()
+
+                if variable_type(vector) == "numeric":
+                    order = np.sort(order)
+
+        order = filter(pd.notnull, order)
+    return list(order)
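
Quick illustration of the type-inference helpers restored above (a sketch, not part of the patch;
it assumes the patched seaborn is importable and only exercises functions shown in these hunks):

    import pandas as pd
    from seaborn._base import variable_type, infer_orient, categorical_order

    variable_type(pd.Series([1.0, 2.0, 3.0]))                             # 'numeric'
    variable_type(pd.Series([True, False]), boolean_type="categorical")   # 'categorical'
    variable_type(pd.Series(["a", "b", "a"]))                             # 'categorical'
    infer_orient(x=["a", "b"], y=[1, 2])                # 'x' (categorical x, numeric y)
    categorical_order(pd.Series(["b", "a", "b"]))       # ['b', 'a'] (order of appearance)
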
diff --git a/seaborn/_compat.py b/seaborn/_compat.py
index 76dc5054..bd2f0c12 100644
--- a/seaborn/_compat.py
+++ b/seaborn/_compat.py
@@ -1,5 +1,6 @@
 from __future__ import annotations
 from typing import Literal
+
 import numpy as np
 import pandas as pd
 import matplotlib as mpl
@@ -9,35 +10,114 @@ from seaborn.utils import _version_predates

 def norm_from_scale(scale, norm):
     """Produce a Normalize object given a Scale and min/max domain limits."""
-    pass
+    # This mirrors an internal matplotlib function; direct access to it would simplify things.
+    # It is likely to become part of the matplotlib API at some point:
+    # https://github.com/matplotlib/matplotlib/issues/20329
+    if isinstance(norm, mpl.colors.Normalize):
+        return norm
+
+    if scale is None:
+        return None
+
+    if norm is None:
+        vmin = vmax = None
+    else:
+        vmin, vmax = norm  # TODO more helpful error if this fails?
+
+    class ScaledNorm(mpl.colors.Normalize):
+
+        def __call__(self, value, clip=None):
+            # From github.com/matplotlib/matplotlib/blob/v3.4.2/lib/matplotlib/colors.py
+            # See github.com/matplotlib/matplotlib/tree/v3.4.2/LICENSE
+            value, is_scalar = self.process_value(value)
+            self.autoscale_None(value)
+            if self.vmin > self.vmax:
+                raise ValueError("vmin must be less or equal to vmax")
+            if self.vmin == self.vmax:
+                return np.full_like(value, 0)
+            if clip is None:
+                clip = self.clip
+            if clip:
+                value = np.clip(value, self.vmin, self.vmax)
+            # ***** Seaborn changes start ****
+            t_value = self.transform(value).reshape(np.shape(value))
+            t_vmin, t_vmax = self.transform([self.vmin, self.vmax])
+            # ***** Seaborn changes end *****
+            if not np.isfinite([t_vmin, t_vmax]).all():
+                raise ValueError("Invalid vmin or vmax")
+            t_value -= t_vmin
+            t_value /= (t_vmax - t_vmin)
+            t_value = np.ma.masked_invalid(t_value, copy=False)
+            return t_value[0] if is_scalar else t_value
+
+    new_norm = ScaledNorm(vmin, vmax)
+    new_norm.transform = scale.get_transform().transform
+
+    return new_norm


 def get_colormap(name):
     """Handle changes to matplotlib colormap interface in 3.6."""
-    pass
+    try:
+        return mpl.colormaps[name]
+    except AttributeError:
+        return mpl.cm.get_cmap(name)


 def register_colormap(name, cmap):
     """Handle changes to matplotlib colormap interface in 3.6."""
-    pass
+    try:
+        if name not in mpl.colormaps:
+            mpl.colormaps.register(cmap, name=name)
+    except AttributeError:
+        mpl.cm.register_cmap(name, cmap)


-def set_layout_engine(fig: Figure, engine: Literal['constrained',
-    'compressed', 'tight', 'none']) ->None:
+def set_layout_engine(
+    fig: Figure,
+    engine: Literal["constrained", "compressed", "tight", "none"],
+) -> None:
     """Handle changes to auto layout engine interface in 3.6"""
-    pass
+    if hasattr(fig, "set_layout_engine"):
+        fig.set_layout_engine(engine)
+    else:
+        # _version_predates(mpl, 3.6)
+        if engine == "tight":
+            fig.set_tight_layout(True)  # type: ignore  # predates typing
+        elif engine == "constrained":
+            fig.set_constrained_layout(True)  # type: ignore
+        elif engine == "none":
+            fig.set_tight_layout(False)  # type: ignore
+            fig.set_constrained_layout(False)  # type: ignore


-def get_layout_engine(fig: Figure) ->(mpl.layout_engine.LayoutEngine | None):
+def get_layout_engine(fig: Figure) -> mpl.layout_engine.LayoutEngine | None:
     """Handle changes to auto layout engine interface in 3.6"""
-    pass
+    if hasattr(fig, "get_layout_engine"):
+        return fig.get_layout_engine()
+    else:
+        # _version_predates(mpl, 3.6)
+        return None


 def share_axis(ax0, ax1, which):
     """Handle changes to post-hoc axis sharing."""
-    pass
+    if _version_predates(mpl, "3.5"):
+        group = getattr(ax0, f"get_shared_{which}_axes")()
+        group.join(ax1, ax0)
+    else:
+        getattr(ax1, f"share{which}")(ax0)


 def get_legend_handles(legend):
     """Handle legendHandles attribute rename."""
-    pass
+    if _version_predates(mpl, "3.7"):
+        return legend.legendHandles
+    else:
+        return legend.legend_handles
+
+
+def groupby_apply_include_groups(val):
+    if _version_predates(pd, "2.2.0"):
+        return {}
+    return {"include_groups": val}
diff --git a/seaborn/_core/data.py b/seaborn/_core/data.py
index e6ece7c9..c17bfe95 100644
--- a/seaborn/_core/data.py
+++ b/seaborn/_core/data.py
@@ -2,11 +2,14 @@
 Components for parsing variable assignments and internally representing plot data.
 """
 from __future__ import annotations
+
 from collections.abc import Mapping, Sized
 from typing import cast
 import warnings
+
 import pandas as pd
 from pandas import DataFrame
+
 from seaborn._core.typing import DataSource, VariableSpec, ColumnName
 from seaborn.utils import _version_predates

@@ -45,30 +48,87 @@ class PlotData:
     source_data: DataSource
     source_vars: dict[str, VariableSpec]

-    def __init__(self, data: DataSource, variables: dict[str, VariableSpec]):
+    def __init__(
+        self,
+        data: DataSource,
+        variables: dict[str, VariableSpec],
+    ):
+
         data = handle_data_source(data)
         frame, names, ids = self._assign_variables(data, variables)
+
         self.frame = frame
         self.names = names
         self.ids = ids
+
+        # The reason we possibly have a dictionary of frames is to support the
+        # Plot.pair operation, post scaling, where each x/y variable needs its
+        # own frame. This feels pretty clumsy and there are a bunch of places in
+        # the client code with awkward if frame / elif frames constructions.
+        # It would be great to have a cleaner abstraction here.
         self.frames = {}
+
         self.source_data = data
         self.source_vars = variables

-    def __contains__(self, key: str) ->bool:
+    def __contains__(self, key: str) -> bool:
         """Boolean check on whether a variable is defined in this dataset."""
         if self.frame is None:
             return any(key in df for df in self.frames.values())
         return key in self.frame

-    def join(self, data: DataSource, variables: (dict[str, VariableSpec] |
-        None)) ->PlotData:
+    def join(
+        self,
+        data: DataSource,
+        variables: dict[str, VariableSpec] | None,
+    ) -> PlotData:
         """Add, replace, or drop variables and return as a new dataset."""
-        pass
+        # Inherit the original source of the upstream data by default
+        if data is None:
+            data = self.source_data
+
+        # TODO allow `data` to be a function (that is called on the source data?)
+
+        if not variables:
+            variables = self.source_vars
+
+        # Passing var=None implies that we do not want that variable in this layer
+        disinherit = [k for k, v in variables.items() if v is None]
+
+        # Create a new dataset with just the info passed here
+        new = PlotData(data, variables)

-    def _assign_variables(self, data: (DataFrame | Mapping | None),
-        variables: dict[str, VariableSpec]) ->tuple[DataFrame, dict[str, 
-        str | None], dict[str, str | int]]:
+        # -- Update the inherited DataSource with this new information
+
+        drop_cols = [k for k in self.frame if k in new.frame or k in disinherit]
+        parts = [self.frame.drop(columns=drop_cols), new.frame]
+
+        # Because we are combining distinct columns, this is perhaps more
+        # naturally thought of as a "merge"/"join". But using concat because
+        # some simple testing suggests that it is marginally faster.
+        frame = pd.concat(parts, axis=1, sort=False, copy=False)
+
+        names = {k: v for k, v in self.names.items() if k not in disinherit}
+        names.update(new.names)
+
+        ids = {k: v for k, v in self.ids.items() if k not in disinherit}
+        ids.update(new.ids)
+
+        new.frame = frame
+        new.names = names
+        new.ids = ids
+
+        # Multiple chained operations should always inherit from the original object
+        new.source_data = self.source_data
+        new.source_vars = self.source_vars
+
+        return new
+
+    def _assign_variables(
+        self,
+        data: DataFrame | Mapping | None,
+        variables: dict[str, VariableSpec],
+    ) -> tuple[DataFrame, dict[str, str | None], dict[str, str | int]]:
         """
         Assign values for plot variables given long-form data and/or vector inputs.

@@ -102,14 +162,158 @@ class PlotData:
             non-indexed vector datatypes that have a different length from `data`.

         """
-        pass
+        source_data: Mapping | DataFrame
+        frame: DataFrame
+        names: dict[str, str | None]
+        ids: dict[str, str | int]
+
+        plot_data = {}
+        names = {}
+        ids = {}
+
+        given_data = data is not None
+        if data is None:
+            # Data is optional; all variables can be defined as vectors
+            # But simplify downstream code by always having a usable source data object
+            source_data = {}
+        else:
+            source_data = data
+
+        # Variables can also be extracted from the index of a DataFrame
+        if isinstance(source_data, pd.DataFrame):
+            index = source_data.index.to_frame().to_dict("series")
+        else:
+            index = {}

+        for key, val in variables.items():

-def handle_data_source(data: object) ->(pd.DataFrame | Mapping | None):
+            # Simply ignore variables with no specification
+            if val is None:
+                continue
+
+            # Try to treat the argument as a key for the data collection.
+            # But be flexible about what can be used as a key.
+            # Usually it will be a string, but allow other hashables when
+            # taking from the main data object. Allow only strings to reference
+            # fields in the index, because otherwise there is too much ambiguity.
+
+            # TODO this will be rendered unnecessary by the following pandas fix:
+            # https://github.com/pandas-dev/pandas/pull/41283
+            try:
+                hash(val)
+                val_is_hashable = True
+            except TypeError:
+                val_is_hashable = False
+
+            val_as_data_key = (
+                # See https://github.com/pandas-dev/pandas/pull/41283
+                # (isinstance(val, abc.Hashable) and val in source_data)
+                (val_is_hashable and val in source_data)
+                or (isinstance(val, str) and val in index)
+            )
+
+            if val_as_data_key:
+                val = cast(ColumnName, val)
+                if val in source_data:
+                    plot_data[key] = source_data[val]
+                elif val in index:
+                    plot_data[key] = index[val]
+                names[key] = ids[key] = str(val)
+
+            elif isinstance(val, str):
+
+                # This looks like a column name, but the lookup failed.
+
+                err = f"Could not interpret value `{val}` for `{key}`. "
+                if not given_data:
+                    err += "Value is a string, but `data` was not passed."
+                else:
+                    err += "An entry with this name does not appear in `data`."
+                raise ValueError(err)
+
+            else:
+
+                # Otherwise, assume the value somehow represents data
+
+                # Ignore empty data structures
+                if isinstance(val, Sized) and len(val) == 0:
+                    continue
+
+                # If vector has no index, it must match length of data table
+                if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):
+                    if isinstance(val, Sized) and len(data) != len(val):
+                        val_cls = val.__class__.__name__
+                        err = (
+                            f"Length of {val_cls} vectors must match length of `data`"
+                            f" when both are used, but `data` has length {len(data)}"
+                            f" and the vector passed to `{key}` has length {len(val)}."
+                        )
+                        raise ValueError(err)
+
+                plot_data[key] = val
+
+                # Try to infer the original name using pandas-like metadata
+                if hasattr(val, "name"):
+                    names[key] = ids[key] = str(val.name)  # type: ignore  # mypy/1424
+                else:
+                    names[key] = None
+                    ids[key] = id(val)
+
+        # Construct a tidy plot DataFrame. This will convert a number of
+        # types automatically, aligning on index in case of pandas objects
+        # TODO Note: this fails when variable specs *only* have scalars!
+        frame = pd.DataFrame(plot_data)
+
+        return frame, names, ids
+
+
+def handle_data_source(data: object) -> pd.DataFrame | Mapping | None:
     """Convert the data source object to a common union representation."""
-    pass
+    if isinstance(data, pd.DataFrame) or hasattr(data, "__dataframe__"):
+        # The check for pd.DataFrame inheritance could be removed once the
+        # minimal pandas version supports dataframe interchange (1.5.0).
+        data = convert_dataframe_to_pandas(data)
+    elif data is not None and not isinstance(data, Mapping):
+        err = f"Data source must be a DataFrame or Mapping, not {type(data)!r}."
+        raise TypeError(err)
+
+    return data


-def convert_dataframe_to_pandas(data: object) ->pd.DataFrame:
+def convert_dataframe_to_pandas(data: object) -> pd.DataFrame:
     """Use the DataFrame exchange protocol, or fail gracefully."""
-    pass
+    if isinstance(data, pd.DataFrame):
+        return data
+
+    if not hasattr(pd.api, "interchange"):
+        msg = (
+            "Support for non-pandas DataFrame objects requires a version of pandas "
+            "that implements the DataFrame interchange protocol. Please upgrade "
+            "your pandas version or coerce your data to pandas before passing "
+            "it to seaborn."
+        )
+        raise TypeError(msg)
+
+    if _version_predates(pd, "2.0.2"):
+        msg = (
+            "DataFrame interchange with pandas<2.0.2 has some known issues. "
+            f"You are using pandas {pd.__version__}. "
+            "Continuing, but it is recommended to carefully inspect the results and to "
+            "consider upgrading."
+        )
+        warnings.warn(msg, stacklevel=2)
+
+    try:
+        # This is going to convert all columns in the input dataframe, even though
+        # we may only need one or two of them. It would be more efficient to select
+        # the columns that are going to be used in the plot prior to interchange.
+        # Solving that in general is a hard problem, especially with the objects
+        # interface where variables passed in Plot() may only be referenced later
+        # in Plot.add(). But noting here in case this seems to be a bottleneck.
+        return pd.api.interchange.from_dataframe(data)
+    except Exception as err:
+        msg = (
+            "Encountered an exception when converting data source "
+            "to a pandas DataFrame. See traceback above for details."
+        )
+        raise RuntimeError(msg) from err
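
A small sketch of the PlotData behavior implemented above (illustrative only; the column names
"a" and "b" are arbitrary):

    import pandas as pd
    from seaborn._core.data import PlotData

    df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
    data = PlotData(df, {"x": "a", "y": "b"})
    "x" in data                                  # True
    data.names["x"]                              # 'a'
    joined = data.join(None, {"color": "b"})     # inherit the source, add a variable
    list(joined.frame.columns)                   # ['x', 'y', 'color']
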
diff --git a/seaborn/_core/exceptions.py b/seaborn/_core/exceptions.py
index b90716ec..048443b0 100644
--- a/seaborn/_core/exceptions.py
+++ b/seaborn/_core/exceptions.py
@@ -18,10 +18,15 @@ class PlotSpecError(RuntimeError):
     context (e.g., scaling errors could specify the variable that failed.)

     """
-
     @classmethod
-    def _during(cls, step: str, var: str='') ->PlotSpecError:
+    def _during(cls, step: str, var: str = "") -> PlotSpecError:
         """
         Initialize the class to report the failure of a specific operation.
         """
-        pass
+        message = []
+        if var:
+            message.append(f"{step} failed for the `{var}` variable.")
+        else:
+            message.append(f"{step} failed.")
+        message.append("See the traceback above for more information.")
+        return cls(" ".join(message))
diff --git a/seaborn/_core/groupby.py b/seaborn/_core/groupby.py
index 89566c5e..cb63c670 100644
--- a/seaborn/_core/groupby.py
+++ b/seaborn/_core/groupby.py
@@ -1,8 +1,12 @@
 """Simplified split-apply-combine paradigm on dataframes for internal use."""
 from __future__ import annotations
+
 from typing import cast, Iterable
+
 import pandas as pd
+
 from seaborn._core.rules import categorical_order
+
 from typing import TYPE_CHECKING
 if TYPE_CHECKING:
     from typing import Callable
@@ -22,8 +26,7 @@ class GroupBy:
     - It increases future flexibility regarding alternate DataFrame libraries

     """
-
-    def __init__(self, order: (list[str] | dict[str, list | None])):
+    def __init__(self, order: list[str] | dict[str, list | None]):
         """
         Initialize the GroupBy from grouping variables and optional level orders.

@@ -37,21 +40,43 @@ class GroupBy:

         """
         if not order:
-            raise ValueError('GroupBy requires at least one grouping variable')
+            raise ValueError("GroupBy requires at least one grouping variable")
+
         if isinstance(order, list):
             order = {k: None for k in order}
         self.order = order

-    def _get_groups(self, data: DataFrame) ->tuple[str | list[str], Index |
-        MultiIndex]:
+    def _get_groups(
+        self, data: DataFrame
+    ) -> tuple[str | list[str], Index | MultiIndex]:
         """Return index with Cartesian product of ordered grouping variable levels."""
-        pass
+        levels = {}
+        for var, order in self.order.items():
+            if var in data:
+                if order is None:
+                    order = categorical_order(data[var])
+                levels[var] = order
+
+        grouper: str | list[str]
+        groups: Index | MultiIndex
+        if not levels:
+            grouper = []
+            groups = pd.Index([])
+        elif len(levels) > 1:
+            grouper = list(levels)
+            groups = pd.MultiIndex.from_product(levels.values(), names=grouper)
+        else:
+            grouper, = list(levels)
+            groups = pd.Index(levels[grouper], name=grouper)
+        return grouper, groups

     def _reorder_columns(self, res, data):
         """Reorder result columns to match original order with new columns appended."""
-        pass
+        cols = [c for c in data if c in res]
+        cols += [c for c in res if c not in data]
+        return res.reindex(columns=pd.Index(cols))

-    def agg(self, data: DataFrame, *args, **kwargs) ->DataFrame:
+    def agg(self, data: DataFrame, *args, **kwargs) -> DataFrame:
         """
         Reduce each group to a single row in the output.

@@ -60,9 +85,45 @@ class GroupBy:
         those combinations do not appear in the dataset.

         """
-        pass
+        grouper, groups = self._get_groups(data)

-    def apply(self, data: DataFrame, func: Callable[..., DataFrame], *args,
-        **kwargs) ->DataFrame:
+        if not grouper:
+            # We will need to see whether there are valid use cases that end up here
+            raise ValueError("No grouping variables are present in dataframe")
+
+        res = (
+            data
+            .groupby(grouper, sort=False, observed=False)
+            .agg(*args, **kwargs)
+            .reindex(groups)
+            .reset_index()
+            .pipe(self._reorder_columns, data)
+        )
+
+        return res
+
+    def apply(
+        self, data: DataFrame, func: Callable[..., DataFrame],
+        *args, **kwargs,
+    ) -> DataFrame:
         """Apply a DataFrame -> DataFrame mapping to each group."""
-        pass
+        grouper, groups = self._get_groups(data)
+
+        if not grouper:
+            return self._reorder_columns(func(data, *args, **kwargs), data)
+
+        parts = {}
+        for key, part_df in data.groupby(grouper, sort=False, observed=False):
+            parts[key] = func(part_df, *args, **kwargs)
+        stack = []
+        for key in groups:
+            if key in parts:
+                if isinstance(grouper, list):
+                    # Implies that we had a MultiIndex so key is iterable
+                    group_ids = dict(zip(grouper, cast(Iterable, key)))
+                else:
+                    group_ids = {grouper: key}
+                stack.append(parts[key].assign(**group_ids))
+
+        res = pd.concat(stack, ignore_index=True)
+        return self._reorder_columns(res, data)
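
A short sketch of the agg/apply contract defined above (illustrative; group levels are taken in
order of appearance because no explicit order is passed to GroupBy):

    import pandas as pd
    from seaborn._core.groupby import GroupBy

    df = pd.DataFrame({"g": ["a", "a", "b"], "v": [1, 2, 3]})
    gb = GroupBy(["g"])
    gb.agg(df, {"v": "mean"})
    #    g    v
    # 0  a  1.5
    # 1  b  3.0
    gb.apply(df, lambda d: d.assign(v=d["v"] - d["v"].mean()))   # demean within each group
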
diff --git a/seaborn/_core/moves.py b/seaborn/_core/moves.py
index e907dbbe..179926e7 100644
--- a/seaborn/_core/moves.py
+++ b/seaborn/_core/moves.py
@@ -1,21 +1,26 @@
 from __future__ import annotations
 from dataclasses import dataclass
 from typing import ClassVar, Callable, Optional, Union, cast
+
 import numpy as np
 from pandas import DataFrame
+
 from seaborn._core.groupby import GroupBy
 from seaborn._core.scales import Scale
 from seaborn._core.typing import Default
+
 default = Default()


 @dataclass
 class Move:
     """Base class for objects that apply simple positional transforms."""
+
     group_by_orient: ClassVar[bool] = True

-    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str,
-        scales: dict[str, Scale]) ->DataFrame:
+    def __call__(
+        self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
+    ) -> DataFrame:
         raise NotImplementedError


@@ -45,25 +50,30 @@ class Jitter(Move):
     y: float = 0
     seed: int | None = None

-    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str,
-        scales: dict[str, Scale]) ->DataFrame:
+    def __call__(
+        self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
+    ) -> DataFrame:
+
         data = data.copy()
         rng = np.random.default_rng(self.seed)

         def jitter(data, col, scale):
-            noise = rng.uniform(-0.5, +0.5, len(data))
+            noise = rng.uniform(-.5, +.5, len(data))
             offsets = noise * scale
             return data[col] + offsets
+
         if self.width is default:
             width = 0.0 if self.x or self.y else 0.2
         else:
             width = cast(float, self.width)
+
         if self.width:
-            data[orient] = jitter(data, orient, width * data['width'])
+            data[orient] = jitter(data, orient, width * data["width"])
         if self.x:
-            data['x'] = jitter(data, 'x', self.x)
+            data["x"] = jitter(data, "x", self.x)
         if self.y:
-            data['y'] = jitter(data, 'y', self.y)
+            data["y"] = jitter(data, "y", self.y)
+
         return data


@@ -85,41 +95,58 @@ class Dodge(Move):
     .. include:: ../docstrings/objects.Dodge.rst

     """
-    empty: str = 'keep'
+    empty: str = "keep"  # Options: keep, drop, fill
     gap: float = 0
+
+    # TODO accept just a str here?
+    # TODO should this always be present?
+    # TODO should the default be an "all" singleton?
     by: Optional[list[str]] = None

-    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str,
-        scales: dict[str, Scale]) ->DataFrame:
+    def __call__(
+        self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
+    ) -> DataFrame:
+
         grouping_vars = [v for v in groupby.order if v in data]
-        groups = groupby.agg(data, {'width': 'max'})
-        if self.empty == 'fill':
+        groups = groupby.agg(data, {"width": "max"})
+        if self.empty == "fill":
             groups = groups.dropna()

         def groupby_pos(s):
-            grouper = [groups[v] for v in [orient, 'col', 'row'] if v in data]
+            grouper = [groups[v] for v in [orient, "col", "row"] if v in data]
             return s.groupby(grouper, sort=False, observed=True)

         def scale_widths(w):
-            empty = 0 if self.empty == 'fill' else w.mean()
+            # TODO what value to fill missing widths??? Hard problem...
+            # TODO short circuit this if outer widths has no variance?
+            empty = 0 if self.empty == "fill" else w.mean()
             filled = w.fillna(empty)
             scale = filled.max()
             norm = filled.sum()
-            if self.empty == 'keep':
+            if self.empty == "keep":
                 w = filled
             return w / norm * scale

         def widths_to_offsets(w):
             return w.shift(1).fillna(0).cumsum() + (w - w.sum()) / 2
-        new_widths = groupby_pos(groups['width']).transform(scale_widths)
+
+        new_widths = groupby_pos(groups["width"]).transform(scale_widths)
         offsets = groupby_pos(new_widths).transform(widths_to_offsets)
+
         if self.gap:
             new_widths *= 1 - self.gap
-        groups['_dodged'] = groups[orient] + offsets
-        groups['width'] = new_widths
-        out = data.drop('width', axis=1).merge(groups, on=grouping_vars,
-            how='left').drop(orient, axis=1).rename(columns={'_dodged': orient}
-            )
+
+        groups["_dodged"] = groups[orient] + offsets
+        groups["width"] = new_widths
+
+        out = (
+            data
+            .drop("width", axis=1)
+            .merge(groups, on=grouping_vars, how="left")
+            .drop(orient, axis=1)
+            .rename(columns={"_dodged": orient})
+        )
+
         return out


@@ -133,10 +160,33 @@ class Stack(Move):
     .. include:: ../docstrings/objects.Stack.rst

     """
+    # TODO center? (or should this be a different move, eg. Stream())
+
+    def _stack(self, df, orient):
+
+        # TODO should stack do something with ymin/ymax style marks?
+        # Should there be an upstream conversion to baseline/height parameterization?

-    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str,
-        scales: dict[str, Scale]) ->DataFrame:
-        groupers = ['col', 'row', orient]
+        if df["baseline"].nunique() > 1:
+            err = "Stack move cannot be used when baselines are already heterogeneous"
+            raise RuntimeError(err)
+
+        other = {"x": "y", "y": "x"}[orient]
+        stacked_lengths = (df[other] - df["baseline"]).dropna().cumsum()
+        offsets = stacked_lengths.shift(1).fillna(0)
+
+        df[other] = stacked_lengths
+        df["baseline"] = df["baseline"] + offsets
+
+        return df
+
+    def __call__(
+        self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
+    ) -> DataFrame:
+
+        # TODO where to ensure that other semantic variables are sorted properly?
+        # TODO why are we not using the passed in groupby here?
+        groupers = ["col", "row", orient]
         return GroupBy(groupers).apply(data, self._stack, orient)


@@ -158,11 +208,13 @@ class Shift(Move):
     x: float = 0
     y: float = 0

-    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str,
-        scales: dict[str, Scale]) ->DataFrame:
+    def __call__(
+        self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
+    ) -> DataFrame:
+
         data = data.copy(deep=False)
-        data['x'] = data['x'] + self.x
-        data['y'] = data['y'] + self.y
+        data["x"] = data["x"] + self.x
+        data["y"] = data["y"] + self.y
         return data


@@ -187,13 +239,36 @@ class Norm(Move):
     .. include:: ../docstrings/objects.Norm.rst

     """
-    func: Union[Callable, str] = 'max'
+
+    func: Union[Callable, str] = "max"
     where: Optional[str] = None
     by: Optional[list[str]] = None
     percent: bool = False
+
     group_by_orient: ClassVar[bool] = False

-    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str,
-        scales: dict[str, Scale]) ->DataFrame:
-        other = {'x': 'y', 'y': 'x'}[orient]
+    def _norm(self, df, var):
+
+        if self.where is None:
+            denom_data = df[var]
+        else:
+            denom_data = df.query(self.where)[var]
+        df[var] = df[var] / denom_data.agg(self.func)
+
+        if self.percent:
+            df[var] = df[var] * 100
+
+        return df
+
+    def __call__(
+        self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
+    ) -> DataFrame:
+
+        other = {"x": "y", "y": "x"}[orient]
         return groupby.apply(data, self._norm, other)
+
+
+# TODO
+# @dataclass
+# class Ridge(Move):
+#     ...
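
In the objects interface, these Move classes are composed with marks and stats through Plot.add;
a sketch (assumes the bundled "tips" example dataset can be loaded, e.g. with network access):

    import seaborn.objects as so
    from seaborn import load_dataset

    tips = load_dataset("tips")
    (
        so.Plot(tips, x="day", y="total_bill", color="sex")
        .add(so.Bar(), so.Agg(), so.Dodge())       # aggregate, then dodge bars by color
        .add(so.Dot(), so.Dodge(), so.Jitter(.2))  # overlay dodged, jittered points
    )
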
diff --git a/seaborn/_core/plot.py b/seaborn/_core/plot.py
index 0695b4a6..14348e35 100644
--- a/seaborn/_core/plot.py
+++ b/seaborn/_core/plot.py
@@ -1,5 +1,6 @@
 """The classes for specifying and compiling a declarative visualization."""
 from __future__ import annotations
+
 import io
 import os
 import re
@@ -11,6 +12,7 @@ from collections import abc
 from collections.abc import Callable, Generator
 from typing import Any, List, Literal, Optional, cast
 from xml.etree import ElementTree
+
 from cycler import cycler
 import pandas as pd
 from pandas import DataFrame, Series, Index
@@ -20,6 +22,7 @@ from matplotlib.artist import Artist
 from matplotlib.figure import Figure
 import numpy as np
 from PIL import Image
+
 from seaborn._marks.base import Mark
 from seaborn._stats.base import Stat
 from seaborn._core.data import PlotData
@@ -28,22 +31,35 @@ from seaborn._core.scales import Scale
 from seaborn._core.subplots import Subplots
 from seaborn._core.groupby import GroupBy
 from seaborn._core.properties import PROPERTIES, Property
-from seaborn._core.typing import DataSource, VariableSpec, VariableSpecList, OrderSpec, Default
+from seaborn._core.typing import (
+    DataSource,
+    VariableSpec,
+    VariableSpecList,
+    OrderSpec,
+    Default,
+)
 from seaborn._core.exceptions import PlotSpecError
 from seaborn._core.rules import categorical_order
 from seaborn._compat import get_layout_engine, set_layout_engine
 from seaborn.utils import _version_predates
 from seaborn.rcmod import axes_style, plotting_context
 from seaborn.palettes import color_palette
+
 from typing import TYPE_CHECKING, TypedDict
 if TYPE_CHECKING:
     from matplotlib.figure import SubFigure
+
+
 default = Default()


-class Layer(TypedDict, total=(False)):
-    mark: Mark
-    stat: Stat | None
+# ---- Definitions for internal specs ---------------------------------------------- #
+
+
+class Layer(TypedDict, total=False):
+
+    mark: Mark  # TODO allow list?
+    stat: Stat | None  # TODO allow list?
     move: Move | list[Move] | None
     data: PlotData
     source: DataSource
@@ -53,23 +69,41 @@ class Layer(TypedDict, total=(False)):
     label: str | None


-class FacetSpec(TypedDict, total=(False)):
+class FacetSpec(TypedDict, total=False):
+
     variables: dict[str, VariableSpec]
     structure: dict[str, list[str]]
     wrap: int | None


-class PairSpec(TypedDict, total=(False)):
+class PairSpec(TypedDict, total=False):
+
     variables: dict[str, VariableSpec]
     structure: dict[str, list[str]]
     cross: bool
     wrap: int | None


+# --- Local helpers ---------------------------------------------------------------- #
+
+
 @contextmanager
-def theme_context(params: dict[str, Any]) ->Generator:
+def theme_context(params: dict[str, Any]) -> Generator:
     """Temporarily modify specifc matplotlib rcParams."""
-    pass
+    orig_params = {k: mpl.rcParams[k] for k in params}
+    color_codes = "bgrmyck"
+    nice_colors = [*color_palette("deep6"), (.15, .15, .15)]
+    orig_colors = [mpl.colors.colorConverter.colors[x] for x in color_codes]
+    # TODO how to allow this to reflect the color cycle when relevant?
+    try:
+        mpl.rcParams.update(params)
+        for (code, color) in zip(color_codes, nice_colors):
+            mpl.colors.colorConverter.colors[code] = color
+        yield
+    finally:
+        mpl.rcParams.update(orig_params)
+        for (code, color) in zip(color_codes, orig_colors):
+            mpl.colors.colorConverter.colors[code] = color


 def build_plot_signature(cls):
@@ -81,50 +115,113 @@ def build_plot_signature(cls):
     at which point dynamic signature generation would become more important.

     """
-    pass
+    sig = inspect.signature(cls)
+    params = [
+        inspect.Parameter("args", inspect.Parameter.VAR_POSITIONAL),
+        inspect.Parameter("data", inspect.Parameter.KEYWORD_ONLY, default=None)
+    ]
+    params.extend([
+        inspect.Parameter(name, inspect.Parameter.KEYWORD_ONLY, default=None)
+        for name in PROPERTIES
+    ])
+    new_sig = sig.replace(parameters=params)
+    cls.__signature__ = new_sig
+
+    known_properties = textwrap.fill(
+        ", ".join([f"|{p}|" for p in PROPERTIES]),
+        width=78, subsequent_indent=" " * 8,
+    )
+
+    if cls.__doc__ is not None:  # support python -OO mode
+        cls.__doc__ = cls.__doc__.format(known_properties=known_properties)
+
+    return cls
+
+
+# ---- Plot configuration ---------------------------------------------------------- #


 class ThemeConfig(mpl.RcParams):
     """
     Configuration object for the Plot.theme, using matplotlib rc parameters.
     """
-    THEME_GROUPS = ['axes', 'figure', 'font', 'grid', 'hatch', 'legend',
-        'lines', 'mathtext', 'markers', 'patch', 'savefig', 'scatter',
-        'xaxis', 'xtick', 'yaxis', 'ytick']
+    THEME_GROUPS = [
+        "axes", "figure", "font", "grid", "hatch", "legend", "lines",
+        "mathtext", "markers", "patch", "savefig", "scatter",
+        "xaxis", "xtick", "yaxis", "ytick",
+    ]

     def __init__(self):
         super().__init__()
         self.reset()

-    def reset(self) ->None:
+    @property
+    def _default(self) -> dict[str, Any]:
+
+        return {
+            **self._filter_params(mpl.rcParamsDefault),
+            **axes_style("darkgrid"),
+            **plotting_context("notebook"),
+            "axes.prop_cycle": cycler("color", color_palette("deep")),
+        }
+
+    def reset(self) -> None:
         """Update the theme dictionary with seaborn's default values."""
-        pass
+        self.update(self._default)

-    def update(self, other: (dict[str, Any] | None)=None, /, **kwds):
+    def update(self, other: dict[str, Any] | None = None, /, **kwds):
         """Update the theme with a dictionary or keyword arguments of rc parameters."""
-        pass
-
-    def _filter_params(self, params: dict[str, Any]) ->dict[str, Any]:
+        if other is not None:
+            theme = self._filter_params(other)
+        else:
+            theme = {}
+        theme.update(kwds)
+        super().update(theme)
+
+    def _filter_params(self, params: dict[str, Any]) -> dict[str, Any]:
         """Restruct to thematic rc params."""
-        pass
+        return {
+            k: v for k, v in params.items()
+            if any(k.startswith(p) for p in self.THEME_GROUPS)
+        }
+
+    def _html_table(self, params: dict[str, Any]) -> list[str]:
+
+        lines = ["<table>"]
+        for k, v in params.items():
+            row = f"<tr><td>{k}:</td><td style='text-align:left'>{v!r}</td></tr>"
+            lines.append(row)
+        lines.append("</table>")
+        return lines
+
+    def _repr_html_(self) -> str:
+
+        repr = [
+            "<div style='height: 300px'>",
+            "<div style='border-style: inset; border-width: 2px'>",
+            *self._html_table(self),
+            "</div>",
+            "</div>",
+        ]
+        return "\n".join(repr)


 class DisplayConfig(TypedDict):
     """Configuration for IPython's rich display hooks."""
-    format: Literal['png', 'svg']
+    format: Literal["png", "svg"]
     scaling: float
     hidpi: bool


 class PlotConfig:
     """Configuration for default behavior / appearance of class:`Plot` instances."""
-
     def __init__(self):
+
         self._theme = ThemeConfig()
-        self._display = {'format': 'png', 'scaling': 0.85, 'hidpi': True}
+        self._display = {"format": "png", "scaling": .85, "hidpi": True}

     @property
-    def theme(self) ->dict[str, Any]:
+    def theme(self) -> dict[str, Any]:
         """
         Dictionary of base theme parameters for :class:`Plot`.

@@ -132,10 +229,10 @@ class PlotConfig:
         https://matplotlib.org/stable/tutorials/introductory/customizing.html

         """
-        pass
+        return self._theme

     @property
-    def display(self) ->DisplayConfig:
+    def display(self) -> DisplayConfig:
         """
         Dictionary of parameters for rich display in Jupyter notebook.

@@ -146,7 +243,10 @@ class PlotConfig:
         - hidpi (bool): When True, double the DPI while preserving the size

         """
-        pass
+        return self._display
+
+
+# ---- The main interface for declarative plotting --------------------------------- #


 @build_plot_signature
@@ -185,61 +285,161 @@ class Plot:

     """
     config = PlotConfig()
+
     _data: PlotData
     _layers: list[Layer]
+
     _scales: dict[str, Scale]
     _shares: dict[str, bool | str]
     _limits: dict[str, tuple[Any, Any]]
     _labels: dict[str, str | Callable[[str], str]]
     _theme: dict[str, Any]
+
     _facet_spec: FacetSpec
     _pair_spec: PairSpec
+
     _figure_spec: dict[str, Any]
     _subplot_spec: dict[str, Any]
     _layout_spec: dict[str, Any]

-    def __init__(self, *args: (DataSource | VariableSpec), data: DataSource
-        =None, **variables: VariableSpec):
+    def __init__(
+        self,
+        *args: DataSource | VariableSpec,
+        data: DataSource = None,
+        **variables: VariableSpec,
+    ):
+
         if args:
             data, variables = self._resolve_positionals(args, data, variables)
+
         unknown = [x for x in variables if x not in PROPERTIES]
         if unknown:
-            err = (
-                f"Plot() got unexpected keyword argument(s): {', '.join(unknown)}"
-                )
+            err = f"Plot() got unexpected keyword argument(s): {', '.join(unknown)}"
             raise TypeError(err)
+
         self._data = PlotData(data, variables)
+
         self._layers = []
+
         self._scales = {}
         self._shares = {}
         self._limits = {}
         self._labels = {}
         self._theme = {}
+
         self._facet_spec = {}
         self._pair_spec = {}
+
         self._figure_spec = {}
         self._subplot_spec = {}
         self._layout_spec = {}
+
         self._target = None

-    def _resolve_positionals(self, args: tuple[DataSource | VariableSpec,
-        ...], data: DataSource, variables: dict[str, VariableSpec]) ->tuple[
-        DataSource, dict[str, VariableSpec]]:
+    def _resolve_positionals(
+        self,
+        args: tuple[DataSource | VariableSpec, ...],
+        data: DataSource,
+        variables: dict[str, VariableSpec],
+    ) -> tuple[DataSource, dict[str, VariableSpec]]:
         """Handle positional arguments, which may contain data / x / y."""
-        pass
+        if len(args) > 3:
+            err = "Plot() accepts no more than 3 positional arguments (data, x, y)."
+            raise TypeError(err)
+
+        if (
+            isinstance(args[0], (abc.Mapping, pd.DataFrame))
+            or hasattr(args[0], "__dataframe__")
+        ):
+            if data is not None:
+                raise TypeError("`data` given by both name and position.")
+            data, args = args[0], args[1:]
+
+        if len(args) == 2:
+            x, y = args
+        elif len(args) == 1:
+            x, y = *args, None
+        else:
+            x = y = None
+
+        for name, var in zip("yx", (y, x)):
+            if var is not None:
+                if name in variables:
+                    raise TypeError(f"`{name}` given by both name and position.")
+                # Keep coordinates at the front of the variables dict
+                # Cast type because we know this isn't a DataSource at this point
+                variables = {name: cast(VariableSpec, var), **variables}
+
+        return data, variables

     def __add__(self, other):
+
         if isinstance(other, Mark) or isinstance(other, Stat):
             raise TypeError("Sorry, this isn't ggplot! Perhaps try Plot.add?")
+
         other_type = other.__class__.__name__
-        raise TypeError(
-            f"Unsupported operand type(s) for +: 'Plot' and '{other_type}")
+        raise TypeError(f"Unsupported operand type(s) for +: 'Plot' and '{other_type}")
+
+    def _repr_png_(self) -> tuple[bytes, dict[str, float]] | None:
+
+        if Plot.config.display["format"] != "png":
+            return None
+        return self.plot()._repr_png_()
+
+    def _repr_svg_(self) -> str | None:
+
+        if Plot.config.display["format"] != "svg":
+            return None
+        return self.plot()._repr_svg_()

-    def _clone(self) ->Plot:
+    def _clone(self) -> Plot:
         """Generate a new object with the same information as the current spec."""
-        pass
+        new = Plot()
+
+        # TODO any way to enforce that data does not get mutated?
+        new._data = self._data
+
+        new._layers.extend(self._layers)
+
+        new._scales.update(self._scales)
+        new._shares.update(self._shares)
+        new._limits.update(self._limits)
+        new._labels.update(self._labels)
+        new._theme.update(self._theme)
+
+        new._facet_spec.update(self._facet_spec)
+        new._pair_spec.update(self._pair_spec)
+
+        new._figure_spec.update(self._figure_spec)
+        new._subplot_spec.update(self._subplot_spec)
+        new._layout_spec.update(self._layout_spec)
+
+        new._target = self._target
+
+        return new

-    def on(self, target: (Axes | SubFigure | Figure)) ->Plot:
+    def _theme_with_defaults(self) -> dict[str, Any]:
+
+        theme = self.config.theme.copy()
+        theme.update(self._theme)
+        return theme
+
+    @property
+    def _variables(self) -> list[str]:
+
+        variables = (
+            list(self._data.frame)
+            + list(self._pair_spec.get("variables", []))
+            + list(self._facet_spec.get("variables", []))
+        )
+        for layer in self._layers:
+            variables.extend(v for v in layer["vars"] if v not in variables)
+
+        # Coerce to str in return to appease mypy; we know these will only
+        # ever be strings but I don't think we can type a DataFrame that way yet
+        return [str(v) for v in variables]
+
+    def on(self, target: Axes | SubFigure | Figure) -> Plot:
         """
         Provide existing Matplotlib figure or axes for drawing the plot.

@@ -261,11 +461,36 @@ class Plot:
         .. include:: ../docstrings/objects.Plot.on.rst

         """
-        pass
+        accepted_types: tuple  # Allow tuple of various length
+        accepted_types = (
+            mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure
+        )
+        accepted_types_str = (
+            f"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}"
+        )
+
+        if not isinstance(target, accepted_types):
+            err = (
+                f"The `Plot.on` target must be an instance of {accepted_types_str}. "
+                f"You passed an instance of {target.__class__} instead."
+            )
+            raise TypeError(err)

-    def add(self, mark: Mark, *transforms: (Stat | Move), orient: (str |
-        None)=None, legend: bool=True, label: (str | None)=None, data:
-        DataSource=None, **variables: VariableSpec) ->Plot:
+        new = self._clone()
+        new._target = target
+
+        return new
+
+    def add(
+        self,
+        mark: Mark,
+        *transforms: Stat | Move,
+        orient: str | None = None,
+        legend: bool = True,
+        label: str | None = None,
+        data: DataSource = None,
+        **variables: VariableSpec,
+    ) -> Plot:
         """
         Specify a layer of the visualization in terms of mark and data transform(s).

@@ -302,10 +527,60 @@ class Plot:
         .. include:: ../docstrings/objects.Plot.add.rst

         """
-        pass
-
-    def pair(self, x: VariableSpecList=None, y: VariableSpecList=None, wrap:
-        (int | None)=None, cross: bool=True) ->Plot:
+        if not isinstance(mark, Mark):
+            msg = f"mark must be a Mark instance, not {type(mark)!r}."
+            raise TypeError(msg)
+
+        # TODO This API for transforms was a late decision, and previously Plot.add
+        # accepted 0 or 1 Stat instances and 0, 1, or a list of Move instances.
+        # It will take some work to refactor the internals so that Stat and Move are
+        # treated identically, and until then we'll need to "unpack" the transforms
+        # here and enforce limitations on the order / types.
+
+        stat: Optional[Stat]
+        move: Optional[List[Move]]
+        error = False
+        if not transforms:
+            stat, move = None, None
+        elif isinstance(transforms[0], Stat):
+            stat = transforms[0]
+            move = [m for m in transforms[1:] if isinstance(m, Move)]
+            error = len(move) != len(transforms) - 1
+        else:
+            stat = None
+            move = [m for m in transforms if isinstance(m, Move)]
+            error = len(move) != len(transforms)
+
+        if error:
+            msg = " ".join([
+                "Transforms must have at most one Stat type (in the first position),",
+                "and all others must be a Move type. Given transform type(s):",
+                ", ".join(str(type(t).__name__) for t in transforms) + "."
+            ])
+            raise TypeError(msg)
+
+        new = self._clone()
+        new._layers.append({
+            "mark": mark,
+            "stat": stat,
+            "move": move,
+            # TODO it doesn't work to supply scalars to variables, but it should
+            "vars": variables,
+            "source": data,
+            "legend": legend,
+            "label": label,
+            "orient": {"v": "x", "h": "y"}.get(orient, orient),  # type: ignore
+        })
+
+        return new
+
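+    # Illustrative sketch, not part of the patch: transforms are passed with the Stat
+    # first and any Moves after it (assumes the seaborn.objects interface and `tips`):
+    #
+    #     import seaborn.objects as so
+    #     (
+    #         so.Plot(tips, x="day", y="total_bill", color="sex")
+    #         .add(so.Bar(), so.Agg(), so.Dodge())
+    #     )
+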
+    def pair(
+        self,
+        x: VariableSpecList = None,
+        y: VariableSpecList = None,
+        wrap: int | None = None,
+        cross: bool = True,
+    ) -> Plot:
         """
         Produce subplots by pairing multiple `x` and/or `y` variables.

@@ -326,11 +601,49 @@ class Plot:
         .. include:: ../docstrings/objects.Plot.pair.rst

         """
-        pass
-
-    def facet(self, col: VariableSpec=None, row: VariableSpec=None, order:
-        (OrderSpec | dict[str, OrderSpec])=None, wrap: (int | None)=None
-        ) ->Plot:
+        # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows
+        # This may also be possible by setting `wrap=1`, but is that too unobvious?
+        # TODO PairGrid features not currently implemented: diagonals, corner
+
+        pair_spec: PairSpec = {}
+
+        axes = {"x": [] if x is None else x, "y": [] if y is None else y}
+        for axis, arg in axes.items():
+            if isinstance(arg, (str, int)):
+                err = f"You must pass a sequence of variable keys to `{axis}`"
+                raise TypeError(err)
+
+        pair_spec["variables"] = {}
+        pair_spec["structure"] = {}
+
+        for axis in "xy":
+            keys = []
+            for i, col in enumerate(axes[axis]):
+                key = f"{axis}{i}"
+                keys.append(key)
+                pair_spec["variables"][key] = col
+
+            if keys:
+                pair_spec["structure"][axis] = keys
+
+        if not cross and len(axes["x"]) != len(axes["y"]):
+            err = "Lengths of the `x` and `y` lists must match with cross=False"
+            raise ValueError(err)
+
+        pair_spec["cross"] = cross
+        pair_spec["wrap"] = wrap
+
+        new = self._clone()
+        new._pair_spec.update(pair_spec)
+        return new
+
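+    # Illustrative sketch, not part of the patch: pairing several x variables against
+    # a single y (assumes the seaborn.objects interface and a DataFrame named `mpg`):
+    #
+    #     import seaborn.objects as so
+    #     so.Plot(mpg, y="mpg").pair(x=["displacement", "weight"]).add(so.Dots())
+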
+    def facet(
+        self,
+        col: VariableSpec = None,
+        row: VariableSpec = None,
+        order: OrderSpec | dict[str, OrderSpec] = None,
+        wrap: int | None = None,
+    ) -> Plot:
         """
         Produce subplots with conditional subsets of the data.

@@ -350,9 +663,44 @@ class Plot:
         .. include:: ../docstrings/objects.Plot.facet.rst

         """
-        pass
-
-    def scale(self, **scales: Scale) ->Plot:
+        variables: dict[str, VariableSpec] = {}
+        if col is not None:
+            variables["col"] = col
+        if row is not None:
+            variables["row"] = row
+
+        structure = {}
+        if isinstance(order, dict):
+            for dim in ["col", "row"]:
+                dim_order = order.get(dim)
+                if dim_order is not None:
+                    structure[dim] = list(dim_order)
+        elif order is not None:
+            if col is not None and row is not None:
+                err = " ".join([
+                    "When faceting on both col= and row=, passing `order` as a list"
+                    "is ambiguous. Use a dict with 'col' and/or 'row' keys instead."
+                ])
+                raise RuntimeError(err)
+            elif col is not None:
+                structure["col"] = list(order)
+            elif row is not None:
+                structure["row"] = list(order)
+
+        spec: FacetSpec = {
+            "variables": variables,
+            "structure": structure,
+            "wrap": wrap,
+        }
+
+        new = self._clone()
+        new._facet_spec.update(spec)
+
+        return new
+
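+    # Illustrative sketch, not part of the patch: faceting on both dimensions with an
+    # explicit column order (assumes the seaborn.objects interface and `tips`):
+    #
+    #     import seaborn.objects as so
+    #     (
+    #         so.Plot(tips, x="total_bill")
+    #         .facet(col="day", row="sex", order={"col": ["Thur", "Fri", "Sat", "Sun"]})
+    #         .add(so.Dots())
+    #     )
+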
+    # TODO def twin()?
+
+    def scale(self, **scales: Scale) -> Plot:
         """
         Specify mappings from data units to visual properties.

@@ -375,9 +723,11 @@ class Plot:
         .. include:: ../docstrings/objects.Plot.scale.rst

         """
-        pass
+        new = self._clone()
+        new._scales.update(scales)
+        return new
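+
+    # Illustrative sketch, not part of the patch: scale arguments may be Scale objects,
+    # palette names, or transform strings (assumes the seaborn.objects interface and
+    # a DataFrame named `diamonds`):
+    #
+    #     import seaborn.objects as so
+    #     (
+    #         so.Plot(diamonds, x="carat", y="price", color="clarity")
+    #         .add(so.Dots())
+    #         .scale(y="log", color="crest")
+    #     )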

-    def share(self, **shares: (bool | str)) ->Plot:
+    def share(self, **shares: bool | str) -> Plot:
         """
         Control sharing of axis limits and ticks across subplots.

@@ -392,9 +742,11 @@ class Plot:
         .. include:: ../docstrings/objects.Plot.share.rst

         """
-        pass
+        new = self._clone()
+        new._shares.update(shares)
+        return new

-    def limit(self, **limits: tuple[Any, Any]) ->Plot:
+    def limit(self, **limits: tuple[Any, Any]) -> Plot:
         """
         Control the range of visible data.

@@ -411,10 +763,16 @@ class Plot:
         .. include:: ../docstrings/objects.Plot.limit.rst

         """
-        pass
-
-    def label(self, *, title: (str | None)=None, legend: (str | None)=None,
-        **variables: (str | Callable[[str], str])) ->Plot:
+        new = self._clone()
+        new._limits.update(limits)
+        return new
+
+    def label(
+        self, *,
+        title: str | None = None,
+        legend: str | None = None,
+        **variables: str | Callable[[str], str]
+    ) -> Plot:
         """
         Control the labels and titles for axes, legends, and subplots.

@@ -440,11 +798,21 @@ class Plot:


         """
-        pass
-
-    def layout(self, *, size: (tuple[float, float] | Default)=default,
-        engine: (str | None | Default)=default, extent: (tuple[float, float,
-        float, float] | Default)=default) ->Plot:
+        new = self._clone()
+        if title is not None:
+            new._labels["title"] = title
+        if legend is not None:
+            new._labels["legend"] = legend
+        new._labels.update(variables)
+        return new
+
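+    # Illustrative sketch, not part of the patch: labels may be literal strings or
+    # callables applied to the automatic label (assumes the seaborn.objects interface):
+    #
+    #     import seaborn.objects as so
+    #     (
+    #         so.Plot(tips, x="total_bill", y="tip")
+    #         .add(so.Dot())
+    #         .label(x=str.capitalize, y="Tip amount", title="Tips data")
+    #     )
+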
+    def layout(
+        self,
+        *,
+        size: tuple[float, float] | Default = default,
+        engine: str | None | Default = default,
+        extent: tuple[float, float, float, float] | Default = default,
+    ) -> Plot:
         """
         Control the figure size and layout.

@@ -473,9 +841,25 @@ class Plot:
         .. include:: ../docstrings/objects.Plot.layout.rst

         """
-        pass
+        # TODO add an "auto" mode for figsize that roughly scales with the rcParams
+        # figsize (so that works), but expands to prevent subplots from being squished
+        # Also should we have height=, aspect=, exclusive with figsize? Or working
+        # with figsize when only one is defined?
+
+        new = self._clone()
+
+        if size is not default:
+            new._figure_spec["figsize"] = size
+        if engine is not default:
+            new._layout_spec["engine"] = engine
+        if extent is not default:
+            new._layout_spec["extent"] = extent

-    def theme(self, config: dict[str, Any], /) ->Plot:
+        return new
+
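+    # Illustrative sketch, not part of the patch: setting the figure size (in inches)
+    # and the matplotlib layout engine (assumes the seaborn.objects interface):
+    #
+    #     import seaborn.objects as so
+    #     (
+    #         so.Plot(tips, x="total_bill")
+    #         .add(so.Bars(), so.Hist())
+    #         .layout(size=(6, 3), engine="constrained")
+    #     )
+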
+    # TODO def legend (ugh)
+
+    def theme(self, config: dict[str, Any], /) -> Plot:
         """
         Control the appearance of elements in the plot.

@@ -495,9 +879,14 @@ class Plot:
         .. include:: ../docstrings/objects.Plot.theme.rst

         """
-        pass
+        new = self._clone()
+
+        rc = mpl.RcParams(config)
+        new._theme.update(rc)
+
+        return new
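+
+    # Illustrative sketch, not part of the patch: theme() takes a dict of matplotlib
+    # rc parameters, e.g. reusing seaborn's axes_style output (assumes the
+    # seaborn.objects interface):
+    #
+    #     import seaborn.objects as so
+    #     from seaborn import axes_style
+    #     so.Plot(tips, x="total_bill").add(so.Dot()).theme({**axes_style("whitegrid")})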

-    def save(self, loc, **kwargs) ->Plot:
+    def save(self, loc, **kwargs) -> Plot:
         """
         Compile the plot and write it to a buffer or file on disk.

@@ -510,9 +899,12 @@ class Plot:
             :meth:`matplotlib.figure.Figure.savefig`.

         """
-        pass
+        # TODO expose important keyword arguments in our signature?
+        with theme_context(self._theme_with_defaults()):
+            self._plot().save(loc, **kwargs)
+        return self

-    def show(self, **kwargs) ->None:
+    def show(self, **kwargs) -> None:
         """
         Compile the plot and display it by hooking into pyplot.

@@ -524,13 +916,59 @@ class Plot:
         the last method you call when specifying a plot.

         """
-        pass
+        # TODO make pyplot configurable at the class level, and when not using,
+        # import IPython.display and call on self to populate cell output?

-    def plot(self, pyplot: bool=False) ->Plotter:
+        # Keep an eye on whether matplotlib implements "attaching" an existing
+        # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024
+
+        self.plot(pyplot=True).show(**kwargs)
+
+    def plot(self, pyplot: bool = False) -> Plotter:
         """
         Compile the plot spec and return the Plotter object.
         """
-        pass
+        with theme_context(self._theme_with_defaults()):
+            return self._plot(pyplot)
+
+    def _plot(self, pyplot: bool = False) -> Plotter:
+
+        # TODO if we have _target object, pyplot should be determined by whether it
+        # is hooked into the pyplot state machine (how do we check?)
+
+        plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())
+
+        # Process the variable assignments and initialize the figure
+        common, layers = plotter._extract_data(self)
+        plotter._setup_figure(self, common, layers)
+
+        # Process the scale spec for coordinate variables and transform their data
+        coord_vars = [v for v in self._variables if re.match(r"^x|y", v)]
+        plotter._setup_scales(self, common, layers, coord_vars)
+
+        # Apply statistical transform(s)
+        plotter._compute_stats(self, layers)
+
+        # Process scale spec for semantic variables and coordinates computed by stat
+        plotter._setup_scales(self, common, layers)
+
+        # TODO Remove these after updating other methods
+        # ---- Maybe have debug= param that attaches these when True?
+        plotter._data = common
+        plotter._layers = layers
+
+        # Process the data for each layer and add matplotlib artists
+        for layer in layers:
+            plotter._plot_layer(self, layer)
+
+        # Add various figure decorations
+        plotter._make_legend(self)
+        plotter._finalize_figure(self)
+
+        return plotter
+
+
+# ---- The plot compilation engine ---------------------------------------------- #


 class Plotter:
@@ -540,31 +978,853 @@ class Plotter:
     This class is not intended to be instantiated directly by users.

     """
+    # TODO decide if we ever want these (Plot.plot(debug=True))?
     _data: PlotData
     _layers: list[Layer]
     _figure: Figure

     def __init__(self, pyplot: bool, theme: dict[str, Any]):
+
         self._pyplot = pyplot
         self._theme = theme
-        self._legend_contents: list[tuple[tuple[str, str | int], list[
-            Artist], list[str]]] = []
+        self._legend_contents: list[tuple[
+            tuple[str, str | int], list[Artist], list[str],
+        ]] = []
         self._scales: dict[str, Scale] = {}

-    def show(self, **kwargs) ->None:
+    def save(self, loc, **kwargs) -> Plotter:  # TODO type args
+        kwargs.setdefault("dpi", 96)
+        try:
+            loc = os.path.expanduser(loc)
+        except TypeError:
+            # loc may be a buffer in which case that would not work
+            pass
+        self._figure.savefig(loc, **kwargs)
+        return self
+
+    def show(self, **kwargs) -> None:
         """
         Display the plot by hooking into pyplot.

         This method calls :func:`matplotlib.pyplot.show` with any keyword parameters.

         """
-        pass
+        # TODO if we did not create the Plotter with pyplot, is it possible to do this?
+        # If not we should clearly raise.
+        import matplotlib.pyplot as plt
+        with theme_context(self._theme):
+            plt.show(**kwargs)

-    def _update_legend_contents(self, p: Plot, mark: Mark, data: PlotData,
-        scales: dict[str, Scale], layer_label: (str | None)) ->None:
-        """Add legend artists / labels for one layer in the plot."""
-        pass
+    # TODO API for accessing the underlying matplotlib objects
+    # TODO what else is useful in the public API for this class?
+
+    def _repr_png_(self) -> tuple[bytes, dict[str, float]] | None:
+
+        # TODO use matplotlib backend directly instead of going through savefig?
+
+        # TODO perhaps have self.show() flip a switch to disable this, so that
+        # user does not end up with two versions of the figure in the output
+
+        # TODO use bbox_inches="tight" like the inline backend?
+        # pro: better results, con: (sometimes) confusing results
+        # Better solution would be to default (with option to change)
+        # to using constrained/tight layout.
+
+        if Plot.config.display["format"] != "png":
+            return None
+
+        buffer = io.BytesIO()
+
+        factor = 2 if Plot.config.display["hidpi"] else 1
+        scaling = Plot.config.display["scaling"] / factor
+        dpi = 96 * factor  # TODO put dpi in Plot.config?
+
+        with theme_context(self._theme):  # TODO _theme_with_defaults?
+            self._figure.savefig(buffer, dpi=dpi, format="png", bbox_inches="tight")
+        data = buffer.getvalue()
+
+        w, h = Image.open(buffer).size
+        metadata = {"width": w * scaling, "height": h * scaling}
+        return data, metadata

-    def _make_legend(self, p: Plot) ->None:
+    def _repr_svg_(self) -> str | None:
+
+        if Plot.config.display["format"] != "svg":
+            return None
+
+        # TODO DPI for rasterized artists?
+
+        scaling = Plot.config.display["scaling"]
+
+        buffer = io.StringIO()
+        with theme_context(self._theme):  # TODO _theme_with_defaults?
+            self._figure.savefig(buffer, format="svg", bbox_inches="tight")
+
+        root = ElementTree.fromstring(buffer.getvalue())
+        w = scaling * float(root.attrib["width"][:-2])
+        h = scaling * float(root.attrib["height"][:-2])
+        root.attrib.update(width=f"{w}pt", height=f"{h}pt", viewbox=f"0 0 {w} {h}")
+        ElementTree.ElementTree(root).write(out := io.BytesIO())
+
+        return out.getvalue().decode()
+
+    def _extract_data(self, p: Plot) -> tuple[PlotData, list[Layer]]:
+
+        common_data = (
+            p._data
+            .join(None, p._facet_spec.get("variables"))
+            .join(None, p._pair_spec.get("variables"))
+        )
+
+        layers: list[Layer] = []
+        for layer in p._layers:
+            spec = layer.copy()
+            spec["data"] = common_data.join(layer.get("source"), layer.get("vars"))
+            layers.append(spec)
+
+        return common_data, layers
+
+    def _resolve_label(self, p: Plot, var: str, auto_label: str | None) -> str:
+
+        if re.match(r"[xy]\d+", var):
+            key = var if var in p._labels else var[0]
+        else:
+            key = var
+
+        label: str
+        if key in p._labels:
+            manual_label = p._labels[key]
+            if callable(manual_label) and auto_label is not None:
+                label = manual_label(auto_label)
+            else:
+                label = cast(str, manual_label)
+        elif auto_label is None:
+            label = ""
+        else:
+            label = auto_label
+        return label
+
+    def _setup_figure(self, p: Plot, common: PlotData, layers: list[Layer]) -> None:
+
+        # --- Parsing the faceting/pairing parameterization to specify figure grid
+
+        subplot_spec = p._subplot_spec.copy()
+        facet_spec = p._facet_spec.copy()
+        pair_spec = p._pair_spec.copy()
+
+        for axis in "xy":
+            if axis in p._shares:
+                subplot_spec[f"share{axis}"] = p._shares[axis]
+
+        for dim in ["col", "row"]:
+            if dim in common.frame and dim not in facet_spec["structure"]:
+                order = categorical_order(common.frame[dim])
+                facet_spec["structure"][dim] = order
+
+        self._subplots = subplots = Subplots(subplot_spec, facet_spec, pair_spec)
+
+        # --- Figure initialization
+        self._figure = subplots.init_figure(
+            pair_spec, self._pyplot, p._figure_spec, p._target,
+        )
+
+        # --- Figure annotation
+        for sub in subplots:
+            ax = sub["ax"]
+            for axis in "xy":
+                axis_key = sub[axis]
+
+                # ~~ Axis labels
+
+                # TODO Should we make it possible to use only one x/y label for
+                # all rows/columns in a faceted plot? Maybe using sub{axis}label,
+                # although the alignments of the labels from that method leaves
+                # something to be desired (in terms of how it defines 'centered').
+                names = [
+                    common.names.get(axis_key),
+                    *(layer["data"].names.get(axis_key) for layer in layers)
+                ]
+                auto_label = next((name for name in names if name is not None), None)
+                label = self._resolve_label(p, axis_key, auto_label)
+                ax.set(**{f"{axis}label": label})
+
+                # ~~ Decoration visibility
+
+                # TODO there should be some override (in Plot.layout?) so that
+                # axis / tick labels can be shown on interior shared axes if desired
+
+                axis_obj = getattr(ax, f"{axis}axis")
+                visible_side = {"x": "bottom", "y": "left"}.get(axis)
+                show_axis_label = (
+                    sub[visible_side]
+                    or not p._pair_spec.get("cross", True)
+                    or (
+                        axis in p._pair_spec.get("structure", {})
+                        and bool(p._pair_spec.get("wrap"))
+                    )
+                )
+                axis_obj.get_label().set_visible(show_axis_label)
+
+                show_tick_labels = (
+                    show_axis_label
+                    or subplot_spec.get(f"share{axis}") not in (
+                        True, "all", {"x": "col", "y": "row"}[axis]
+                    )
+                )
+                for group in ("major", "minor"):
+                    side = {"x": "bottom", "y": "left"}[axis]
+                    axis_obj.set_tick_params(**{f"label{side}": show_tick_labels})
+                    for t in getattr(axis_obj, f"get_{group}ticklabels")():
+                        t.set_visible(show_tick_labels)
+
+            # TODO we want right-side titles for row facets in most cases?
+            # Let's have what we currently call "margin titles" but properly using the
+            # ax.set_title interface (see my gist)
+            title_parts = []
+            for dim in ["col", "row"]:
+                if sub[dim] is not None:
+                    val = self._resolve_label(p, "title", f"{sub[dim]}")
+                    if dim in p._labels:
+                        key = self._resolve_label(p, dim, common.names.get(dim))
+                        val = f"{key} {val}"
+                    title_parts.append(val)
+
+            has_col = sub["col"] is not None
+            has_row = sub["row"] is not None
+            show_title = (
+                has_col and has_row
+                or (has_col or has_row) and p._facet_spec.get("wrap")
+                or (has_col and sub["top"])
+                # TODO or has_row and sub["right"] and <right titles>
+                or has_row  # TODO and not <right titles>
+            )
+            if title_parts:
+                title = " | ".join(title_parts)
+                title_text = ax.set_title(title)
+                title_text.set_visible(show_title)
+            elif not (has_col or has_row):
+                title = self._resolve_label(p, "title", None)
+                title_text = ax.set_title(title)
+
+    def _compute_stats(self, spec: Plot, layers: list[Layer]) -> None:
+
+        grouping_vars = [v for v in PROPERTIES if v not in "xy"]
+        grouping_vars += ["col", "row", "group"]
+
+        pair_vars = spec._pair_spec.get("structure", {})
+
+        for layer in layers:
+
+            data = layer["data"]
+            mark = layer["mark"]
+            stat = layer["stat"]
+
+            if stat is None:
+                continue
+
+            iter_axes = itertools.product(*[
+                pair_vars.get(axis, [axis]) for axis in "xy"
+            ])
+
+            old = data.frame
+
+            if pair_vars:
+                data.frames = {}
+                data.frame = data.frame.iloc[:0]  # TODO to simplify typing
+
+            for coord_vars in iter_axes:
+
+                pairings = "xy", coord_vars
+
+                df = old.copy()
+                scales = self._scales.copy()
+
+                for axis, var in zip(*pairings):
+                    if axis != var:
+                        df = df.rename(columns={var: axis})
+                        drop_cols = [x for x in df if re.match(rf"{axis}\d+", str(x))]
+                        df = df.drop(drop_cols, axis=1)
+                        scales[axis] = scales[var]
+
+                orient = layer["orient"] or mark._infer_orient(scales)
+
+                if stat.group_by_orient:
+                    grouper = [orient, *grouping_vars]
+                else:
+                    grouper = grouping_vars
+                groupby = GroupBy(grouper)
+                res = stat(df, groupby, orient, scales)
+
+                if pair_vars:
+                    data.frames[coord_vars] = res
+                else:
+                    data.frame = res
+
+    def _get_scale(
+        self, p: Plot, var: str, prop: Property, values: Series
+    ) -> Scale:
+
+        if re.match(r"[xy]\d+", var):
+            key = var if var in p._scales else var[0]
+        else:
+            key = var
+
+        if key in p._scales:
+            arg = p._scales[key]
+            if arg is None or isinstance(arg, Scale):
+                scale = arg
+            else:
+                scale = prop.infer_scale(arg, values)
+        else:
+            scale = prop.default_scale(values)
+
+        return scale
+
+    def _get_subplot_data(self, df, var, view, share_state):
+
+        if share_state in [True, "all"]:
+            # The all-shared case is easiest, every subplot sees all the data
+            seed_values = df[var]
+        else:
+            # Otherwise, we need to setup separate scales for different subplots
+            if share_state in [False, "none"]:
+                # Fully independent axes are also easy: use each subplot's data
+                idx = self._get_subplot_index(df, view)
+            elif share_state in df:
+                # Sharing within row/col is more complicated
+                use_rows = df[share_state] == view[share_state]
+                idx = df.index[use_rows]
+            else:
+                # This configuration doesn't make much sense, but it's fine
+                idx = df.index
+
+            seed_values = df.loc[idx, var]
+
+        return seed_values
+
+    def _setup_scales(
+        self,
+        p: Plot,
+        common: PlotData,
+        layers: list[Layer],
+        variables: list[str] | None = None,
+    ) -> None:
+
+        if variables is None:
+            # Add variables that have data but not a scale, which happens
+            # because this method can be called multiple times, to handle
+            # variables added during the Stat transform.
+            variables = []
+            for layer in layers:
+                variables.extend(layer["data"].frame.columns)
+                for df in layer["data"].frames.values():
+                    variables.extend(str(v) for v in df if v not in variables)
+            variables = [v for v in variables if v not in self._scales]
+
+        for var in variables:
+
+            # Determine whether this is a coordinate variable
+            # (i.e., x/y, paired x/y, or derivative such as xmax)
+            m = re.match(r"^(?P<coord>(?P<axis>x|y)\d*).*", var)
+            if m is None:
+                coord = axis = None
+            else:
+                coord = m["coord"]
+                axis = m["axis"]
+
+            # Get keys that handle things like x0, xmax, properly where relevant
+            prop_key = var if axis is None else axis
+            scale_key = var if coord is None else coord
+
+            if prop_key not in PROPERTIES:
+                continue
+
+            # Concatenate layers, using only the relevant coordinate and faceting vars.
+            # This is unnecessarily wasteful, as layer data will often be redundant.
+            # But figuring out the minimal amount we need is more complicated.
+            cols = [var, "col", "row"]
+            parts = [common.frame.filter(cols)]
+            for layer in layers:
+                parts.append(layer["data"].frame.filter(cols))
+                for df in layer["data"].frames.values():
+                    parts.append(df.filter(cols))
+            var_df = pd.concat(parts, ignore_index=True)
+
+            prop = PROPERTIES[prop_key]
+            scale = self._get_scale(p, scale_key, prop, var_df[var])
+
+            if scale_key not in p._variables:
+                # TODO this implies that the variable was added by the stat
+                # It allows downstream orientation inference to work properly.
+                # But it feels rather hacky, so ideally revisit.
+                scale._priority = 0  # type: ignore
+
+            if axis is None:
+                # We could think about having a broader concept of (un)shared properties
+                # In general, not something you want to do (different scales in facets)
+                # But could make sense e.g. with paired plots. Build later.
+                share_state = None
+                subplots = []
+            else:
+                share_state = self._subplots.subplot_spec[f"share{axis}"]
+                subplots = [view for view in self._subplots if view[axis] == coord]
+
+            if scale is None:
+                self._scales[var] = Scale._identity()
+            else:
+                try:
+                    self._scales[var] = scale._setup(var_df[var], prop)
+                except Exception as err:
+                    raise PlotSpecError._during("Scale setup", var) from err
+
+            if axis is None or (var != coord and coord in p._variables):
+                # Everything below here applies only to coordinate variables
+                continue
+
+            # Set up an empty series to receive the transformed values.
+            # We need this to handle piecemeal transforms of categories -> floats.
+            transformed_data = []
+            for layer in layers:
+                index = layer["data"].frame.index
+                empty_series = pd.Series(dtype=float, index=index, name=var)
+                transformed_data.append(empty_series)
+
+            for view in subplots:
+
+                axis_obj = getattr(view["ax"], f"{axis}axis")
+                seed_values = self._get_subplot_data(var_df, var, view, share_state)
+                view_scale = scale._setup(seed_values, prop, axis=axis_obj)
+                view["ax"].set(**{f"{axis}scale": view_scale._matplotlib_scale})
+
+                for layer, new_series in zip(layers, transformed_data):
+                    layer_df = layer["data"].frame
+                    if var not in layer_df:
+                        continue
+
+                    idx = self._get_subplot_index(layer_df, view)
+                    try:
+                        new_series.loc[idx] = view_scale(layer_df.loc[idx, var])
+                    except Exception as err:
+                        spec_error = PlotSpecError._during("Scaling operation", var)
+                        raise spec_error from err
+
+            # Now the transformed data series are complete, update the layer data
+            for layer, new_series in zip(layers, transformed_data):
+                layer_df = layer["data"].frame
+                if var in layer_df:
+                    layer_df[var] = pd.to_numeric(new_series)
+
+    def _plot_layer(self, p: Plot, layer: Layer) -> None:
+
+        data = layer["data"]
+        mark = layer["mark"]
+        move = layer["move"]
+
+        default_grouping_vars = ["col", "row", "group"]  # TODO where best to define?
+        grouping_properties = [v for v in PROPERTIES if v[0] not in "xy"]
+
+        pair_variables = p._pair_spec.get("structure", {})
+
+        for subplots, df, scales in self._generate_pairings(data, pair_variables):
+
+            orient = layer["orient"] or mark._infer_orient(scales)
+
+            def get_order(var):
+                # Ignore order for x/y: they have been scaled to numeric indices,
+                # so any original order is no longer valid. The default ordering rule
+                # (sorted unique numbers) will correctly reconstruct the intended order.
+                # TODO This is tricky, make sure we add some tests for this
+                if var not in "xy" and var in scales:
+                    return getattr(scales[var], "order", None)
+
+            if orient in df:
+                width = pd.Series(index=df.index, dtype=float)
+                for view in subplots:
+                    view_idx = self._get_subplot_data(
+                        df, orient, view, p._shares.get(orient)
+                    ).index
+                    view_df = df.loc[view_idx]
+                    if "width" in mark._mappable_props:
+                        view_width = mark._resolve(view_df, "width", None)
+                    elif "width" in df:
+                        view_width = view_df["width"]
+                    else:
+                        view_width = 0.8  # TODO what default?
+                    spacing = scales[orient]._spacing(view_df.loc[view_idx, orient])
+                    width.loc[view_idx] = view_width * spacing
+                df["width"] = width
+
+            if "baseline" in mark._mappable_props:
+                # TODO what marks should have this?
+                # If we can set baseline with, e.g., Bar(), then the
+                # "other" (e.g. y for x oriented bars) parameterization
+                # is somewhat ambiguous.
+                baseline = mark._resolve(df, "baseline", None)
+            else:
+                # TODO unlike width, we might not want to add baseline to data
+                # if the mark doesn't use it. Practically, there is a concern about
+                # Mark abstraction like Area / Ribbon
+                baseline = 0 if "baseline" not in df else df["baseline"]
+            df["baseline"] = baseline
+
+            if move is not None:
+                moves = move if isinstance(move, list) else [move]
+                for move_step in moves:
+                    move_by = getattr(move_step, "by", None)
+                    if move_by is None:
+                        move_by = grouping_properties
+                    move_groupers = [*move_by, *default_grouping_vars]
+                    if move_step.group_by_orient:
+                        move_groupers.insert(0, orient)
+                    order = {var: get_order(var) for var in move_groupers}
+                    groupby = GroupBy(order)
+                    df = move_step(df, groupby, orient, scales)
+
+            df = self._unscale_coords(subplots, df, orient)
+
+            grouping_vars = mark._grouping_props + default_grouping_vars
+            split_generator = self._setup_split_generator(grouping_vars, df, subplots)
+
+            mark._plot(split_generator, scales, orient)
+
+        # TODO is this the right place for this?
+        for view in self._subplots:
+            view["ax"].autoscale_view()
+
+        if layer["legend"]:
+            self._update_legend_contents(p, mark, data, scales, layer["label"])
+
+    def _unscale_coords(
+        self, subplots: list[dict], df: DataFrame, orient: str,
+    ) -> DataFrame:
+        # TODO do we still have numbers in the variable name at this point?
+        coord_cols = [c for c in df if re.match(r"^[xy]\D*$", str(c))]
+        out_df = (
+            df
+            .drop(coord_cols, axis=1)
+            .reindex(df.columns, axis=1)  # So unscaled columns retain their place
+            .copy(deep=False)
+        )
+
+        for view in subplots:
+            view_df = self._filter_subplot_data(df, view)
+            axes_df = view_df[coord_cols]
+            for var, values in axes_df.items():
+
+                axis = getattr(view["ax"], f"{str(var)[0]}axis")
+                # TODO see https://github.com/matplotlib/matplotlib/issues/22713
+                transform = axis.get_transform().inverted().transform
+                inverted = transform(values)
+                out_df.loc[values.index, str(var)] = inverted
+
+        return out_df
+
+    def _generate_pairings(
+        self, data: PlotData, pair_variables: dict,
+    ) -> Generator[
+        tuple[list[dict], DataFrame, dict[str, Scale]], None, None
+    ]:
+        # TODO retype return with subplot_spec or similar
+
+        iter_axes = itertools.product(*[
+            pair_variables.get(axis, [axis]) for axis in "xy"
+        ])
+
+        for x, y in iter_axes:
+
+            subplots = []
+            for view in self._subplots:
+                if (view["x"] == x) and (view["y"] == y):
+                    subplots.append(view)
+
+            # Use the paired frame for this (x, y) combination when one exists;
+            # otherwise all views draw from the common frame.
+            if data.frame.empty and data.frames:
+                out_df = data.frames[(x, y)].copy()
+            else:
+                out_df = data.frame.copy()
+
+            scales = self._scales.copy()
+            if x in out_df:
+                scales["x"] = self._scales[x]
+            if y in out_df:
+                scales["y"] = self._scales[y]
+
+            for axis, var in zip("xy", (x, y)):
+                if axis != var:
+                    out_df = out_df.rename(columns={var: axis})
+                    cols = [col for col in out_df if re.match(rf"{axis}\d+", str(col))]
+                    out_df = out_df.drop(cols, axis=1)
+
+            yield subplots, out_df, scales
+
+    def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:
+
+        dims = df.columns.intersection(["col", "row"])
+        if dims.empty:
+            return df.index
+
+        keep_rows = pd.Series(True, df.index, dtype=bool)
+        for dim in dims:
+            keep_rows &= df[dim] == subplot[dim]
+        return df.index[keep_rows]
+
+    def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:
+        # TODO note redundancies with preceding function ... needs refactoring
+        dims = df.columns.intersection(["col", "row"])
+        if dims.empty:
+            return df
+
+        keep_rows = pd.Series(True, df.index, dtype=bool)
+        for dim in dims:
+            keep_rows &= df[dim] == subplot[dim]
+        return df[keep_rows]
+
+    def _setup_split_generator(
+        self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],
+    ) -> Callable[[], Generator]:
+
+        grouping_keys = []
+        grouping_vars = [
+            v for v in grouping_vars if v in df and v not in ["col", "row"]
+        ]
+        for var in grouping_vars:
+            order = getattr(self._scales[var], "order", None)
+            if order is None:
+                order = categorical_order(df[var])
+            grouping_keys.append(order)
+
+        def split_generator(keep_na=False) -> Generator:
+
+            for view in subplots:
+
+                axes_df = self._filter_subplot_data(df, view)
+
+                axes_df_inf_as_nan = axes_df.copy()
+                axes_df_inf_as_nan = axes_df_inf_as_nan.mask(
+                    axes_df_inf_as_nan.isin([np.inf, -np.inf]), np.nan
+                )
+                if keep_na:
+                    # The simpler thing to do would be x.dropna().reindex(x.index).
+                    # But that doesn't work with the way that the subset iteration
+                    # is written below, which assumes data for grouping vars.
+                    # Matplotlib (usually?) masks nan data, so this should "work".
+                    # Downstream code can also drop these rows, at some speed cost.
+                    present = axes_df_inf_as_nan.notna().all(axis=1)
+                    nulled = {}
+                    for axis in "xy":
+                        if axis in axes_df:
+                            nulled[axis] = axes_df[axis].where(present)
+                    axes_df = axes_df_inf_as_nan.assign(**nulled)
+                else:
+                    axes_df = axes_df_inf_as_nan.dropna()
+
+                subplot_keys = {}
+                for dim in ["col", "row"]:
+                    if view[dim] is not None:
+                        subplot_keys[dim] = view[dim]
+
+                if not grouping_vars or not any(grouping_keys):
+                    if not axes_df.empty:
+                        yield subplot_keys, axes_df.copy(), view["ax"]
+                    continue
+
+                grouped_df = axes_df.groupby(
+                    grouping_vars, sort=False, as_index=False, observed=False,
+                )
+
+                for key in itertools.product(*grouping_keys):
+
+                    pd_key = (
+                        key[0] if len(key) == 1 and _version_predates(pd, "2.2.0")
+                        else key
+                    )
+                    try:
+                        df_subset = grouped_df.get_group(pd_key)
+                    except KeyError:
+                        # TODO (from initial work on categorical plots refactor)
+                        # We are adding this to allow backward compatibility
+                        # with the empty artists that old categorical plots would
+                        # add (before 0.12), which we may decide to break, in which
+                        # case this option could be removed
+                        df_subset = axes_df.loc[[]]
+
+                    if df_subset.empty:
+                        continue
+
+                    sub_vars = dict(zip(grouping_vars, key))
+                    sub_vars.update(subplot_keys)
+
+                    # TODO need copy(deep=...) policy (here, above, anywhere else?)
+                    yield sub_vars, df_subset.copy(), view["ax"]
+
+        return split_generator
+
+    def _update_legend_contents(
+        self,
+        p: Plot,
+        mark: Mark,
+        data: PlotData,
+        scales: dict[str, Scale],
+        layer_label: str | None,
+    ) -> None:
+        """Add legend artists / labels for one layer in the plot."""
+        if data.frame.empty and data.frames:
+            legend_vars: list[str] = []
+            for frame in data.frames.values():
+                frame_vars = frame.columns.intersection(list(scales))
+                legend_vars.extend(v for v in frame_vars if v not in legend_vars)
+        else:
+            legend_vars = list(data.frame.columns.intersection(list(scales)))
+
+        # First handle layer legends, which occupy a single entry in legend_contents.
+        if layer_label is not None:
+            legend_title = str(p._labels.get("legend", ""))
+            layer_key = (legend_title, -1)
+            artist = mark._legend_artist([], None, {})
+            if artist is not None:
+                for content in self._legend_contents:
+                    if content[0] == layer_key:
+                        content[1].append(artist)
+                        content[2].append(layer_label)
+                        break
+                else:
+                    self._legend_contents.append((layer_key, [artist], [layer_label]))
+
+        # Then handle the scale legends
+        # First pass: Identify the values that will be shown for each variable
+        schema: list[tuple[
+            tuple[str, str | int], list[str], tuple[list[Any], list[str]]
+        ]] = []
+        for var in legend_vars:
+            var_legend = scales[var]._legend
+            if var_legend is not None:
+                values, labels = var_legend
+                for (_, part_id), part_vars, _ in schema:
+                    if data.ids[var] == part_id:
+                        # Allow multiple plot semantics to represent same data variable
+                        part_vars.append(var)
+                        break
+                else:
+                    title = self._resolve_label(p, var, data.names[var])
+                    entry = (title, data.ids[var]), [var], (values, labels)
+                    schema.append(entry)
+
+        # Second pass, generate an artist corresponding to each value
+        contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []
+        for key, variables, (values, labels) in schema:
+            artists = []
+            for val in values:
+                artist = mark._legend_artist(variables, val, scales)
+                if artist is not None:
+                    artists.append(artist)
+            if artists:
+                contents.append((key, artists, labels))
+
+        self._legend_contents.extend(contents)
+
+    def _make_legend(self, p: Plot) -> None:
         """Create the legend artist(s) and add onto the figure."""
-        pass
+        # Combine artists representing same information across layers
+        # Input list has an entry for each distinct variable in each layer
+        # Output dict has an entry for each distinct variable
+        merged_contents: dict[
+            tuple[str, str | int], tuple[list[tuple[Artist, ...]], list[str]],
+        ] = {}
+        for key, new_artists, labels in self._legend_contents:
+            # Key is (name, id); we need the id to resolve variable uniqueness,
+            # but will need the name in the next step to title the legend
+            if key not in merged_contents:
+                # Matplotlib accepts a tuple of artists and will overlay them
+                new_artist_tuples = [tuple([a]) for a in new_artists]
+                merged_contents[key] = new_artist_tuples, labels
+            else:
+                existing_artists = merged_contents[key][0]
+                for i, new_artist in enumerate(new_artists):
+                    existing_artists[i] += tuple([new_artist])
+
+        # When using pyplot, an "external" legend won't be shown, so this
+        # keeps it inside the axes (though still attached to the figure).
+        # This is necessary because matplotlib layout engines currently do not
+        # support figure legends; ideally this will change.
+        loc = "center right" if self._pyplot else "center left"
+
+        base_legend = None
+        for (name, _), (handles, labels) in merged_contents.items():
+
+            legend = mpl.legend.Legend(
+                self._figure,
+                handles,  # type: ignore  # matplotlib/issues/26639
+                labels,
+                title=name,
+                loc=loc,
+                bbox_to_anchor=(.98, .55),
+            )
+
+            if base_legend:
+                # Matplotlib has no public API for this so it is a bit of a hack.
+                # Ideally we'd define our own legend class with more flexibility,
+                # but that is a lot of work!
+                base_legend_box = base_legend.get_children()[0]
+                this_legend_box = legend.get_children()[0]
+                base_legend_box.get_children().extend(this_legend_box.get_children())
+            else:
+                base_legend = legend
+                self._figure.legends.append(legend)
+
+    def _finalize_figure(self, p: Plot) -> None:
+
+        for sub in self._subplots:
+            ax = sub["ax"]
+            for axis in "xy":
+                axis_key = sub[axis]
+                axis_obj = getattr(ax, f"{axis}axis")
+
+                # Axis limits
+                if axis_key in p._limits or axis in p._limits:
+                    convert_units = getattr(ax, f"{axis}axis").convert_units
+                    a, b = p._limits.get(axis_key) or p._limits[axis]
+                    lo = a if a is None else convert_units(a)
+                    hi = b if b is None else convert_units(b)
+                    if isinstance(a, str):
+                        lo = cast(float, lo) - 0.5
+                    if isinstance(b, str):
+                        hi = cast(float, hi) + 0.5
+                    ax.set(**{f"{axis}lim": (lo, hi)})
+
+                if axis_key in self._scales:  # TODO when would it not be?
+                    self._scales[axis_key]._finalize(p, axis_obj)
+
+        if (engine_name := p._layout_spec.get("engine", default)) is not default:
+            # None is a valid arg for Figure.set_layout_engine, hence `default`
+            set_layout_engine(self._figure, engine_name)
+        elif p._target is None:
+            # Don't modify the layout engine if the user supplied their own
+            # matplotlib figure and didn't specify an engine through Plot
+            # TODO switch default to "constrained"?
+            # TODO either way, make configurable
+            set_layout_engine(self._figure, "tight")
+
+        if (extent := p._layout_spec.get("extent")) is not None:
+            engine = get_layout_engine(self._figure)
+            if engine is None:
+                self._figure.subplots_adjust(*extent)
+            else:
+                # Note the different parameterization for the layout engine rect...
+                left, bottom, right, top = extent
+                width, height = right - left, top - bottom
+                try:
+                    # The base LayoutEngine.set method doesn't have rect= so we need
+                    # to avoid typechecking this statement. We also catch a TypeError
+                    # as a plugin LayoutEngine may not support it either.
+                    # Alternatively we could guard this with a check on the engine type,
+                    # but that would make later-developed engines unusable.
+                    engine.set(rect=[left, bottom, width, height])  # type: ignore
+                except TypeError:
+                    # Should we warn / raise? Note that we don't expect to get here
+                    # under any normal circumstances.
+                    pass
diff --git a/seaborn/_core/properties.py b/seaborn/_core/properties.py
index 74be300e..4e2df91b 100644
--- a/seaborn/_core/properties.py
+++ b/seaborn/_core/properties.py
@@ -1,6 +1,7 @@
 from __future__ import annotations
 import itertools
 import warnings
+
 import numpy as np
 from numpy.typing import ArrayLike
 from pandas import Series
@@ -8,55 +9,132 @@ import matplotlib as mpl
 from matplotlib.colors import to_rgb, to_rgba, to_rgba_array
 from matplotlib.markers import MarkerStyle
 from matplotlib.path import Path
+
 from seaborn._core.scales import Scale, Boolean, Continuous, Nominal, Temporal
 from seaborn._core.rules import categorical_order, variable_type
 from seaborn.palettes import QUAL_PALETTES, color_palette, blend_palette
 from seaborn.utils import get_color_cycle
+
 from typing import Any, Callable, Tuple, List, Union, Optional
+
 RGBTuple = Tuple[float, float, float]
 RGBATuple = Tuple[float, float, float, float]
 ColorSpec = Union[RGBTuple, RGBATuple, str]
+
 DashPattern = Tuple[float, ...]
 DashPatternWithOffset = Tuple[float, Optional[DashPattern]]
-MarkerPattern = Union[float, str, Tuple[int, int, float], List[Tuple[float,
-    float]], Path, MarkerStyle]
+
+MarkerPattern = Union[
+    float,
+    str,
+    Tuple[int, int, float],
+    List[Tuple[float, float]],
+    Path,
+    MarkerStyle,
+]
+
 Mapping = Callable[[ArrayLike], ArrayLike]


+# =================================================================================== #
+# Base classes
+# =================================================================================== #
+
+
 class Property:
     """Base class for visual properties that can be set directly or be data scaling."""
+
+    # When True, scales for this property will populate the legend by default
     legend = False
+
+    # When True, scales for this property normalize data to [0, 1] before mapping
     normed = False

-    def __init__(self, variable: (str | None)=None):
+    def __init__(self, variable: str | None = None):
         """Initialize the property with the name of the corresponding plot variable."""
         if not variable:
             variable = self.__class__.__name__.lower()
         self.variable = variable

-    def default_scale(self, data: Series) ->Scale:
+    def default_scale(self, data: Series) -> Scale:
         """Given data, initialize appropriate scale class."""
-        pass

-    def infer_scale(self, arg: Any, data: Series) ->Scale:
+        var_type = variable_type(data, boolean_type="boolean", strict_boolean=True)
+        if var_type == "numeric":
+            return Continuous()
+        elif var_type == "datetime":
+            return Temporal()
+        elif var_type == "boolean":
+            return Boolean()
+        else:
+            return Nominal()
+
+    def infer_scale(self, arg: Any, data: Series) -> Scale:
         """Given data and a scaling argument, initialize appropriate scale class."""
-        pass
-
-    def get_mapping(self, scale: Scale, data: Series) ->Mapping:
+        # TODO put these somewhere external for validation
+        # TODO putting this here won't pick it up if subclasses define infer_scale
+        # (e.g. color). How best to handle that? One option is to call super after
+        # handling property-specific possibilities (e.g. for color check that the
+        # arg is not a valid palette name) but that could get tricky.
+        trans_args = ["log", "symlog", "logit", "pow", "sqrt"]
+        if isinstance(arg, str):
+            if any(arg.startswith(k) for k in trans_args):
+                # TODO validate numeric type? That should happen centrally somewhere
+                return Continuous(trans=arg)
+            else:
+                msg = f"Unknown magic arg for {self.variable} scale: '{arg}'."
+                raise ValueError(msg)
+        else:
+            arg_type = type(arg).__name__
+            msg = f"Magic arg for {self.variable} scale must be str, not {arg_type}."
+            raise TypeError(msg)
+
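+    # Illustrative sketch, not part of the patch: for a generic property, a string
+    # argument is accepted only when it names a transform (values here are made up):
+    #
+    #     Property("alpha").infer_scale("log", Series([1., 10.]))     # Continuous(trans="log")
+    #     Property("alpha").infer_scale("dotted", Series([1., 10.]))  # raises ValueError
+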
+    def get_mapping(self, scale: Scale, data: Series) -> Mapping:
         """Return a function that maps from data domain to property range."""
-        pass
+        def identity(x):
+            return x
+        return identity

-    def standardize(self, val: Any) ->Any:
+    def standardize(self, val: Any) -> Any:
         """Coerce flexible property value to standardized representation."""
-        pass
+        return val

-    def _check_dict_entries(self, levels: list, values: dict) ->None:
+    def _check_dict_entries(self, levels: list, values: dict) -> None:
         """Input check when values are provided as a dictionary."""
-        pass
+        missing = set(levels) - set(values)
+        if missing:
+            formatted = ", ".join(map(repr, sorted(missing, key=str)))
+            err = f"No entry in {self.variable} dictionary for {formatted}"
+            raise ValueError(err)

-    def _check_list_length(self, levels: list, values: list) ->list:
+    def _check_list_length(self, levels: list, values: list) -> list:
         """Input check when values are provided as a list."""
-        pass
+        message = ""
+        if len(levels) > len(values):
+            message = " ".join([
+                f"\nThe {self.variable} list has fewer values ({len(values)})",
+                f"than needed ({len(levels)}) and will cycle, which may",
+                "produce an uninterpretable plot."
+            ])
+            values = [x for _, x in zip(levels, itertools.cycle(values))]
+
+        elif len(values) > len(levels):
+            message = " ".join([
+                f"The {self.variable} list has more values ({len(values)})",
+                f"than needed ({len(levels)}), which may not be intended.",
+            ])
+            values = values[:len(levels)]
+
+        # TODO look into custom PlotSpecWarning with better formatting
+        if message:
+            warnings.warn(message, UserWarning)
+
+        return values
+
+
+# =================================================================================== #
+# Properties relating to spatial position of marks on the plotting axes
+# =================================================================================== #


 class Coordinate(Property):
@@ -65,85 +143,170 @@ class Coordinate(Property):
     normed = False


+# =================================================================================== #
+# Properties with numeric values where scale range can be defined as an interval
+# =================================================================================== #
+
+
 class IntervalProperty(Property):
     """A numeric property where scale range can be defined as an interval."""
     legend = True
     normed = True
+
     _default_range: tuple[float, float] = (0, 1)

     @property
-    def default_range(self) ->tuple[float, float]:
+    def default_range(self) -> tuple[float, float]:
         """Min and max values used by default for semantic mapping."""
-        pass
+        return self._default_range

-    def _forward(self, values: ArrayLike) ->ArrayLike:
+    def _forward(self, values: ArrayLike) -> ArrayLike:
         """Transform applied to native values before linear mapping into interval."""
-        pass
+        return values

-    def _inverse(self, values: ArrayLike) ->ArrayLike:
+    def _inverse(self, values: ArrayLike) -> ArrayLike:
         """Transform applied to results of mapping that returns to native values."""
-        pass
+        return values

-    def infer_scale(self, arg: Any, data: Series) ->Scale:
+    def infer_scale(self, arg: Any, data: Series) -> Scale:
         """Given data and a scaling argument, initialize appropriate scale class."""
-        pass

-    def get_mapping(self, scale: Scale, data: Series) ->Mapping:
-        """Return a function that maps from data domain to property range."""
-        pass
+        # TODO infer continuous based on log/sqrt etc?
+
+        var_type = variable_type(data, boolean_type="boolean", strict_boolean=True)

-    def _get_nominal_mapping(self, scale: Nominal, data: Series) ->Mapping:
+        if var_type == "boolean":
+            return Boolean(arg)
+        elif isinstance(arg, (list, dict)):
+            return Nominal(arg)
+        elif var_type == "categorical":
+            return Nominal(arg)
+        elif var_type == "datetime":
+            return Temporal(arg)
+        # TODO other variable types
+        else:
+            return Continuous(arg)
+
+    def get_mapping(self, scale: Scale, data: Series) -> Mapping:
+        """Return a function that maps from data domain to property range."""
+        if isinstance(scale, Nominal):
+            return self._get_nominal_mapping(scale, data)
+        elif isinstance(scale, Boolean):
+            return self._get_boolean_mapping(scale, data)
+
+        if scale.values is None:
+            vmin, vmax = self._forward(self.default_range)
+        elif isinstance(scale.values, tuple) and len(scale.values) == 2:
+            vmin, vmax = self._forward(scale.values)
+        else:
+            if isinstance(scale.values, tuple):
+                actual = f"{len(scale.values)}-tuple"
+            else:
+                actual = str(type(scale.values))
+            scale_class = scale.__class__.__name__
+            err = " ".join([
+                f"Values for {self.variable} variables with {scale_class} scale",
+                f"must be 2-tuple; not {actual}.",
+            ])
+            raise TypeError(err)
+
+        def mapping(x):
+            return self._inverse(np.multiply(x, vmax - vmin) + vmin)
+
+        return mapping
+
+    def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:
         """Identify evenly-spaced values using interval or explicit mapping."""
-        pass
+        levels = categorical_order(data, scale.order)
+        values = self._get_values(scale, levels)
+
+        def mapping(x):
+            ixs = np.asarray(x, np.intp)
+            out = np.full(len(x), np.nan)
+            use = np.isfinite(x)
+            out[use] = np.take(values, ixs[use])
+            return out
+
+        return mapping

-    def _get_boolean_mapping(self, scale: Boolean, data: Series) ->Mapping:
+    def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:
         """Identify evenly-spaced values using interval or explicit mapping."""
-        pass
+        values = self._get_values(scale, [True, False])

-    def _get_values(self, scale: Scale, levels: list) ->list:
+        def mapping(x):
+            out = np.full(len(x), np.nan)
+            use = np.isfinite(x)
+            out[use] = np.where(x[use], *values)
+            return out
+
+        return mapping
+
+    def _get_values(self, scale: Scale, levels: list) -> list:
         """Validate scale.values and identify a value for each level."""
-        pass
+        if isinstance(scale.values, dict):
+            self._check_dict_entries(levels, scale.values)
+            values = [scale.values[x] for x in levels]
+        elif isinstance(scale.values, list):
+            values = self._check_list_length(levels, scale.values)
+        else:
+            if scale.values is None:
+                vmin, vmax = self.default_range
+            elif isinstance(scale.values, tuple):
+                vmin, vmax = scale.values
+            else:
+                scale_class = scale.__class__.__name__
+                err = " ".join([
+                    f"Values for {self.variable} variables with {scale_class} scale",
+                    f"must be a dict, list or tuple; not {type(scale.values)}",
+                ])
+                raise TypeError(err)
+
+            vmin, vmax = self._forward([vmin, vmax])
+            values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))
+
+        return values


 class PointSize(IntervalProperty):
     """Size (diameter) of a point mark, in points, with scaling by area."""
-    _default_range = 2, 8
+    _default_range = 2, 8  # TODO use rcparams?

     def _forward(self, values):
         """Square native values to implement linear scaling of point area."""
-        pass
+        return np.square(values)

     def _inverse(self, values):
         """Invert areal values back to point diameter."""
-        pass
+        return np.sqrt(values)


 class LineWidth(IntervalProperty):
     """Thickness of a line mark, in points."""
-
     @property
-    def default_range(self) ->tuple[float, float]:
+    def default_range(self) -> tuple[float, float]:
         """Min and max values used by default for semantic mapping."""
-        pass
+        base = mpl.rcParams["lines.linewidth"]
+        return base * .5, base * 2


 class EdgeWidth(IntervalProperty):
     """Thickness of the edges on a patch mark, in points."""
-
     @property
-    def default_range(self) ->tuple[float, float]:
+    def default_range(self) -> tuple[float, float]:
         """Min and max values used by default for semantic mapping."""
-        pass
+        base = mpl.rcParams["patch.linewidth"]
+        return base * .5, base * 2


 class Stroke(IntervalProperty):
     """Thickness of lines that define point glyphs."""
-    _default_range = 0.25, 2.5
+    _default_range = .25, 2.5


 class Alpha(IntervalProperty):
     """Opacity of the color values for an arbitrary mark."""
-    _default_range = 0.3, 0.95
+    _default_range = .3, .95
+    # TODO validate / enforce that output is in [0, 1]


 class Offset(IntervalProperty):
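
A rough illustration of how an IntervalProperty resolves nominal levels into evenly spaced values, as implemented above. This is a minimal sketch against the private classes in this patch; the variable name passed to the constructor is arbitrary.

    import numpy as np
    from seaborn._core.properties import PointSize
    from seaborn._core.scales import Nominal

    p = PointSize("pointsize")
    # With no explicit values, levels are spread over the default (2, 8) range.
    # PointSize spaces them on the squared (area) scale and converts back to
    # diameters, so the middle level is sqrt((4 + 64) / 2) ~= 5.83, not 5.
    print(p._get_values(Nominal(), ["a", "b", "c"]))
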
@@ -157,31 +320,90 @@ class FontSize(IntervalProperty):
     _legend = False

     @property
-    def default_range(self) ->tuple[float, float]:
+    def default_range(self) -> tuple[float, float]:
         """Min and max values used by default for semantic mapping."""
-        pass
+        base = mpl.rcParams["font.size"]
+        return base * .5, base * 2
+
+
+# =================================================================================== #
+# Properties defined by arbitrary objects with inherently nominal scaling
+# =================================================================================== #


 class ObjectProperty(Property):
     """A property defined by arbitrary an object, with inherently nominal scaling."""
     legend = True
     normed = False
+
+    # Object representing null data, should appear invisible when drawn by matplotlib
+    # Note that we now drop nulls in Plot._plot_layer and thus may not need this
     null_value: Any = None

-    def get_mapping(self, scale: Scale, data: Series) ->Mapping:
+    def _default_values(self, n: int) -> list:
+        raise NotImplementedError()
+
+    def default_scale(self, data: Series) -> Scale:
+        var_type = variable_type(data, boolean_type="boolean", strict_boolean=True)
+        return Boolean() if var_type == "boolean" else Nominal()
+
+    def infer_scale(self, arg: Any, data: Series) -> Scale:
+        var_type = variable_type(data, boolean_type="boolean", strict_boolean=True)
+        return Boolean(arg) if var_type == "boolean" else Nominal(arg)
+
+    def get_mapping(self, scale: Scale, data: Series) -> Mapping:
         """Define mapping as lookup into list of object values."""
-        pass
+        boolean_scale = isinstance(scale, Boolean)
+        order = getattr(scale, "order", [True, False] if boolean_scale else None)
+        levels = categorical_order(data, order)
+        values = self._get_values(scale, levels)
+
+        if boolean_scale:
+            values = values[::-1]
+
+        def mapping(x):
+            ixs = np.asarray(np.nan_to_num(x), np.intp)
+            return [
+                values[ix] if np.isfinite(x_i) else self.null_value
+                for x_i, ix in zip(x, ixs)
+            ]
+
+        return mapping

-    def _get_values(self, scale: Scale, levels: list) ->list:
+    def _get_values(self, scale: Scale, levels: list) -> list:
         """Validate scale.values and identify a value for each level."""
-        pass
+        n = len(levels)
+        if isinstance(scale.values, dict):
+            self._check_dict_entries(levels, scale.values)
+            values = [scale.values[x] for x in levels]
+        elif isinstance(scale.values, list):
+            values = self._check_list_length(levels, scale.values)
+        elif scale.values is None:
+            values = self._default_values(n)
+        else:
+            msg = " ".join([
+                f"Scale values for a {self.variable} variable must be provided",
+                f"in a dict or list; not {type(scale.values)}."
+            ])
+            raise TypeError(msg)
+
+        values = [self.standardize(x) for x in values]
+        return values


 class Marker(ObjectProperty):
     """Shape of points in scatter-type marks or lines with data points marked."""
-    null_value = MarkerStyle('')
+    null_value = MarkerStyle("")

-    def _default_values(self, n: int) ->list[MarkerStyle]:
+    # TODO should we have named marker "palettes"? (e.g. see d3 options)
+
+    # TODO need some sort of "require_scale" functionality
+    # to raise when we get the wrong kind explicitly specified
+
+    def standardize(self, val: MarkerPattern) -> MarkerStyle:
+        return MarkerStyle(val)
+
+    def _default_values(self, n: int) -> list[MarkerStyle]:
         """Build an arbitrarily long list of unique marker styles.

         Parameters
@@ -196,14 +418,31 @@ class Marker(ObjectProperty):
             All markers will be filled.

         """
-        pass
+        # Start with marker specs that are well distinguishable
+        markers = [
+            "o", "X", (4, 0, 45), "P", (4, 0, 0), (4, 1, 0), "^", (4, 1, 45), "v",
+        ]
+
+        # Now generate more from regular polygons of increasing order
+        s = 5
+        while len(markers) < n:
+            a = 360 / (s + 1) / 2
+            markers.extend([(s + 1, 1, a), (s + 1, 0, a), (s, 1, 0), (s, 0, 0)])
+            s += 1
+
+        markers = [MarkerStyle(m) for m in markers[:n]]
+
+        return markers


 class LineStyle(ObjectProperty):
     """Dash pattern for line-type marks."""
-    null_value = ''
+    null_value = ""

-    def _default_values(self, n: int) ->list[DashPatternWithOffset]:
+    def standardize(self, val: str | DashPattern) -> DashPatternWithOffset:
+        return self._get_dash_pattern(val)
+
+    def _default_values(self, n: int) -> list[DashPatternWithOffset]:
         """Build an arbitrarily long list of unique dash styles for lines.

         Parameters
@@ -220,12 +459,77 @@ class LineStyle(ObjectProperty):
             dashes.

         """
-        pass
+        # Start with dash specs that are well distinguishable
+        dashes: list[str | DashPattern] = [
+            "-", (4, 1.5), (1, 1), (3, 1.25, 1.5, 1.25), (5, 1, 1, 1),
+        ]
+
+        # Now programmatically build as many as we need
+        p = 3
+        while len(dashes) < n:
+
+            # Take combinations of long and short dashes
+            a = itertools.combinations_with_replacement([3, 1.25], p)
+            b = itertools.combinations_with_replacement([4, 1], p)
+
+            # Interleave the combinations, reversing one of the streams
+            segment_list = itertools.chain(*zip(list(a)[1:-1][::-1], list(b)[1:-1]))
+
+            # Now insert the gaps
+            for segments in segment_list:
+                gap = min(segments)
+                spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))
+                dashes.append(spec)
+
+            p += 1
+
+        return [self._get_dash_pattern(x) for x in dashes]

     @staticmethod
-    def _get_dash_pattern(style: (str | DashPattern)) ->DashPatternWithOffset:
+    def _get_dash_pattern(style: str | DashPattern) -> DashPatternWithOffset:
         """Convert linestyle arguments to dash pattern with offset."""
-        pass
+        # Copied and modified from Matplotlib 3.4
+        # go from short hand -> full strings
+        ls_mapper = {"-": "solid", "--": "dashed", "-.": "dashdot", ":": "dotted"}
+        if isinstance(style, str):
+            style = ls_mapper.get(style, style)
+            # un-dashed styles
+            if style in ["solid", "none", "None"]:
+                offset = 0
+                dashes = None
+            # dashed styles
+            elif style in ["dashed", "dashdot", "dotted"]:
+                offset = 0
+                dashes = tuple(mpl.rcParams[f"lines.{style}_pattern"])
+            else:
+                options = [*ls_mapper.values(), *ls_mapper.keys()]
+                msg = f"Linestyle string must be one of {options}, not {repr(style)}."
+                raise ValueError(msg)
+
+        elif isinstance(style, tuple):
+            if len(style) > 1 and isinstance(style[1], tuple):
+                offset, dashes = style
+            elif len(style) > 1 and style[1] is None:
+                offset, dashes = style
+            else:
+                offset = 0
+                dashes = style
+        else:
+            val_type = type(style).__name__
+            msg = f"Linestyle must be str or tuple, not {val_type}."
+            raise TypeError(msg)
+
+        # Normalize offset to be positive and shorter than the dash cycle
+        if dashes is not None:
+            try:
+                dsum = sum(dashes)
+            except TypeError as err:
+                msg = f"Invalid dash pattern: {dashes}"
+                raise TypeError(msg) from err
+            if dsum:
+                offset %= dsum
+
+        return offset, dashes


 class TextAlignment(ObjectProperty):
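
To make the dash-pattern conversion in LineStyle above concrete, here is a small sketch of what _get_dash_pattern returns for a few inputs. It uses the private helper directly; the results for named styles depend on the active matplotlib rcParams.

    from seaborn._core.properties import LineStyle

    ls = LineStyle("linestyle")
    print(ls._get_dash_pattern("-"))          # (0, None): solid, no dashing
    print(ls._get_dash_pattern("--"))         # (0, tuple(rcParams["lines.dashed_pattern"]))
    print(ls._get_dash_pattern((4, 2)))       # (0, (4, 2)): bare on/off segments get offset 0
    print(ls._get_dash_pattern((7, (4, 2))))  # (1, (4, 2)): offset normalized modulo the dash cycle
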
@@ -233,11 +537,22 @@ class TextAlignment(ObjectProperty):


 class HorizontalAlignment(TextAlignment):
-    pass
+
+    def _default_values(self, n: int) -> list:
+        vals = itertools.cycle(["left", "right"])
+        return [next(vals) for _ in range(n)]


 class VerticalAlignment(TextAlignment):
-    pass
+
+    def _default_values(self, n: int) -> list:
+        vals = itertools.cycle(["top", "bottom"])
+        return [next(vals) for _ in range(n)]
+
+
+# =================================================================================== #
+# Properties with RGB(A) color values
+# =================================================================================== #


 class Color(Property):
@@ -245,17 +560,173 @@ class Color(Property):
     legend = True
     normed = True

-    def _standardize_color_sequence(self, colors: ArrayLike) ->ArrayLike:
-        """Convert color sequence to RGB(A) array, preserving but not adding alpha."""
-        pass
+    def standardize(self, val: ColorSpec) -> RGBTuple | RGBATuple:
+        # Return color with alpha channel only if the input spec has it
+        # This is so that RGBA colors can override the Alpha property
+        if to_rgba(val) != to_rgba(val, 1):
+            return to_rgba(val)
+        else:
+            return to_rgb(val)

-    def get_mapping(self, scale: Scale, data: Series) ->Mapping:
+    def _standardize_color_sequence(self, colors: ArrayLike) -> ArrayLike:
+        """Convert color sequence to RGB(A) array, preserving but not adding alpha."""
+        def has_alpha(x):
+            return to_rgba(x) != to_rgba(x, 1)
+
+        if isinstance(colors, np.ndarray):
+            needs_alpha = colors.shape[1] == 4
+        else:
+            needs_alpha = any(has_alpha(x) for x in colors)
+
+        if needs_alpha:
+            return to_rgba_array(colors)
+        else:
+            return to_rgba_array(colors)[:, :3]
+
+    def infer_scale(self, arg: Any, data: Series) -> Scale:
+        # TODO when inferring Continuous without data, verify type
+
+        # TODO need to rethink the variable type system
+        # (e.g. boolean, ordered categories as Ordinal, etc)..
+        var_type = variable_type(data, boolean_type="boolean", strict_boolean=True)
+
+        if var_type == "boolean":
+            return Boolean(arg)
+
+        if isinstance(arg, (dict, list)):
+            return Nominal(arg)
+
+        if isinstance(arg, tuple):
+            if var_type == "categorical":
+                # TODO It seems reasonable to allow a gradient mapping for nominal
+                # scale but it also feels "technically" wrong. Should this infer
+                # Ordinal with categorical data and, if so, verify orderedness?
+                return Nominal(arg)
+            return Continuous(arg)
+
+        if callable(arg):
+            return Continuous(arg)
+
+        # TODO Do we accept str like "log", "pow", etc. for semantics?
+
+        if not isinstance(arg, str):
+            msg = " ".join([
+                f"A single scale argument for {self.variable} variables must be",
+                f"a string, dict, tuple, list, or callable, not {type(arg)}."
+            ])
+            raise TypeError(msg)
+
+        if arg in QUAL_PALETTES:
+            return Nominal(arg)
+        elif var_type == "numeric":
+            return Continuous(arg)
+        # TODO implement scales for date variables and any others.
+        else:
+            return Nominal(arg)
+
+    def get_mapping(self, scale: Scale, data: Series) -> Mapping:
         """Return a function that maps from data domain to color values."""
-        pass
-
-    def _get_values(self, scale: Scale, levels: list) ->ArrayLike:
+        # TODO what is best way to do this conditional?
+        # Should it be class-based or should classes have behavioral attributes?
+        if isinstance(scale, Nominal):
+            return self._get_nominal_mapping(scale, data)
+        elif isinstance(scale, Boolean):
+            return self._get_boolean_mapping(scale, data)
+
+        if scale.values is None:
+            # TODO Rethink best default continuous color gradient
+            mapping = color_palette("ch:", as_cmap=True)
+        elif isinstance(scale.values, tuple):
+            # TODO blend_palette will strip alpha, but we should support
+            # interpolation on all four channels
+            mapping = blend_palette(scale.values, as_cmap=True)
+        elif isinstance(scale.values, str):
+            # TODO for matplotlib colormaps this will clip extremes, which is
+            # different from what using the named colormap directly would do
+            # This may or may not be desireable.
+            mapping = color_palette(scale.values, as_cmap=True)
+        elif callable(scale.values):
+            mapping = scale.values
+        else:
+            scale_class = scale.__class__.__name__
+            msg = " ".join([
+                f"Scale values for {self.variable} with a {scale_class} mapping",
+                f"must be string, tuple, or callable; not {type(scale.values)}."
+            ])
+            raise TypeError(msg)
+
+        def _mapping(x):
+            # Remove alpha channel so it does not override alpha property downstream
+            # TODO this will need to be more flexible to support RGBA tuples (see above)
+            invalid = ~np.isfinite(x)
+            out = mapping(x)[:, :3]
+            out[invalid] = np.nan
+            return out
+
+        return _mapping
+
+    def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:
+
+        levels = categorical_order(data, scale.order)
+        colors = self._get_values(scale, levels)
+
+        def mapping(x):
+            ixs = np.asarray(np.nan_to_num(x), np.intp)
+            use = np.isfinite(x)
+            out = np.full((len(ixs), colors.shape[1]), np.nan)
+            out[use] = np.take(colors, ixs[use], axis=0)
+            return out
+
+        return mapping
+
+    def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:
+
+        colors = self._get_values(scale, [True, False])
+
+        def mapping(x):
+
+            use = np.isfinite(x)
+            x = np.asarray(np.nan_to_num(x)).astype(bool)
+            out = np.full((len(x), colors.shape[1]), np.nan)
+            out[x & use] = colors[0]
+            out[~x & use] = colors[1]
+            return out
+
+        return mapping
+
+    def _get_values(self, scale: Scale, levels: list) -> ArrayLike:
         """Validate scale.values and identify a value for each level."""
-        pass
+        n = len(levels)
+        values = scale.values
+        if isinstance(values, dict):
+            self._check_dict_entries(levels, values)
+            colors = [values[x] for x in levels]
+        elif isinstance(values, list):
+            colors = self._check_list_length(levels, values)
+        elif isinstance(values, tuple):
+            colors = blend_palette(values, n)
+        elif isinstance(values, str):
+            colors = color_palette(values, n)
+        elif values is None:
+            if n <= len(get_color_cycle()):
+                # Use current (global) default palette
+                colors = color_palette(n_colors=n)
+            else:
+                colors = color_palette("husl", n)
+        else:
+            scale_class = scale.__class__.__name__
+            msg = " ".join([
+                f"Scale values for {self.variable} with a {scale_class} mapping",
+                f"must be string, list, tuple, or dict; not {type(scale.values)}."
+            ])
+            raise TypeError(msg)
+
+        return self._standardize_color_sequence(colors)
+
+
+# =================================================================================== #
+# Properties that can take only two states
+# =================================================================================== #


 class Fill(Property):
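
Before moving on to Fill, a brief sketch of how the Color property above resolves scales and values. This exercises private methods only for illustration; "deep" and "viridis" are just example palette names.

    import pandas as pd
    from seaborn._core.properties import Color
    from seaborn._core.scales import Nominal

    color = Color("color")
    nums = pd.Series([1.0, 2.0, 3.0])
    cats = pd.Series(["a", "b", "c"])
    print(type(color.infer_scale("viridis", nums)).__name__)  # Continuous: numeric data, sequential palette
    print(type(color.infer_scale("deep", nums)).__name__)     # Nominal: qualitative palette name wins
    print(type(color.infer_scale("viridis", cats)).__name__)  # Nominal: categorical data

    # Explicit per-level values come back as an RGB array with one row per level
    print(color._get_values(Nominal({"a": "r", "b": "g", "c": "b"}), ["a", "b", "c"]).shape)  # (3, 3)
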
@@ -263,25 +734,101 @@ class Fill(Property):
     legend = True
     normed = False

-    def _default_values(self, n: int) ->list:
-        """Return a list of n values, alternating True and False."""
-        pass
+    def default_scale(self, data: Series) -> Scale:
+        var_type = variable_type(data, boolean_type="boolean", strict_boolean=True)
+        return Boolean() if var_type == "boolean" else Nominal()
+
+    def infer_scale(self, arg: Any, data: Series) -> Scale:
+        var_type = variable_type(data, boolean_type="boolean", strict_boolean=True)
+        return Boolean(arg) if var_type == "boolean" else Nominal(arg)

-    def get_mapping(self, scale: Scale, data: Series) ->Mapping:
+    def standardize(self, val: Any) -> bool:
+        return bool(val)
+
+    def _default_values(self, n: int) -> list:
+        """Return a list of n values, alternating True and False."""
+        if n > 2:
+            msg = " ".join([
+                f"The variable assigned to {self.variable} has more than two levels,",
+                f"so {self.variable} values will cycle and may be uninterpretable",
+            ])
+            # TODO fire in a "nice" way (see above)
+            warnings.warn(msg, UserWarning)
+        return [x for x, _ in zip(itertools.cycle([True, False]), range(n))]
+
+    def get_mapping(self, scale: Scale, data: Series) -> Mapping:
         """Return a function that maps each data value to True or False."""
-        pass
+        boolean_scale = isinstance(scale, Boolean)
+        order = getattr(scale, "order", [True, False] if boolean_scale else None)
+        levels = categorical_order(data, order)
+        values = self._get_values(scale, levels)

-    def _get_values(self, scale: Scale, levels: list) ->list:
-        """Validate scale.values and identify a value for each level."""
-        pass
+        if boolean_scale:
+            values = values[::-1]
+
+        def mapping(x):
+            ixs = np.asarray(np.nan_to_num(x), np.intp)
+            return [
+                values[ix] if np.isfinite(x_i) else False
+                for x_i, ix in zip(x, ixs)
+            ]

+        return mapping
+
+    def _get_values(self, scale: Scale, levels: list) -> list:
+        """Validate scale.values and identify a value for each level."""
+        if isinstance(scale.values, list):
+            values = [bool(x) for x in scale.values]
+        elif isinstance(scale.values, dict):
+            values = [bool(scale.values[x]) for x in levels]
+        elif scale.values is None:
+            values = self._default_values(len(levels))
+        else:
+            msg = " ".join([
+                f"Scale values for {self.variable} must be passed in",
+                f"a list or dict; not {type(scale.values)}."
+            ])
+            raise TypeError(msg)
+
+        return values
+
+
+# =================================================================================== #
+# Enumeration of properties for use by Plot and Mark classes
+# =================================================================================== #
+# TODO turn this into a property registry with hooks, etc.
+# TODO Users do not interact directly with properties, so how to document them?
+
+
+PROPERTY_CLASSES = {
+    "x": Coordinate,
+    "y": Coordinate,
+    "color": Color,
+    "alpha": Alpha,
+    "fill": Fill,
+    "marker": Marker,
+    "pointsize": PointSize,
+    "stroke": Stroke,
+    "linewidth": LineWidth,
+    "linestyle": LineStyle,
+    "fillcolor": Color,
+    "fillalpha": Alpha,
+    "edgewidth": EdgeWidth,
+    "edgestyle": LineStyle,
+    "edgecolor": Color,
+    "edgealpha": Alpha,
+    "text": Property,
+    "halign": HorizontalAlignment,
+    "valign": VerticalAlignment,
+    "offset": Offset,
+    "fontsize": FontSize,
+    "xmin": Coordinate,
+    "xmax": Coordinate,
+    "ymin": Coordinate,
+    "ymax": Coordinate,
+    "group": Property,
+    # TODO pattern?
+    # TODO gradient?
+}

-PROPERTY_CLASSES = {'x': Coordinate, 'y': Coordinate, 'color': Color,
-    'alpha': Alpha, 'fill': Fill, 'marker': Marker, 'pointsize': PointSize,
-    'stroke': Stroke, 'linewidth': LineWidth, 'linestyle': LineStyle,
-    'fillcolor': Color, 'fillalpha': Alpha, 'edgewidth': EdgeWidth,
-    'edgestyle': LineStyle, 'edgecolor': Color, 'edgealpha': Alpha, 'text':
-    Property, 'halign': HorizontalAlignment, 'valign': VerticalAlignment,
-    'offset': Offset, 'fontsize': FontSize, 'xmin': Coordinate, 'xmax':
-    Coordinate, 'ymin': Coordinate, 'ymax': Coordinate, 'group': Property}
 PROPERTIES = {var: cls(var) for var, cls in PROPERTY_CLASSES.items()}
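
The registry above is what Plot uses to look up a Property object for each variable it encounters. A minimal sketch of that lookup together with default scale inference; default_scale for Color and PointSize comes from the base Property class defined earlier in this file, and the data values are purely illustrative.

    import pandas as pd
    from seaborn._core.properties import PROPERTIES

    day = pd.Series(["Thur", "Fri", "Sat", "Sun"], name="day")
    size = pd.Series([1.0, 2.5, 4.0], name="size")

    print(type(PROPERTIES["color"].default_scale(day)).__name__)       # Nominal
    print(type(PROPERTIES["pointsize"].default_scale(size)).__name__)  # Continuous
    print(type(PROPERTIES["marker"].default_scale(size)).__name__)     # Nominal: markers are inherently nominal
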
diff --git a/seaborn/_core/rules.py b/seaborn/_core/rules.py
index d78093c0..de6c651d 100644
--- a/seaborn/_core/rules.py
+++ b/seaborn/_core/rules.py
@@ -1,10 +1,13 @@
 from __future__ import annotations
+
 import warnings
 from collections import UserString
 from numbers import Number
 from datetime import datetime
+
 import numpy as np
 import pandas as pd
+
 from typing import TYPE_CHECKING
 if TYPE_CHECKING:
     from typing import Literal
@@ -19,7 +22,9 @@ class VarType(UserString):
     them. If that changes, they should be more verbose.

     """
-    allowed = 'numeric', 'datetime', 'categorical', 'boolean', 'unknown'
+    # TODO VarType is an awfully overloaded name, but so is DataType ...
+    # TODO adding unknown because we are using this for scales, is that right?
+    allowed = "numeric", "datetime", "categorical", "boolean", "unknown"

     def __init__(self, data):
         assert data in self.allowed, data
@@ -30,8 +35,11 @@ class VarType(UserString):
         return self.data == other


-def variable_type(vector: Series, boolean_type: Literal['numeric',
-    'categorical', 'boolean']='numeric', strict_boolean: bool=False) ->VarType:
+def variable_type(
+    vector: Series,
+    boolean_type: Literal["numeric", "categorical", "boolean"] = "numeric",
+    strict_boolean: bool = False,
+) -> VarType:
     """
     Determine whether a vector contains numeric, categorical, or datetime data.

@@ -57,10 +65,84 @@ def variable_type(vector: Series, boolean_type: Literal['numeric',
     var_type : 'numeric', 'categorical', or 'datetime'
         Name identifying the type of data in the vector.
     """
-    pass

-
-def categorical_order(vector: Series, order: (list | None)=None) ->list:
+    # If a categorical dtype is set, infer categorical
+    if isinstance(getattr(vector, 'dtype', None), pd.CategoricalDtype):
+        return VarType("categorical")
+
+    # Special-case all-na data, which is always "numeric"
+    if pd.isna(vector).all():
+        return VarType("numeric")
+
+    # Now drop nulls to simplify further type inference
+    vector = vector.dropna()
+
+    # Special-case binary/boolean data, allow caller to determine
+    # This triggers a numpy warning when vector has strings/objects
+    # https://github.com/numpy/numpy/issues/6784
+    # Because we reduce with .all(), we are agnostic about whether the
+    # comparison returns a scalar or vector, so we will ignore the warning.
+    # It triggers a separate DeprecationWarning when the vector has datetimes:
+    # https://github.com/numpy/numpy/issues/13548
+    # This is considered a bug by numpy and will likely go away.
+    with warnings.catch_warnings():
+        warnings.simplefilter(
+            action='ignore',
+            category=(FutureWarning, DeprecationWarning)  # type: ignore  # mypy bug?
+        )
+        if strict_boolean:
+            if isinstance(vector.dtype, pd.core.dtypes.base.ExtensionDtype):
+                boolean_dtypes = ["bool", "boolean"]
+            else:
+                boolean_dtypes = ["bool"]
+            boolean_vector = vector.dtype in boolean_dtypes
+        else:
+            try:
+                boolean_vector = bool(np.isin(vector, [0, 1]).all())
+            except TypeError:
+                # .isin comparison is not guaranteed to be possible under NumPy
+                # casting rules, depending on the (unknown) dtype of 'vector'
+                boolean_vector = False
+        if boolean_vector:
+            return VarType(boolean_type)
+
+    # Defer to positive pandas tests
+    if pd.api.types.is_numeric_dtype(vector):
+        return VarType("numeric")
+
+    if pd.api.types.is_datetime64_dtype(vector):
+        return VarType("datetime")
+
+    # --- If we get to here, we need to check the entries
+
+    # Check for a collection where everything is a number
+
+    def all_numeric(x):
+        for x_i in x:
+            if not isinstance(x_i, Number):
+                return False
+        return True
+
+    if all_numeric(vector):
+        return VarType("numeric")
+
+    # Check for a collection where everything is a datetime
+
+    def all_datetime(x):
+        for x_i in x:
+            if not isinstance(x_i, (datetime, np.datetime64)):
+                return False
+        return True
+
+    if all_datetime(vector):
+        return VarType("datetime")
+
+    # Otherwise, our final fallback is to consider things categorical
+
+    return VarType("categorical")
+
+
+def categorical_order(vector: Series, order: list | None = None) -> list:
     """
     Return a list of unique data values using seaborn's ordering rules.

@@ -78,4 +160,14 @@ def categorical_order(vector: Series, order: (list | None)=None) ->list:
         Ordered list of category levels not including null values.

     """
-    pass
+    if order is not None:
+        return order
+
+    if vector.dtype.name == "category":
+        order = list(vector.cat.categories)
+    else:
+        order = list(filter(pd.notnull, vector.unique()))
+        if variable_type(pd.Series(order)) == "numeric":
+            order.sort()
+
+    return order
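
A short sketch of how these two rules behave on plain pandas Series (illustrative values only):

    import pandas as pd
    from seaborn._core.rules import variable_type, categorical_order

    zero_one = pd.Series([0, 1, 1, 0])
    print(variable_type(zero_one))                          # numeric: 0/1 data is numeric by default
    print(variable_type(zero_one, boolean_type="boolean"))  # boolean
    print(variable_type(zero_one, boolean_type="boolean", strict_boolean=True))  # numeric: dtype is int, not bool

    print(categorical_order(pd.Series(["b", "a", "c", "a"])))  # ['b', 'a', 'c']: appearance order for strings
    print(categorical_order(pd.Series([3, 1, 2])))             # [1, 2, 3]: numeric levels are sorted
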
diff --git a/seaborn/_core/scales.py b/seaborn/_core/scales.py
index 99f98988..1e7bef8a 100644
--- a/seaborn/_core/scales.py
+++ b/seaborn/_core/scales.py
@@ -5,28 +5,58 @@ from collections.abc import Sequence
 from dataclasses import dataclass
 from functools import partial
 from typing import Any, Callable, Tuple, Optional, ClassVar
+
 import numpy as np
 import matplotlib as mpl
-from matplotlib.ticker import Locator, Formatter, AutoLocator, AutoMinorLocator, FixedLocator, LinearLocator, LogLocator, SymmetricalLogLocator, MaxNLocator, MultipleLocator, EngFormatter, FuncFormatter, LogFormatterSciNotation, ScalarFormatter, StrMethodFormatter
-from matplotlib.dates import AutoDateLocator, AutoDateFormatter, ConciseDateFormatter
+from matplotlib.ticker import (
+    Locator,
+    Formatter,
+    AutoLocator,
+    AutoMinorLocator,
+    FixedLocator,
+    LinearLocator,
+    LogLocator,
+    SymmetricalLogLocator,
+    MaxNLocator,
+    MultipleLocator,
+    EngFormatter,
+    FuncFormatter,
+    LogFormatterSciNotation,
+    ScalarFormatter,
+    StrMethodFormatter,
+)
+from matplotlib.dates import (
+    AutoDateLocator,
+    AutoDateFormatter,
+    ConciseDateFormatter,
+)
 from matplotlib.axis import Axis
 from matplotlib.scale import ScaleBase
 from pandas import Series
+
 from seaborn._core.rules import categorical_order
 from seaborn._core.typing import Default, default
+
 from typing import TYPE_CHECKING
 if TYPE_CHECKING:
     from seaborn._core.plot import Plot
     from seaborn._core.properties import Property
     from numpy.typing import ArrayLike, NDArray
-    TransFuncs = Tuple[Callable[[ArrayLike], ArrayLike], Callable[[
-        ArrayLike], ArrayLike]]
+
+    TransFuncs = Tuple[
+        Callable[[ArrayLike], ArrayLike], Callable[[ArrayLike], ArrayLike]
+    ]
+
+    # TODO Reverting typing to Any as it was proving too complicated to
+    # work out the right way to communicate the types to mypy. Revisit!
     Pipeline = Sequence[Optional[Callable[[Any], Any]]]


 class Scale:
     """Base class for objects that map data values to visual properties."""
+
     values: tuple | str | list | dict | None
+
     _priority: ClassVar[int]
     _pipeline: Pipeline
     _matplotlib_scale: ScaleBase
@@ -34,29 +64,86 @@ class Scale:
     _legend: tuple[list[Any], list[str]] | None

     def __post_init__(self):
+
         self._tick_params = None
         self._label_params = None
         self._legend = None

-    def _finalize(self, p: Plot, axis: Axis) ->None:
+    def tick(self):
+        raise NotImplementedError()
+
+    def label(self):
+        raise NotImplementedError()
+
+    def _get_locators(self):
+        raise NotImplementedError()
+
+    def _get_formatter(self, locator: Locator | None = None):
+        raise NotImplementedError()
+
+    def _get_scale(self, name: str, forward: Callable, inverse: Callable):
+
+        major_locator, minor_locator = self._get_locators(**self._tick_params)
+        major_formatter = self._get_formatter(major_locator, **self._label_params)
+
+        class InternalScale(mpl.scale.FuncScale):
+            def set_default_locators_and_formatters(self, axis):
+                axis.set_major_locator(major_locator)
+                if minor_locator is not None:
+                    axis.set_minor_locator(minor_locator)
+                axis.set_major_formatter(major_formatter)
+
+        return InternalScale(name, (forward, inverse))
+
+    def _spacing(self, x: Series) -> float:
+        space = self._spacer(x)
+        if np.isnan(space):
+            # This happens when there is no variance in the orient coordinate data
+            # Not exactly clear what the right default is, but 1 seems reasonable?
+            return 1
+        return space
+
+    def _setup(
+        self, data: Series, prop: Property, axis: Axis | None = None,
+    ) -> Scale:
+        raise NotImplementedError()
+
+    def _finalize(self, p: Plot, axis: Axis) -> None:
         """Perform scale-specific axis tweaks after adding artists."""
         pass

-    def __call__(self, data: Series) ->ArrayLike:
+    def __call__(self, data: Series) -> ArrayLike:
+
         trans_data: Series | NDArray | list
+
+        # TODO sometimes we need to handle scalars (e.g. for Line)
+        # but what is the best way to do that?
         scalar_data = np.isscalar(data)
         if scalar_data:
             trans_data = np.array([data])
         else:
             trans_data = data
+
         for func in self._pipeline:
             if func is not None:
                 trans_data = func(trans_data)
+
         if scalar_data:
             return trans_data[0]
         else:
             return trans_data

+    @staticmethod
+    def _identity():
+
+        class Identity(Scale):
+            _pipeline = []
+            _spacer = None
+            _legend = None
+            _matplotlib_scale = None
+
+        return Identity()
+

 @dataclass
 class Boolean(Scale):
@@ -71,19 +158,183 @@ class Boolean(Scale):

     """
     values: tuple | list | dict | None = None
+
     _priority: ClassVar[int] = 3

+    def _setup(
+        self, data: Series, prop: Property, axis: Axis | None = None,
+    ) -> Scale:
+
+        new = copy(self)
+        if new._tick_params is None:
+            new = new.tick()
+        if new._label_params is None:
+            new = new.label()
+
+        def na_safe_cast(x):
+            # TODO this doesn't actually need to be a closure
+            if np.isscalar(x):
+                return float(bool(x))
+            else:
+                if hasattr(x, "notna"):
+                    # Handle pd.NA; np<>pd interop with NA is tricky
+                    use = x.notna().to_numpy()
+                else:
+                    use = np.isfinite(x)
+                out = np.full(len(x), np.nan, dtype=float)
+                out[use] = x[use].astype(bool).astype(float)
+                return out
+
+        new._pipeline = [na_safe_cast, prop.get_mapping(new, data)]
+        new._spacer = _default_spacer
+        if prop.legend:
+            new._legend = [True, False], ["True", "False"]
+
+        forward, inverse = _make_identity_transforms()
+        mpl_scale = new._get_scale(str(data.name), forward, inverse)
+
+        axis = PseudoAxis(mpl_scale) if axis is None else axis
+        mpl_scale.set_default_locators_and_formatters(axis)
+        new._matplotlib_scale = mpl_scale
+
+        return new
+
+    def _finalize(self, p: Plot, axis: Axis) -> None:
+
+        # We want values to appear in a True, False order but also want
+        # True/False to be drawn at 1/0 positions respectively to avoid nasty
+        # surprises if additional artists are added through the matplotlib API.
+        # We accomplish this using axis inversion akin to what we do in Nominal.
+
+        ax = axis.axes
+        name = axis.axis_name
+        axis.grid(False, which="both")
+        if name not in p._limits:
+            nticks = len(axis.get_major_ticks())
+            lo, hi = -.5, nticks - .5
+            if name == "x":
+                lo, hi = hi, lo
+            set_lim = getattr(ax, f"set_{name}lim")
+            set_lim(lo, hi, auto=None)
+
+    def tick(self, locator: Locator | None = None):
+        new = copy(self)
+        new._tick_params = {"locator": locator}
+        return new
+
+    def label(self, formatter: Formatter | None = None):
+        new = copy(self)
+        new._label_params = {"formatter": formatter}
+        return new
+
+    def _get_locators(self, locator):
+        if locator is not None:
+            return locator, None
+        return FixedLocator([0, 1]), None
+
+    def _get_formatter(self, locator, formatter):
+        if formatter is not None:
+            return formatter
+        return FuncFormatter(lambda x, _: str(bool(x)))
+

 @dataclass
 class Nominal(Scale):
     """
     A categorical scale without relative importance / magnitude.
     """
+    # Categorical (convert to strings), un-sortable
+
     values: tuple | str | list | dict | None = None
     order: list | None = None
+
     _priority: ClassVar[int] = 4

-    def tick(self, locator: (Locator | None)=None) ->Nominal:
+    def _setup(
+        self, data: Series, prop: Property, axis: Axis | None = None,
+    ) -> Scale:
+
+        new = copy(self)
+        if new._tick_params is None:
+            new = new.tick()
+        if new._label_params is None:
+            new = new.label()
+
+        # TODO flexibility over format() which isn't great for numbers / dates
+        stringify = np.vectorize(format, otypes=["object"])
+
+        units_seed = categorical_order(data, new.order)
+
+        # TODO move to Nominal._get_scale?
+        # TODO this needs some more complicated rethinking about how to pass
+        # a unit dictionary down to these methods, along with how much we want
+        # to invest in their API. What is it useful for tick() to do here?
+        # (Ordinal may be different if we draw that contrast).
+        # Any customization we do to allow, e.g., label wrapping will probably
+        # require defining our own Formatter subclass.
+        # We could also potentially implement auto-wrapping in an Axis subclass
+        # (see Axis.draw ... it already is computing the bboxes).
+        # major_locator, minor_locator = new._get_locators(**new._tick_params)
+        # major_formatter = new._get_formatter(major_locator, **new._label_params)
+
+        class CatScale(mpl.scale.LinearScale):
+            def set_default_locators_and_formatters(self, axis):
+                ...
+                # axis.set_major_locator(major_locator)
+                # if minor_locator is not None:
+                #     axis.set_minor_locator(minor_locator)
+                # axis.set_major_formatter(major_formatter)
+
+        mpl_scale = CatScale(data.name)
+        if axis is None:
+            axis = PseudoAxis(mpl_scale)
+
+            # TODO Currently just used in non-Coordinate contexts, but should
+            # we use this to (A) set the padding we want for categorical plots
+            # and (B) allow the values parameter for a Coordinate to set xlim/ylim
+            axis.set_view_interval(0, len(units_seed) - 1)
+
+        new._matplotlib_scale = mpl_scale
+
+        # TODO array cast necessary to handle float/int mixture, which we need
+        # to solve in a more systematic way probably
+        # (i.e. if we have [1, 2.5], do we want [1.0, 2.5]? Unclear)
+        axis.update_units(stringify(np.array(units_seed)))
+
+        # TODO define this more centrally
+        def convert_units(x):
+            # TODO only do this with explicit order?
+            # (But also category dtype?)
+            # TODO isin fails when units_seed mixes numbers and strings (numpy error?)
+            # but np.isin also does not seem any faster? (Maybe not broadcasting in C)
+            # keep = x.isin(units_seed)
+            keep = np.array([x_ in units_seed for x_ in x], bool)
+            out = np.full(len(x), np.nan)
+            out[keep] = axis.convert_units(stringify(x[keep]))
+            return out
+
+        new._pipeline = [convert_units, prop.get_mapping(new, data)]
+        new._spacer = _default_spacer
+
+        if prop.legend:
+            new._legend = units_seed, list(stringify(units_seed))
+
+        return new
+
+    def _finalize(self, p: Plot, axis: Axis) -> None:
+
+        ax = axis.axes
+        name = axis.axis_name
+        axis.grid(False, which="both")
+        if name not in p._limits:
+            nticks = len(axis.get_major_ticks())
+            lo, hi = -.5, nticks - .5
+            if name == "y":
+                lo, hi = hi, lo
+            set_lim = getattr(ax, f"set_{name}lim")
+            set_lim(lo, hi, auto=None)
+
+    def tick(self, locator: Locator | None = None) -> Nominal:
         """
         Configure the selection of ticks for the scale's axis or legend.

@@ -101,9 +352,11 @@ class Nominal(Scale):
         Copy of self with new tick configuration.

         """
-        pass
+        new = copy(self)
+        new._tick_params = {"locator": locator}
+        return new

-    def label(self, formatter: (Formatter | None)=None) ->Nominal:
+    def label(self, formatter: Formatter | None = None) -> Nominal:
         """
         Configure the selection of labels for the scale's axis or legend.

@@ -122,24 +375,152 @@ class Nominal(Scale):
             Copy of self with new tick configuration.

         """
-        pass
+        new = copy(self)
+        new._label_params = {"formatter": formatter}
+        return new
+
+    def _get_locators(self, locator):
+
+        if locator is not None:
+            return locator, None
+
+        locator = mpl.category.StrCategoryLocator({})
+
+        return locator, None
+
+    def _get_formatter(self, locator, formatter):
+
+        if formatter is not None:
+            return formatter
+
+        formatter = mpl.category.StrCategoryFormatter({})
+
+        return formatter


 @dataclass
 class Ordinal(Scale):
+    # Categorical (convert to strings), sortable, can skip ticklabels
     ...


 @dataclass
 class Discrete(Scale):
+    # Numeric, integral, can skip ticks/ticklabels
     ...


 @dataclass
 class ContinuousBase(Scale):
+
     values: tuple | str | None = None
     norm: tuple | None = None

+    def _setup(
+        self, data: Series, prop: Property, axis: Axis | None = None,
+    ) -> Scale:
+
+        new = copy(self)
+        if new._tick_params is None:
+            new = new.tick()
+        if new._label_params is None:
+            new = new.label()
+
+        forward, inverse = new._get_transform()
+
+        mpl_scale = new._get_scale(str(data.name), forward, inverse)
+
+        if axis is None:
+            axis = PseudoAxis(mpl_scale)
+            axis.update_units(data)
+
+        mpl_scale.set_default_locators_and_formatters(axis)
+        new._matplotlib_scale = mpl_scale
+
+        normalize: Optional[Callable[[ArrayLike], ArrayLike]]
+        if prop.normed:
+            if new.norm is None:
+                vmin, vmax = data.min(), data.max()
+            else:
+                vmin, vmax = new.norm
+            vmin, vmax = map(float, axis.convert_units((vmin, vmax)))
+            a = forward(vmin)
+            b = forward(vmax) - forward(vmin)
+
+            def normalize(x):
+                return (x - a) / b
+
+        else:
+            normalize = vmin = vmax = None
+
+        new._pipeline = [
+            axis.convert_units,
+            forward,
+            normalize,
+            prop.get_mapping(new, data)
+        ]
+
+        def spacer(x):
+            x = x.dropna().unique()
+            if len(x) < 2:
+                return np.nan
+            return np.min(np.diff(np.sort(x)))
+        new._spacer = spacer
+
+        # TODO How to allow disabling of legend for all uses of property?
+        # Could add a Scale parameter, or perhaps Scale.suppress()?
+        # Are there other useful parameters that would be in Scale.legend()
+        # besides allowing Scale.legend(False)?
+        if prop.legend:
+            axis.set_view_interval(vmin, vmax)
+            locs = axis.major.locator()
+            locs = locs[(vmin <= locs) & (locs <= vmax)]
+            # Avoid having an offset / scientific notation in a legend
+            # as we don't represent that anywhere so it ends up incorrect.
+            # This could become an option (e.g. Continuous.label(offset=True))
+            # in which case we would need to figure out how to show it.
+            if hasattr(axis.major.formatter, "set_useOffset"):
+                axis.major.formatter.set_useOffset(False)
+            if hasattr(axis.major.formatter, "set_scientific"):
+                axis.major.formatter.set_scientific(False)
+            labels = axis.major.formatter.format_ticks(locs)
+            new._legend = list(locs), list(labels)
+
+        return new
+
+    def _get_transform(self):
+
+        arg = self.trans
+
+        def get_param(method, default):
+            if arg == method:
+                return default
+            return float(arg[len(method):])
+
+        if arg is None:
+            return _make_identity_transforms()
+        elif isinstance(arg, tuple):
+            return arg
+        elif isinstance(arg, str):
+            if arg == "ln":
+                return _make_log_transforms()
+            elif arg == "logit":
+                base = get_param("logit", 10)
+                return _make_logit_transforms(base)
+            elif arg.startswith("log"):
+                base = get_param("log", 10)
+                return _make_log_transforms(base)
+            elif arg.startswith("symlog"):
+                c = get_param("symlog", 1)
+                return _make_symlog_transforms(c)
+            elif arg.startswith("pow"):
+                exp = get_param("pow", 2)
+                return _make_power_transforms(exp)
+            elif arg == "sqrt":
+                return _make_sqrt_transforms()
+            else:
+                raise ValueError(f"Unknown value provided for trans: {arg!r}")
+

 @dataclass
 class Continuous(ContinuousBase):
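
The transform pairs returned by _get_transform above are plain forward/inverse callables built by the _make_*_transforms helpers defined elsewhere in this module. A quick sketch, checking only that the string shorthands round-trip on positive values:

    import numpy as np
    from seaborn._core.scales import Continuous

    x = np.array([0.5, 1.0, 10.0])
    for trans in ["log", "log2", "symlog", "pow3", "sqrt"]:
        forward, inverse = Continuous(trans=trans)._get_transform()
        assert np.allclose(inverse(forward(x)), x), trans  # each pair should invert cleanly
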
@@ -148,12 +529,22 @@ class Continuous(ContinuousBase):
     """
     values: tuple | str | None = None
     trans: str | TransFuncs | None = None
+
+    # TODO Add this to deal with outliers?
+    # outside: Literal["keep", "drop", "clip"] = "keep"
+
     _priority: ClassVar[int] = 1

-    def tick(self, locator: (Locator | None)=None, *, at: (Sequence[float] |
-        None)=None, upto: (int | None)=None, count: (int | None)=None,
-        every: (float | None)=None, between: (tuple[float, float] | None)=
-        None, minor: (int | None)=None) ->Continuous:
+    def tick(
+        self,
+        locator: Locator | None = None, *,
+        at: Sequence[float] | None = None,
+        upto: int | None = None,
+        count: int | None = None,
+        every: float | None = None,
+        between: tuple[float, float] | None = None,
+        minor: int | None = None,
+    ) -> Continuous:
         """
         Configure the selection of ticks for the scale's axis or legend.

@@ -180,11 +571,38 @@ class Continuous(ContinuousBase):
             Copy of self with new tick configuration.

         """
-        pass
+        # Input checks
+        if locator is not None and not isinstance(locator, Locator):
+            raise TypeError(
+                f"Tick locator must be an instance of {Locator!r}, "
+                f"not {type(locator)!r}."
+            )
+        log_base, symlog_thresh = self._parse_for_log_params(self.trans)
+        if log_base or symlog_thresh:
+            if count is not None and between is None:
+                raise RuntimeError("`count` requires `between` with log transform.")
+            if every is not None:
+                raise RuntimeError("`every` not supported with log transform.")

-    def label(self, formatter: (Formatter | None)=None, *, like: (str |
-        Callable | None)=None, base: (int | None | Default)=default, unit:
-        (str | None)=None) ->Continuous:
+        new = copy(self)
+        new._tick_params = {
+            "locator": locator,
+            "at": at,
+            "upto": upto,
+            "count": count,
+            "every": every,
+            "between": between,
+            "minor": minor,
+        }
+        return new
+
+    def label(
+        self,
+        formatter: Formatter | None = None, *,
+        like: str | Callable | None = None,
+        base: int | None | Default = default,
+        unit: str | None = None,
+    ) -> Continuous:
         """
         Configure the appearance of tick labels for the scale's axis or legend.

@@ -211,7 +629,133 @@ class Continuous(ContinuousBase):
             Copy of self with new label configuration.

         """
-        pass
+        # Input checks
+        if formatter is not None and not isinstance(formatter, Formatter):
+            raise TypeError(
+                f"Label formatter must be an instance of {Formatter!r}, "
+                f"not {type(formatter)!r}"
+            )
+        if like is not None and not (isinstance(like, str) or callable(like)):
+            msg = f"`like` must be a string or callable, not {type(like).__name__}."
+            raise TypeError(msg)
+
+        new = copy(self)
+        new._label_params = {
+            "formatter": formatter,
+            "like": like,
+            "base": base,
+            "unit": unit,
+        }
+        return new
+
+    def _parse_for_log_params(
+        self, trans: str | TransFuncs | None
+    ) -> tuple[float | None, float | None]:
+
+        log_base = symlog_thresh = None
+        if isinstance(trans, str):
+            m = re.match(r"^log(\d*)", trans)
+            if m is not None:
+                log_base = float(m[1] or 10)
+            m = re.match(r"symlog(\d*)", trans)
+            if m is not None:
+                symlog_thresh = float(m[1] or 1)
+        return log_base, symlog_thresh
+
+    def _get_locators(self, locator, at, upto, count, every, between, minor):
+
+        log_base, symlog_thresh = self._parse_for_log_params(self.trans)
+
+        if locator is not None:
+            major_locator = locator
+
+        elif upto is not None:
+            if log_base:
+                major_locator = LogLocator(base=log_base, numticks=upto)
+            else:
+                major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])
+
+        elif count is not None:
+            if between is None:
+                # This is rarely useful (unless you are setting limits)
+                major_locator = LinearLocator(count)
+            else:
+                if log_base or symlog_thresh:
+                    forward, inverse = self._get_transform()
+                    lo, hi = forward(between)
+                    ticks = inverse(np.linspace(lo, hi, num=count))
+                else:
+                    ticks = np.linspace(*between, num=count)
+                major_locator = FixedLocator(ticks)
+
+        elif every is not None:
+            if between is None:
+                major_locator = MultipleLocator(every)
+            else:
+                lo, hi = between
+                ticks = np.arange(lo, hi + every, every)
+                major_locator = FixedLocator(ticks)
+
+        elif at is not None:
+            major_locator = FixedLocator(at)
+
+        else:
+            if log_base:
+                major_locator = LogLocator(log_base)
+            elif symlog_thresh:
+                major_locator = SymmetricalLogLocator(linthresh=symlog_thresh, base=10)
+            else:
+                major_locator = AutoLocator()
+
+        if minor is None:
+            minor_locator = LogLocator(log_base, subs=None) if log_base else None
+        else:
+            if log_base:
+                subs = np.linspace(0, log_base, minor + 2)[1:-1]
+                minor_locator = LogLocator(log_base, subs=subs)
+            else:
+                minor_locator = AutoMinorLocator(minor + 1)
+
+        return major_locator, minor_locator
+
+    def _get_formatter(self, locator, formatter, like, base, unit):
+
+        log_base, symlog_thresh = self._parse_for_log_params(self.trans)
+        if base is default:
+            if symlog_thresh:
+                log_base = 10
+            base = log_base
+
+        if formatter is not None:
+            return formatter
+
+        if like is not None:
+            if isinstance(like, str):
+                if "{x" in like or "{pos" in like:
+                    fmt = like
+                else:
+                    fmt = f"{{x:{like}}}"
+                formatter = StrMethodFormatter(fmt)
+            else:
+                formatter = FuncFormatter(like)
+
+        elif base is not None:
+            # We could add other log options if necessary
+            formatter = LogFormatterSciNotation(base)
+
+        elif unit is not None:
+            if isinstance(unit, tuple):
+                sep, unit = unit
+            elif not unit:
+                sep = ""
+            else:
+                sep = " "
+            formatter = EngFormatter(unit, sep=sep)
+
+        else:
+            formatter = ScalarFormatter()
+
+        return formatter


 @dataclass
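
At the user level, the tick() and label() methods above are reached through the objects interface. A hedged sketch of that usage, assuming the "diamonds" example dataset is available via load_dataset and using an illustrative dollar format string:

    import seaborn.objects as so
    from seaborn import load_dataset

    diamonds = load_dataset("diamonds")
    p = (
        so.Plot(diamonds, x="carat", y="price")
        .add(so.Dots())
        .scale(
            y=so.Continuous(trans="log")
            .tick(count=4, between=(300, 10_000))
            .label(like="${x:,.0f}")
        )
    )
    # p.show()  # render the spec; p.save("diamonds.png") would write it to disk
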
@@ -219,11 +763,24 @@ class Temporal(ContinuousBase):
     """
     A scale for date/time data.
     """
+    # TODO date: bool?
+    # For when we only care about the time component, would affect
+    # default formatter and norm conversion. Should also happen in
+    # Property.default_scale. The alternative was having distinct
+    # Calendric / Temporal scales, but that feels a bit fussy, and it
+    # would get in the way of using first-letter shorthands because
+    # Calendric and Continuous would collide. Still, we haven't implemented
+    # those yet, and having a clear distinction between date(time) / time
+    # may be more useful.
+
     trans = None
+
     _priority: ClassVar[int] = 2

-    def tick(self, locator: (Locator | None)=None, *, upto: (int | None)=None
-        ) ->Temporal:
+    def tick(
+        self, locator: Locator | None = None, *,
+        upto: int | None = None,
+    ) -> Temporal:
         """
         Configure the selection of ticks for the scale's axis or legend.

@@ -243,10 +800,22 @@ class Temporal(ContinuousBase):
             Copy of self with new tick configuration.

         """
-        pass
+        if locator is not None and not isinstance(locator, Locator):
+            err = (
+                f"Tick locator must be an instance of {Locator!r}, "
+                f"not {type(locator)!r}."
+            )
+            raise TypeError(err)
+
+        new = copy(self)
+        new._tick_params = {"locator": locator, "upto": upto}
+        return new

-    def label(self, formatter: (Formatter | None)=None, *, concise: bool=False
-        ) ->Temporal:
+    def label(
+        self,
+        formatter: Formatter | None = None, *,
+        concise: bool = False,
+    ) -> Temporal:
         """
         Configure the appearance of tick labels for the scale's axis or legend.

@@ -267,7 +836,54 @@ class Temporal(ContinuousBase):
             Copy of self with new label configuration.

         """
-        pass
+        new = copy(self)
+        new._label_params = {"formatter": formatter, "concise": concise}
+        return new
+
+    def _get_locators(self, locator, upto):
+
+        if locator is not None:
+            major_locator = locator
+        elif upto is not None:
+            major_locator = AutoDateLocator(minticks=2, maxticks=upto)
+
+        else:
+            major_locator = AutoDateLocator(minticks=2, maxticks=6)
+        minor_locator = None
+
+        return major_locator, minor_locator
+
+    def _get_formatter(self, locator, formatter, concise):
+
+        if formatter is not None:
+            return formatter
+
+        if concise:
+            # TODO ideally we would have concise coordinate ticks,
+            # but full semantic ticks. Is that possible?
+            formatter = ConciseDateFormatter(locator)
+        else:
+            formatter = AutoDateFormatter(locator)
+
+        return formatter
+
+
+# ----------------------------------------------------------------------------------- #
+
+
+# TODO Have this separate from Temporal or have Temporal(date=True) or similar?
+# class Calendric(Scale):
+
+# TODO Needed? Or handle this at layer (in stat or as param, eg binning=)
+# class Binned(Scale):
+
+# TODO any need for color-specific scales?
+# class Sequential(Continuous):
+# class Diverging(Continuous):
+# class Qualitative(Nominal):
+
+
+# ----------------------------------------------------------------------------------- #


 class PseudoAxis:
@@ -280,21 +896,195 @@ class PseudoAxis:
     code, this object acts like an Axis and can be used to scale other variables.

     """
-    axis_name = ''
+    axis_name = ""  # Matplotlib requirement but not actually used

     def __init__(self, scale):
+
         self.converter = None
         self.units = None
         self.scale = scale
         self.major = mpl.axis.Ticker()
         self.minor = mpl.axis.Ticker()
+
+        # It appears that this needs to be initialized this way on matplotlib 3.1,
+        # but not later versions. It is unclear whether there are any issues with it.
         self._data_interval = None, None
+
         scale.set_default_locators_and_formatters(self)
+        # self.set_default_intervals()  Is this ever needed?
+
+    def set_view_interval(self, vmin, vmax):
+        self._view_interval = vmin, vmax
+
+    def get_view_interval(self):
+        return self._view_interval
+
+    # TODO do we want to distinguish view/data intervals? e.g. for a legend
+    # we probably want to represent the full range of the data values, but
+    # still norm the colormap. If so, we'll need to track data range separately
+    # from the norm, which we currently don't do.
+
+    def set_data_interval(self, vmin, vmax):
+        self._data_interval = vmin, vmax
+
+    def get_data_interval(self):
+        return self._data_interval
+
+    def get_tick_space(self):
+        # TODO how to do this in a configurable / auto way?
+        # Would be cool to have legend density adapt to figure size, etc.
+        return 5
+
+    def set_major_locator(self, locator):
+        self.major.locator = locator
+        locator.set_axis(self)
+
+    def set_major_formatter(self, formatter):
+        self.major.formatter = formatter
+        formatter.set_axis(self)
+
+    def set_minor_locator(self, locator):
+        self.minor.locator = locator
+        locator.set_axis(self)
+
+    def set_minor_formatter(self, formatter):
+        self.minor.formatter = formatter
+        formatter.set_axis(self)
+
+    def set_units(self, units):
+        self.units = units

     def update_units(self, x):
         """Pass units to the internal converter, potentially updating its mapping."""
-        pass
+        self.converter = mpl.units.registry.get_converter(x)
+        if self.converter is not None:
+            self.converter.default_units(x, self)
+
+            info = self.converter.axisinfo(self.units, self)
+
+            if info is None:
+                return
+            if info.majloc is not None:
+                self.set_major_locator(info.majloc)
+            if info.majfmt is not None:
+                self.set_major_formatter(info.majfmt)
+
+            # This is in the matplotlib method; do we need this?
+            # self.set_default_intervals()

     def convert_units(self, x):
         """Return a numeric representation of the input data."""
-        pass
+        if np.issubdtype(np.asarray(x).dtype, np.number):
+            return x
+        elif self.converter is None:
+            return x
+        return self.converter.convert(x, self.units, self)
+
+    def get_scale(self):
+        # Note that matplotlib actually returns a string here!
+        # (e.g., with a log scale, axis.get_scale() returns "log")
+        # Currently we just hit it with minor ticks where it checks for
+        # scale == "log". I'm not sure how you'd actually use log-scale
+        # minor "ticks" in a legend context, so this is fine....
+        return self.scale
+
+    def get_majorticklocs(self):
+        return self.major.locator()
+
+
+# ------------------------------------------------------------------------------------ #
+# Transform function creation
+
+
+def _make_identity_transforms() -> TransFuncs:
+
+    def identity(x):
+        return x
+
+    return identity, identity
+
+
+def _make_logit_transforms(base: float | None = None) -> TransFuncs:
+
+    log, exp = _make_log_transforms(base)
+
+    def logit(x):
+        with np.errstate(invalid="ignore", divide="ignore"):
+            return log(x) - log(1 - x)
+
+    def expit(x):
+        with np.errstate(invalid="ignore", divide="ignore"):
+            return exp(x) / (1 + exp(x))
+
+    return logit, expit
+
+
+def _make_log_transforms(base: float | None = None) -> TransFuncs:
+
+    fs: TransFuncs
+    if base is None:
+        fs = np.log, np.exp
+    elif base == 2:
+        fs = np.log2, partial(np.power, 2)
+    elif base == 10:
+        fs = np.log10, partial(np.power, 10)
+    else:
+        def forward(x):
+            return np.log(x) / np.log(base)
+        fs = forward, partial(np.power, base)
+
+    def log(x: ArrayLike) -> ArrayLike:
+        with np.errstate(invalid="ignore", divide="ignore"):
+            return fs[0](x)
+
+    def exp(x: ArrayLike) -> ArrayLike:
+        with np.errstate(invalid="ignore", divide="ignore"):
+            return fs[1](x)
+
+    return log, exp
+
+
+def _make_symlog_transforms(c: float = 1, base: float = 10) -> TransFuncs:
+
+    # From https://iopscience.iop.org/article/10.1088/0957-0233/24/2/027001
+
+    # Note: currently not using base because we only get
+    # one parameter from the string, and are using c (this is consistent with d3)
+
+    log, exp = _make_log_transforms(base)
+
+    def symlog(x):
+        with np.errstate(invalid="ignore", divide="ignore"):
+            return np.sign(x) * log(1 + np.abs(np.divide(x, c)))
+
+    def symexp(x):
+        with np.errstate(invalid="ignore", divide="ignore"):
+            return np.sign(x) * c * (exp(np.abs(x)) - 1)
+
+    return symlog, symexp
+
+
+def _make_sqrt_transforms() -> TransFuncs:
+
+    def sqrt(x):
+        return np.sign(x) * np.sqrt(np.abs(x))
+
+    def square(x):
+        return np.sign(x) * np.square(x)
+
+    return sqrt, square
+
+
+def _make_power_transforms(exp: float) -> TransFuncs:
+
+    def forward(x):
+        return np.sign(x) * np.power(np.abs(x), exp)
+
+    def inverse(x):
+        return np.sign(x) * np.power(np.abs(x), 1 / exp)
+
+    return forward, inverse
+
+
+def _default_spacer(x: Series) -> float:
+    return 1
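
Illustration (not part of the patch): a minimal sketch of how the forward/inverse
transform pairs defined above behave, assuming the private helpers are importable
from seaborn._core.scales as in the released library.

    import numpy as np
    from seaborn._core.scales import _make_log_transforms, _make_symlog_transforms

    log, exp = _make_log_transforms(base=10)
    x = np.array([0.1, 1.0, 10.0, 100.0])
    assert np.allclose(exp(log(x)), x)          # forward/inverse round-trip is the identity
    assert np.allclose(log(x), [-1, 0, 1, 2])   # base-10 log

    symlog, symexp = _make_symlog_transforms(c=1, base=10)
    y = np.array([-100.0, -1.0, 0.0, 1.0, 100.0])
    assert np.allclose(symexp(symlog(y)), y)    # sign-preserving and symmetric around zero
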
diff --git a/seaborn/_core/subplots.py b/seaborn/_core/subplots.py
index ab72e2f0..287f4416 100644
--- a/seaborn/_core/subplots.py
+++ b/seaborn/_core/subplots.py
@@ -1,12 +1,14 @@
 from __future__ import annotations
 from collections.abc import Generator
+
 import numpy as np
 import matplotlib as mpl
 import matplotlib.pyplot as plt
+
 from matplotlib.axes import Axes
 from matplotlib.figure import Figure
 from typing import TYPE_CHECKING
-if TYPE_CHECKING:
+if TYPE_CHECKING:  # TODO move to seaborn._core.typing?
     from seaborn._core.plot import FacetSpec, PairSpec
     from matplotlib.figure import SubFigure

@@ -27,44 +29,235 @@ class Subplots:
         Data used to define figure setup.

     """
+    def __init__(
+        self,
+        subplot_spec: dict,  # TODO define as TypedDict
+        facet_spec: FacetSpec,
+        pair_spec: PairSpec,
+    ):

-    def __init__(self, subplot_spec: dict, facet_spec: FacetSpec, pair_spec:
-        PairSpec):
         self.subplot_spec = subplot_spec
+
         self._check_dimension_uniqueness(facet_spec, pair_spec)
         self._determine_grid_dimensions(facet_spec, pair_spec)
         self._handle_wrapping(facet_spec, pair_spec)
         self._determine_axis_sharing(pair_spec)

-    def _check_dimension_uniqueness(self, facet_spec: FacetSpec, pair_spec:
-        PairSpec) ->None:
+    def _check_dimension_uniqueness(
+        self, facet_spec: FacetSpec, pair_spec: PairSpec
+    ) -> None:
         """Reject specs that pair and facet on (or wrap to) same figure dimension."""
-        pass
+        err = None
+
+        facet_vars = facet_spec.get("variables", {})

-    def _determine_grid_dimensions(self, facet_spec: FacetSpec, pair_spec:
-        PairSpec) ->None:
+        if facet_spec.get("wrap") and {"col", "row"} <= set(facet_vars):
+            err = "Cannot wrap facets when specifying both `col` and `row`."
+        elif (
+            pair_spec.get("wrap")
+            and pair_spec.get("cross", True)
+            and len(pair_spec.get("structure", {}).get("x", [])) > 1
+            and len(pair_spec.get("structure", {}).get("y", [])) > 1
+        ):
+            err = "Cannot wrap subplots when pairing on both `x` and `y`."
+
+        collisions = {"x": ["columns", "rows"], "y": ["rows", "columns"]}
+        for pair_axis, (multi_dim, wrap_dim) in collisions.items():
+            if pair_axis not in pair_spec.get("structure", {}):
+                continue
+            elif multi_dim[:3] in facet_vars:
+                err = f"Cannot facet the {multi_dim} while pairing on `{pair_axis}``."
+            elif wrap_dim[:3] in facet_vars and facet_spec.get("wrap"):
+                err = f"Cannot wrap the {wrap_dim} while pairing on `{pair_axis}``."
+            elif wrap_dim[:3] in facet_vars and pair_spec.get("wrap"):
+                err = f"Cannot wrap the {multi_dim} while faceting the {wrap_dim}."
+
+        if err is not None:
+            raise RuntimeError(err)  # TODO what err class? Define PlotSpecError?
+
+    def _determine_grid_dimensions(
+        self, facet_spec: FacetSpec, pair_spec: PairSpec
+    ) -> None:
         """Parse faceting and pairing information to define figure structure."""
-        pass
+        self.grid_dimensions: dict[str, list] = {}
+        for dim, axis in zip(["col", "row"], ["x", "y"]):
+
+            facet_vars = facet_spec.get("variables", {})
+            if dim in facet_vars:
+                self.grid_dimensions[dim] = facet_spec["structure"][dim]
+            elif axis in pair_spec.get("structure", {}):
+                self.grid_dimensions[dim] = [
+                    None for _ in pair_spec.get("structure", {})[axis]
+                ]
+            else:
+                self.grid_dimensions[dim] = [None]

-    def _handle_wrapping(self, facet_spec: FacetSpec, pair_spec: PairSpec
-        ) ->None:
+            self.subplot_spec[f"n{dim}s"] = len(self.grid_dimensions[dim])
+
+        if not pair_spec.get("cross", True):
+            self.subplot_spec["nrows"] = 1
+
+        self.n_subplots = self.subplot_spec["ncols"] * self.subplot_spec["nrows"]
+
+    def _handle_wrapping(
+        self, facet_spec: FacetSpec, pair_spec: PairSpec
+    ) -> None:
         """Update figure structure parameters based on facet/pair wrapping."""
-        pass
+        self.wrap = wrap = facet_spec.get("wrap") or pair_spec.get("wrap")
+        if not wrap:
+            return
+
+        wrap_dim = "row" if self.subplot_spec["nrows"] > 1 else "col"
+        flow_dim = {"row": "col", "col": "row"}[wrap_dim]
+        n_subplots = self.subplot_spec[f"n{wrap_dim}s"]
+        flow = int(np.ceil(n_subplots / wrap))

-    def _determine_axis_sharing(self, pair_spec: PairSpec) ->None:
+        if wrap < self.subplot_spec[f"n{wrap_dim}s"]:
+            self.subplot_spec[f"n{wrap_dim}s"] = wrap
+        self.subplot_spec[f"n{flow_dim}s"] = flow
+        self.n_subplots = n_subplots
+        self.wrap_dim = wrap_dim
+
+    def _determine_axis_sharing(self, pair_spec: PairSpec) -> None:
         """Update subplot spec with default or specified axis sharing parameters."""
-        pass
+        axis_to_dim = {"x": "col", "y": "row"}
+        key: str
+        val: str | bool
+        for axis in "xy":
+            key = f"share{axis}"
+            # Always use user-specified value, if present
+            if key not in self.subplot_spec:
+                if axis in pair_spec.get("structure", {}):
+                    # Paired axes are shared along one dimension by default
+                    if self.wrap is None and pair_spec.get("cross", True):
+                        val = axis_to_dim[axis]
+                    else:
+                        val = False
+                else:
+                    # This will pick up faceted plots, as well as single subplot
+                    # figures, where the value doesn't really matter
+                    val = True
+                self.subplot_spec[key] = val

-    def init_figure(self, pair_spec: PairSpec, pyplot: bool=False,
-        figure_kws: (dict | None)=None, target: (Axes | Figure | SubFigure |
-        None)=None) ->Figure:
+    def init_figure(
+        self,
+        pair_spec: PairSpec,
+        pyplot: bool = False,
+        figure_kws: dict | None = None,
+        target: Axes | Figure | SubFigure | None = None,
+    ) -> Figure:
         """Initialize matplotlib objects and add seaborn-relevant metadata."""
-        pass
+        # TODO reduce need to pass pair_spec here?
+
+        if figure_kws is None:
+            figure_kws = {}
+
+        if isinstance(target, mpl.axes.Axes):
+
+            if max(self.subplot_spec["nrows"], self.subplot_spec["ncols"]) > 1:
+                err = " ".join([
+                    "Cannot create multiple subplots after calling `Plot.on` with",
+                    f"a {mpl.axes.Axes} object.",
+                    f" You may want to use a {mpl.figure.SubFigure} instead.",
+                ])
+                raise RuntimeError(err)
+
+            self._subplot_list = [{
+                "ax": target,
+                "left": True,
+                "right": True,
+                "top": True,
+                "bottom": True,
+                "col": None,
+                "row": None,
+                "x": "x",
+                "y": "y",
+            }]
+            self._figure = target.figure
+            return self._figure
+
+        elif isinstance(target, mpl.figure.SubFigure):
+            figure = target.figure
+        elif isinstance(target, mpl.figure.Figure):
+            figure = target
+        else:
+            if pyplot:
+                figure = plt.figure(**figure_kws)
+            else:
+                figure = mpl.figure.Figure(**figure_kws)
+            target = figure
+        self._figure = figure
+
+        axs = target.subplots(**self.subplot_spec, squeeze=False)
+
+        if self.wrap:
+            # Remove unused Axes and flatten the rest into a vector (reshaped to 2D below)
+            axs_flat = axs.ravel({"col": "C", "row": "F"}[self.wrap_dim])
+            axs, extra = np.split(axs_flat, [self.n_subplots])
+            for ax in extra:
+                ax.remove()
+            if self.wrap_dim == "col":
+                axs = axs[np.newaxis, :]
+            else:
+                axs = axs[:, np.newaxis]
+
+        # Get i, j coordinates for each Axes object
+        # Note that i, j are with respect to faceting/pairing,
+        # not the subplot grid itself (which only matters in the case of wrapping).
+        iter_axs: np.ndenumerate | zip
+        if not pair_spec.get("cross", True):
+            indices = np.arange(self.n_subplots)
+            iter_axs = zip(zip(indices, indices), axs.flat)
+        else:
+            iter_axs = np.ndenumerate(axs)
+
+        self._subplot_list = []
+        for (i, j), ax in iter_axs:
+
+            info = {"ax": ax}
+
+            nrows, ncols = self.subplot_spec["nrows"], self.subplot_spec["ncols"]
+            if not self.wrap:
+                info["left"] = j % ncols == 0
+                info["right"] = (j + 1) % ncols == 0
+                info["top"] = i == 0
+                info["bottom"] = i == nrows - 1
+            elif self.wrap_dim == "col":
+                info["left"] = j % ncols == 0
+                info["right"] = ((j + 1) % ncols == 0) or ((j + 1) == self.n_subplots)
+                info["top"] = j < ncols
+                info["bottom"] = j >= (self.n_subplots - ncols)
+            elif self.wrap_dim == "row":
+                info["left"] = i < nrows
+                info["right"] = i >= self.n_subplots - nrows
+                info["top"] = i % nrows == 0
+                info["bottom"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)
+
+            if not pair_spec.get("cross", True):
+                info["top"] = j < ncols
+                info["bottom"] = j >= self.n_subplots - ncols
+
+            for dim in ["row", "col"]:
+                idx = {"row": i, "col": j}[dim]
+                info[dim] = self.grid_dimensions[dim][idx]
+
+            for axis in "xy":
+
+                idx = {"x": j, "y": i}[axis]
+                if axis in pair_spec.get("structure", {}):
+                    key = f"{axis}{idx}"
+                else:
+                    key = axis
+                info[axis] = key
+
+            self._subplot_list.append(info)
+
+        return figure

-    def __iter__(self) ->Generator[dict, None, None]:
+    def __iter__(self) -> Generator[dict, None, None]:  # TODO TypedDict?
         """Yield each subplot dictionary with Axes object and metadata."""
         yield from self._subplot_list

-    def __len__(self) ->int:
+    def __len__(self) -> int:
         """Return the number of subplots in this figure."""
         return len(self._subplot_list)
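
Illustration (not part of the patch): a small sketch of how the Subplots logic above
resolves a wrapped facet grid. The spec dicts are hand-built stand-ins for what Plot
normally constructs, and the import path assumes seaborn._core.subplots.

    from seaborn._core.subplots import Subplots

    facet_spec = {
        "variables": {"col": "group"},
        "structure": {"col": ["a", "b", "c", "d", "e"]},
        "wrap": 3,
    }
    sp = Subplots({}, facet_spec, {})

    # Five column facets wrapped at 3 -> a 2x3 grid with one unused slot
    assert sp.n_subplots == 5
    assert sp.subplot_spec["ncols"] == 3 and sp.subplot_spec["nrows"] == 2

    sp.init_figure({})   # creates the Figure and removes the unused Axes
    assert len(sp) == 5  # one metadata dict per remaining subplot
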
diff --git a/seaborn/_core/typing.py b/seaborn/_core/typing.py
index 1ff7dfb2..9bdf8a6e 100644
--- a/seaborn/_core/typing.py
+++ b/seaborn/_core/typing.py
@@ -1,34 +1,48 @@
 from __future__ import annotations
+
 from collections.abc import Iterable, Mapping
 from datetime import date, datetime, timedelta
 from typing import Any, Optional, Union, Tuple, List, Dict
-from numpy import ndarray
+
+from numpy import ndarray  # TODO use ArrayLike?
 from pandas import Series, Index, Timestamp, Timedelta
 from matplotlib.colors import Colormap, Normalize
-ColumnName = Union[str, bytes, date, datetime, timedelta, bool, complex,
-    Timestamp, Timedelta]
+
+
+ColumnName = Union[
+    str, bytes, date, datetime, timedelta, bool, complex, Timestamp, Timedelta
+]
 Vector = Union[Series, Index, ndarray]
+
 VariableSpec = Union[ColumnName, Vector, None]
 VariableSpecList = Union[List[VariableSpec], Index, None]
+
+# A DataSource can be an object implementing __dataframe__, or a Mapping
+# (and is optional in all contexts where it is used).
+# I don't think there's an abc for "has __dataframe__", so we type as object
+# but keep the (slightly odd) Union alias for better user-facing annotations.
 DataSource = Union[object, Mapping, None]
-OrderSpec = Union[Iterable, None]
+
+OrderSpec = Union[Iterable, None]  # TODO technically str is iterable
 NormSpec = Union[Tuple[Optional[float], Optional[float]], Normalize, None]
+
+# TODO for discrete mappings, it would be ideal to use a parameterized type
+# as the dict values / list entries should be of specific type(s) for each method
 PaletteSpec = Union[str, list, dict, Colormap, None]
 DiscreteValueSpec = Union[dict, list, None]
-ContinuousValueSpec = Union[Tuple[float, float], List[float], Dict[Any,
-    float], None]
+ContinuousValueSpec = Union[
+    Tuple[float, float], List[float], Dict[Any, float], None,
+]


 class Default:
-
     def __repr__(self):
-        return '<default>'
+        return "<default>"


 class Deprecated:
-
     def __repr__(self):
-        return '<deprecated>'
+        return "<deprecated>"


 default = Default()
diff --git a/seaborn/_docstrings.py b/seaborn/_docstrings.py
index 92bca3b0..2ab210b6 100644
--- a/seaborn/_docstrings.py
+++ b/seaborn/_docstrings.py
@@ -4,7 +4,8 @@ from .external.docscrape import NumpyDocString


 class DocstringComponents:
-    regexp = re.compile('\\n((\\n|.)+)\\n\\s*', re.MULTILINE)
+
+    regexp = re.compile(r"\n((\n|.)+)\n\s*", re.MULTILINE)

     def __init__(self, comp_dict, strip_whitespace=True):
         """Read entries from a dict, optionally stripping outer whitespace."""
@@ -18,6 +19,7 @@ class DocstringComponents:
                     entries[key] = m.group(1)
         else:
             entries = comp_dict.copy()
+
         self.entries = entries

     def __getattr__(self, attr):
@@ -28,6 +30,11 @@ class DocstringComponents:
             try:
                 return self.__getattribute__(attr)
             except AttributeError as err:
+                # If Python is run with -OO, it will strip docstrings and our lookup
+                # from self.entries will fail. We check for __debug__, which is actually
+                # set to False by -O (it is True for normal execution).
+                # But we only want to see an error when building the docs;
+                # not something users should see, so this slight inconsistency is fine.
                 if __debug__:
                     raise err
                 else:
@@ -36,139 +43,156 @@ class DocstringComponents:
     @classmethod
     def from_nested_components(cls, **kwargs):
         """Add multiple sub-sets of components."""
-        pass
+        return cls(kwargs, strip_whitespace=False)

     @classmethod
     def from_function_params(cls, func):
         """Use the numpydoc parser to extract components from existing func."""
-        pass
+        params = NumpyDocString(pydoc.getdoc(func))["Parameters"]
+        comp_dict = {}
+        for p in params:
+            name = p.name
+            type = p.type
+            desc = "\n    ".join(p.desc)
+            comp_dict[name] = f"{name} : {type}\n    {desc}"
+
+        return cls(comp_dict)
+

+# TODO is "vector" the best term here? We mean to imply 1D data with a variety
+# of types.

-_core_params = dict(data=
-    """
+# TODO now that we can parse numpydoc style strings, do we need to define dicts
+# of docstring components, or just write out a docstring?
+
+
+_core_params = dict(
+    data="""
 data : :class:`pandas.DataFrame`, :class:`numpy.ndarray`, mapping, or sequence
     Input data structure. Either a long-form collection of vectors that can be
     assigned to named variables or a wide-form dataset that will be internally
     reshaped.
-    """
-    , xy=
-    """
+    """,  # TODO add link to user guide narrative when exists
+    xy="""
 x, y : vectors or keys in ``data``
     Variables that specify positions on the x and y axes.
-    """
-    , hue=
-    """
+    """,
+    hue="""
 hue : vector or key in ``data``
     Semantic variable that is mapped to determine the color of plot elements.
-    """
-    , palette=
-    """
+    """,
+    palette="""
 palette : string, list, dict, or :class:`matplotlib.colors.Colormap`
     Method for choosing the colors to use when mapping the ``hue`` semantic.
     String values are passed to :func:`color_palette`. List or dict values
     imply categorical mapping, while a colormap object implies numeric mapping.
-    """
-    , hue_order=
-    """
+    """,  # noqa: E501
+    hue_order="""
 hue_order : vector of strings
     Specify the order of processing and plotting for categorical levels of the
     ``hue`` semantic.
-    """
-    , hue_norm=
-    """
+    """,
+    hue_norm="""
 hue_norm : tuple or :class:`matplotlib.colors.Normalize`
     Either a pair of values that set the normalization range in data units
     or an object that will map from data units into a [0, 1] interval. Usage
     implies numeric mapping.
-    """
-    , color=
-    """
+    """,
+    color="""
 color : :mod:`matplotlib color <matplotlib.colors>`
     Single color specification for when hue mapping is not used. Otherwise, the
     plot will try to hook into the matplotlib property cycle.
-    """
-    , ax=
-    """
+    """,
+    ax="""
 ax : :class:`matplotlib.axes.Axes`
     Pre-existing axes for the plot. Otherwise, call :func:`matplotlib.pyplot.gca`
     internally.
-    """
-    )
-_core_returns = dict(ax=
-    """
+    """,  # noqa: E501
+)
+
+
+_core_returns = dict(
+    ax="""
 :class:`matplotlib.axes.Axes`
     The matplotlib axes containing the plot.
-    """
-    , facetgrid=
-    """
+    """,
+    facetgrid="""
 :class:`FacetGrid`
     An object managing one or more subplots that correspond to conditional data
     subsets with convenient methods for batch-setting of axes attributes.
-    """
-    , jointgrid=
-    """
+    """,
+    jointgrid="""
 :class:`JointGrid`
     An object managing multiple subplots that correspond to joint and marginal axes
     for plotting a bivariate relationship or distribution.
-    """
-    , pairgrid=
-    """
+    """,
+    pairgrid="""
 :class:`PairGrid`
     An object managing multiple subplots that correspond to joint and marginal axes
     for pairwise combinations of multiple variables in a dataset.
-    """
-    )
-_seealso_blurbs = dict(scatterplot=
-    """
+    """,
+)
+
+
+_seealso_blurbs = dict(
+
+    # Relational plots
+    scatterplot="""
 scatterplot : Plot data using points.
-    """, lineplot=
-    '\nlineplot : Plot data using lines.\n    ', displot=
-    """
+    """,
+    lineplot="""
+lineplot : Plot data using lines.
+    """,
+
+    # Distribution plots
+    displot="""
 displot : Figure-level interface to distribution plot functions.
-    """
-    , histplot=
-    """
+    """,
+    histplot="""
 histplot : Plot a histogram of binned counts with optional normalization or smoothing.
-    """
-    , kdeplot=
-    """
+    """,
+    kdeplot="""
 kdeplot : Plot univariate or bivariate distributions using kernel density estimation.
-    """
-    , ecdfplot=
-    '\necdfplot : Plot empirical cumulative distribution functions.\n    ',
-    rugplot=
-    """
+    """,
+    ecdfplot="""
+ecdfplot : Plot empirical cumulative distribution functions.
+    """,
+    rugplot="""
 rugplot : Plot a tick at each observation value along the x and/or y axes.
-    """
-    , stripplot=
-    '\nstripplot : Plot a categorical scatter with jitter.\n    ',
-    swarmplot=
-    """
+    """,
+
+    # Categorical plots
+    stripplot="""
+stripplot : Plot a categorical scatter with jitter.
+    """,
+    swarmplot="""
 swarmplot : Plot a categorical scatter with non-overlapping points.
-    """
-    , violinplot=
-    """
+    """,
+    violinplot="""
 violinplot : Draw an enhanced boxplot using kernel density estimation.
-    """
-    , pointplot=
-    '\npointplot : Plot point estimates and CIs using markers and lines.\n    '
-    , jointplot=
-    """
+    """,
+    pointplot="""
+pointplot : Plot point estimates and CIs using markers and lines.
+    """,
+
+    # Multiples
+    jointplot="""
 jointplot : Draw a bivariate plot with univariate marginal distributions.
-    """
-    , pairplot=
-    """
+    """,
+    pairplot="""
 jointplot : Draw multiple bivariate plots with univariate marginal distributions.
-    """
-    , jointgrid=
-    """
+    """,
+    jointgrid="""
 JointGrid : Set up a figure with joint and marginal views on bivariate data.
-    """
-    , pairgrid=
-    """
+    """,
+    pairgrid="""
 PairGrid : Set up a figure with joint and marginal views on multiple variables.
-    """
-    )
-_core_docs = dict(params=DocstringComponents(_core_params), returns=
-    DocstringComponents(_core_returns), seealso=DocstringComponents(
-    _seealso_blurbs))
+    """,
+)
+
+
+_core_docs = dict(
+    params=DocstringComponents(_core_params),
+    returns=DocstringComponents(_core_returns),
+    seealso=DocstringComponents(_seealso_blurbs),
+)
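
Illustration (not part of the patch): the components defined above are interpolated
into function docstrings with str.format, relying on DocstringComponents.__getattr__
for the dotted lookups. A minimal sketch:

    from seaborn._docstrings import _core_docs

    def example_plot(data=None, x=None, y=None, ax=None):
        """Draw an example plot.

        Parameters
        ----------
        {params.data}
        {params.xy}
        {params.ax}

        Returns
        -------
        {returns.ax}
        """

    # Interpolate the shared component text into the docstring
    example_plot.__doc__ = example_plot.__doc__.format(**_core_docs)
    assert "pandas.DataFrame" in example_plot.__doc__
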
diff --git a/seaborn/_marks/area.py b/seaborn/_marks/area.py
index 427c1a16..7514a6d1 100644
--- a/seaborn/_marks/area.py
+++ b/seaborn/_marks/area.py
@@ -1,13 +1,89 @@
 from __future__ import annotations
 from collections import defaultdict
 from dataclasses import dataclass
+
 import numpy as np
 import matplotlib as mpl
-from seaborn._marks.base import Mark, Mappable, MappableBool, MappableFloat, MappableColor, MappableStyle, resolve_properties, resolve_color, document_properties
+
+from seaborn._marks.base import (
+    Mark,
+    Mappable,
+    MappableBool,
+    MappableFloat,
+    MappableColor,
+    MappableStyle,
+    resolve_properties,
+    resolve_color,
+    document_properties,
+)


 class AreaBase:
-    pass
+
+    def _plot(self, split_gen, scales, orient):
+
+        patches = defaultdict(list)
+
+        for keys, data, ax in split_gen():
+
+            kws = {}
+            data = self._standardize_coordinate_parameters(data, orient)
+            resolved = resolve_properties(self, keys, scales)
+            verts = self._get_verts(data, orient)
+            ax.update_datalim(verts)
+
+            # TODO should really move this logic into resolve_color
+            fc = resolve_color(self, keys, "", scales)
+            if not resolved["fill"]:
+                fc = mpl.colors.to_rgba(fc, 0)
+
+            kws["facecolor"] = fc
+            kws["edgecolor"] = resolve_color(self, keys, "edge", scales)
+            kws["linewidth"] = resolved["edgewidth"]
+            kws["linestyle"] = resolved["edgestyle"]
+
+            patches[ax].append(mpl.patches.Polygon(verts, **kws))
+
+        for ax, ax_patches in patches.items():
+
+            for patch in ax_patches:
+                self._postprocess_artist(patch, ax, orient)
+                ax.add_patch(patch)
+
+    def _standardize_coordinate_parameters(self, data, orient):
+        return data
+
+    def _postprocess_artist(self, artist, ax, orient):
+        pass
+
+    def _get_verts(self, data, orient):
+
+        dv = {"x": "y", "y": "x"}[orient]
+        data = data.sort_values(orient, kind="mergesort")
+        verts = np.concatenate([
+            data[[orient, f"{dv}min"]].to_numpy(),
+            data[[orient, f"{dv}max"]].to_numpy()[::-1],
+        ])
+        if orient == "y":
+            verts = verts[:, ::-1]
+        return verts
+
+    def _legend_artist(self, variables, value, scales):
+
+        keys = {v: value for v in variables}
+        resolved = resolve_properties(self, keys, scales)
+
+        fc = resolve_color(self, keys, "", scales)
+        if not resolved["fill"]:
+            fc = mpl.colors.to_rgba(fc, 0)
+
+        return mpl.patches.Patch(
+            facecolor=fc,
+            edgecolor=resolve_color(self, keys, "edge", scales),
+            linewidth=resolved["edgewidth"],
+            linestyle=resolved["edgestyle"],
+            **self.artist_kws,
+        )


 @document_properties
@@ -25,15 +101,40 @@ class Area(AreaBase, Mark):
     .. include:: ../docstrings/objects.Area.rst

     """
-    color: MappableColor = Mappable('C0')
-    alpha: MappableFloat = Mappable(0.2)
-    fill: MappableBool = Mappable(True)
-    edgecolor: MappableColor = Mappable(depend='color')
-    edgealpha: MappableFloat = Mappable(1)
-    edgewidth: MappableFloat = Mappable(rc='patch.linewidth')
-    edgestyle: MappableStyle = Mappable('-')
+    color: MappableColor = Mappable("C0", )
+    alpha: MappableFloat = Mappable(.2, )
+    fill: MappableBool = Mappable(True, )
+    edgecolor: MappableColor = Mappable(depend="color")
+    edgealpha: MappableFloat = Mappable(1, )
+    edgewidth: MappableFloat = Mappable(rc="patch.linewidth", )
+    edgestyle: MappableStyle = Mappable("-", )
+
+    # TODO should this be settable / mappable?
     baseline: MappableFloat = Mappable(0, grouping=False)

+    def _standardize_coordinate_parameters(self, data, orient):
+        dv = {"x": "y", "y": "x"}[orient]
+        return data.rename(columns={"baseline": f"{dv}min", dv: f"{dv}max"})
+
+    def _postprocess_artist(self, artist, ax, orient):
+
+        # TODO copying a lot of code from Bar, let's abstract this
+        # See comments there; I am not going to repeat them here
+
+        artist.set_linewidth(artist.get_linewidth() * 2)
+
+        linestyle = artist.get_linestyle()
+        if linestyle[1]:
+            linestyle = (linestyle[0], tuple(x / 2 for x in linestyle[1]))
+        artist.set_linestyle(linestyle)
+
+        artist.set_clip_path(artist.get_path(), artist.get_transform() + ax.transData)
+        if self.artist_kws.get("clip_on", True):
+            artist.set_clip_box(ax.bbox)
+
+        val_idx = ["y", "x"].index(orient)
+        artist.sticky_edges[val_idx][:] = (0, np.inf)
+

 @document_properties
 @dataclass
@@ -50,10 +151,20 @@ class Band(AreaBase, Mark):
     .. include:: ../docstrings/objects.Band.rst

     """
-    color: MappableColor = Mappable('C0')
-    alpha: MappableFloat = Mappable(0.2)
-    fill: MappableBool = Mappable(True)
-    edgecolor: MappableColor = Mappable(depend='color')
-    edgealpha: MappableFloat = Mappable(1)
-    edgewidth: MappableFloat = Mappable(0)
-    edgestyle: MappableFloat = Mappable('-')
+    color: MappableColor = Mappable("C0", )
+    alpha: MappableFloat = Mappable(.2, )
+    fill: MappableBool = Mappable(True, )
+    edgecolor: MappableColor = Mappable(depend="color", )
+    edgealpha: MappableFloat = Mappable(1, )
+    edgewidth: MappableFloat = Mappable(0, )
+    edgestyle: MappableFloat = Mappable("-", )
+
+    def _standardize_coordinate_parameters(self, data, orient):
+        # dv = {"x": "y", "y": "x"}[orient]
+        # TODO assert that all(ymax >= ymin)?
+        # TODO what if only one exists?
+        other = {"x": "y", "y": "x"}[orient]
+        if not set(data.columns) & {f"{other}min", f"{other}max"}:
+            agg = {f"{other}min": (other, "min"), f"{other}max": (other, "max")}
+            data = data.groupby(orient).agg(**agg).reset_index()
+        return data
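
Illustration (not part of the patch): how these marks are typically used through the
objects interface (seaborn.objects), with a toy dataframe. Band spans ymin/ymax, while
Area fills from its baseline to y.

    import pandas as pd
    import seaborn.objects as so

    df = pd.DataFrame({
        "x": [1, 2, 3, 4, 5],
        "ymin": [0, 1, 1, 2, 3],
        "ymax": [2, 3, 4, 5, 6],
    })

    so.Plot(df, x="x", ymin="ymin", ymax="ymax").add(so.Band()).plot()
    so.Plot(df, x="x", y="ymax").add(so.Area()).plot()
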
diff --git a/seaborn/_marks/bar.py b/seaborn/_marks/bar.py
index 66c20024..2aed6830 100644
--- a/seaborn/_marks/bar.py
+++ b/seaborn/_marks/bar.py
@@ -1,9 +1,22 @@
 from __future__ import annotations
 from collections import defaultdict
 from dataclasses import dataclass
+
 import numpy as np
 import matplotlib as mpl
-from seaborn._marks.base import Mark, Mappable, MappableBool, MappableColor, MappableFloat, MappableStyle, resolve_properties, resolve_color, document_properties
+
+from seaborn._marks.base import (
+    Mark,
+    Mappable,
+    MappableBool,
+    MappableColor,
+    MappableFloat,
+    MappableStyle,
+    resolve_properties,
+    resolve_color,
+    document_properties
+)
+
 from typing import TYPE_CHECKING
 if TYPE_CHECKING:
     from typing import Any
@@ -12,7 +25,87 @@ if TYPE_CHECKING:


 class BarBase(Mark):
-    pass
+
+    def _make_patches(self, data, scales, orient):
+
+        transform = scales[orient]._matplotlib_scale.get_transform()
+        forward = transform.transform
+        reverse = transform.inverted().transform
+
+        other = {"x": "y", "y": "x"}[orient]
+
+        pos = reverse(forward(data[orient]) - data["width"] / 2)
+        width = reverse(forward(data[orient]) + data["width"] / 2) - pos
+
+        val = (data[other] - data["baseline"]).to_numpy()
+        base = data["baseline"].to_numpy()
+
+        kws = self._resolve_properties(data, scales)
+        if orient == "x":
+            kws.update(x=pos, y=base, w=width, h=val)
+        else:
+            kws.update(x=base, y=pos, w=val, h=width)
+
+        kws.pop("width", None)
+        kws.pop("baseline", None)
+
+        val_dim = {"x": "h", "y": "w"}[orient]
+        bars, vals = [], []
+
+        for i in range(len(data)):
+
+            row = {k: v[i] for k, v in kws.items()}
+
+            # Skip bars with no value. It's possible we'll want to make this
+            # an option (i.e so you have an artist for animating or annotating),
+            # but let's keep things simple for now.
+            if not np.nan_to_num(row[val_dim]):
+                continue
+
+            bar = mpl.patches.Rectangle(
+                xy=(row["x"], row["y"]),
+                width=row["w"],
+                height=row["h"],
+                facecolor=row["facecolor"],
+                edgecolor=row["edgecolor"],
+                linestyle=row["edgestyle"],
+                linewidth=row["edgewidth"],
+                **self.artist_kws,
+            )
+            bars.append(bar)
+            vals.append(row[val_dim])
+
+        return bars, vals
+
+    def _resolve_properties(self, data, scales):
+
+        resolved = resolve_properties(self, data, scales)
+
+        resolved["facecolor"] = resolve_color(self, data, "", scales)
+        resolved["edgecolor"] = resolve_color(self, data, "edge", scales)
+
+        fc = resolved["facecolor"]
+        if isinstance(fc, tuple):
+            resolved["facecolor"] = fc[0], fc[1], fc[2], fc[3] * resolved["fill"]
+        else:
+            fc[:, 3] = fc[:, 3] * resolved["fill"]  # TODO Is inplace mod a problem?
+            resolved["facecolor"] = fc
+
+        return resolved
+
+    def _legend_artist(
+        self, variables: list[str], value: Any, scales: dict[str, Scale],
+    ) -> Artist:
+        # TODO return some sensible default?
+        key = {v: value for v in variables}
+        key = self._resolve_properties(key, scales)
+        artist = mpl.patches.Patch(
+            facecolor=key["facecolor"],
+            edgecolor=key["edgecolor"],
+            linewidth=key["edgewidth"],
+            linestyle=key["edgestyle"],
+        )
+        return artist


 @document_properties
@@ -30,15 +123,56 @@ class Bar(BarBase):
     .. include:: ../docstrings/objects.Bar.rst

     """
-    color: MappableColor = Mappable('C0', grouping=False)
-    alpha: MappableFloat = Mappable(0.7, grouping=False)
+    color: MappableColor = Mappable("C0", grouping=False)
+    alpha: MappableFloat = Mappable(.7, grouping=False)
     fill: MappableBool = Mappable(True, grouping=False)
-    edgecolor: MappableColor = Mappable(depend='color', grouping=False)
+    edgecolor: MappableColor = Mappable(depend="color", grouping=False)
     edgealpha: MappableFloat = Mappable(1, grouping=False)
-    edgewidth: MappableFloat = Mappable(rc='patch.linewidth', grouping=False)
-    edgestyle: MappableStyle = Mappable('-', grouping=False)
-    width: MappableFloat = Mappable(0.8, grouping=False)
-    baseline: MappableFloat = Mappable(0, grouping=False)
+    edgewidth: MappableFloat = Mappable(rc="patch.linewidth", grouping=False)
+    edgestyle: MappableStyle = Mappable("-", grouping=False)
+    # pattern: MappableString = Mappable(None)  # TODO no Property yet
+
+    width: MappableFloat = Mappable(.8, grouping=False)
+    baseline: MappableFloat = Mappable(0, grouping=False)  # TODO *is* this mappable?
+
+    def _plot(self, split_gen, scales, orient):
+
+        val_idx = ["y", "x"].index(orient)
+
+        for _, data, ax in split_gen():
+
+            bars, vals = self._make_patches(data, scales, orient)
+
+            for bar in bars:
+
+                # Because we are clipping the artist (see below), the edges end up
+                # looking half as wide as they actually are. I don't love this clumsy
+                # workaround, which is going to cause surprises if you work with the
+                # artists directly. We may need to revisit after feedback.
+                bar.set_linewidth(bar.get_linewidth() * 2)
+                linestyle = bar.get_linestyle()
+                if linestyle[1]:
+                    linestyle = (linestyle[0], tuple(x / 2 for x in linestyle[1]))
+                bar.set_linestyle(linestyle)
+
+                # This is a bit of a hack to handle the fact that the edge lines are
+                # centered on the actual extents of the bar, and overlap when bars are
+                # stacked or dodged. We may discover that this causes problems and needs
+                # to be revisited at some point. Also it should be faster to clip with
+                # a bbox than a path, but I can't work out how to get the intersection
+                # with the axes bbox.
+                bar.set_clip_path(bar.get_path(), bar.get_transform() + ax.transData)
+                if self.artist_kws.get("clip_on", True):
+                    # It seems the above hack undoes the default axes clipping
+                    bar.set_clip_box(ax.bbox)
+                bar.sticky_edges[val_idx][:] = (0, np.inf)
+                ax.add_patch(bar)
+
+            # Add a container which is useful for, e.g. Axes.bar_label
+            orientation = {"x": "vertical", "y": "horizontal"}[orient]
+            container_kws = dict(datavalues=vals, orientation=orientation)
+            container = mpl.container.BarContainer(bars, **container_kws)
+            ax.add_container(container)


 @document_properties
@@ -56,12 +190,63 @@ class Bars(BarBase):
     .. include:: ../docstrings/objects.Bars.rst

     """
-    color: MappableColor = Mappable('C0', grouping=False)
-    alpha: MappableFloat = Mappable(0.7, grouping=False)
+    color: MappableColor = Mappable("C0", grouping=False)
+    alpha: MappableFloat = Mappable(.7, grouping=False)
     fill: MappableBool = Mappable(True, grouping=False)
-    edgecolor: MappableColor = Mappable(rc='patch.edgecolor', grouping=False)
+    edgecolor: MappableColor = Mappable(rc="patch.edgecolor", grouping=False)
     edgealpha: MappableFloat = Mappable(1, grouping=False)
     edgewidth: MappableFloat = Mappable(auto=True, grouping=False)
-    edgestyle: MappableStyle = Mappable('-', grouping=False)
+    edgestyle: MappableStyle = Mappable("-", grouping=False)
+    # pattern: MappableString = Mappable(None)  # TODO no Property yet
+
     width: MappableFloat = Mappable(1, grouping=False)
-    baseline: MappableFloat = Mappable(0, grouping=False)
+    baseline: MappableFloat = Mappable(0, grouping=False)  # TODO *is* this mappable?
+
+    def _plot(self, split_gen, scales, orient):
+
+        ori_idx = ["x", "y"].index(orient)
+        val_idx = ["y", "x"].index(orient)
+
+        patches = defaultdict(list)
+        for _, data, ax in split_gen():
+            bars, _ = self._make_patches(data, scales, orient)
+            patches[ax].extend(bars)
+
+        collections = {}
+        for ax, ax_patches in patches.items():
+
+            col = mpl.collections.PatchCollection(ax_patches, match_original=True)
+            col.sticky_edges[val_idx][:] = (0, np.inf)
+            ax.add_collection(col, autolim=False)
+            collections[ax] = col
+
+            # Workaround for matplotlib autoscaling bug
+            # https://github.com/matplotlib/matplotlib/issues/11898
+            # https://github.com/matplotlib/matplotlib/issues/23129
+            xys = np.vstack([path.vertices for path in col.get_paths()])
+            ax.update_datalim(xys)
+
+        if "edgewidth" not in scales and isinstance(self.edgewidth, Mappable):
+
+            for ax in collections:
+                ax.autoscale_view()
+
+            def get_dimensions(collection):
+                edges, widths = [], []
+                for verts in (path.vertices for path in collection.get_paths()):
+                    edges.append(min(verts[:, ori_idx]))
+                    widths.append(np.ptp(verts[:, ori_idx]))
+                return np.array(edges), np.array(widths)
+
+            min_width = np.inf
+            for ax, col in collections.items():
+                edges, widths = get_dimensions(col)
+                points = 72 / ax.figure.dpi * abs(
+                    ax.transData.transform([edges + widths] * 2)
+                    - ax.transData.transform([edges] * 2)
+                )
+                min_width = min(min_width, min(points[:, ori_idx]))
+
+            linewidth = min(.1 * min_width, mpl.rcParams["patch.linewidth"])
+            for _, col in collections.items():
+                col.set_linewidth(linewidth)
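
Illustration (not part of the patch): typical usage of the two bar marks through the
objects interface. Bar is aimed at aggregated values (one rectangle per group), while
Bars is tuned for larger numbers of contiguous rectangles such as histograms.

    import pandas as pd
    import seaborn.objects as so

    tips = pd.DataFrame({
        "day": ["Thur", "Fri", "Sat", "Sun"] * 2,
        "total_bill": [17.0, 18.5, 21.0, 22.0, 16.0, 19.0, 20.5, 23.5],
    })

    # One bar per day, height given by the mean of total_bill
    so.Plot(tips, x="day", y="total_bill").add(so.Bar(), so.Agg()).plot()

    # Many contiguous bars, e.g. a histogram of the values
    so.Plot(tips, x="total_bill").add(so.Bars(), so.Hist(bins=5)).plot()
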
diff --git a/seaborn/_marks/base.py b/seaborn/_marks/base.py
index 03ee03a9..ac8fdf4a 100644
--- a/seaborn/_marks/base.py
+++ b/seaborn/_marks/base.py
@@ -3,21 +3,35 @@ from dataclasses import dataclass, fields, field
 import textwrap
 from typing import Any, Callable, Union
 from collections.abc import Generator
+
 import numpy as np
 import pandas as pd
 import matplotlib as mpl
+
 from numpy import ndarray
 from pandas import DataFrame
 from matplotlib.artist import Artist
+
 from seaborn._core.scales import Scale
-from seaborn._core.properties import PROPERTIES, Property, RGBATuple, DashPattern, DashPatternWithOffset
+from seaborn._core.properties import (
+    PROPERTIES,
+    Property,
+    RGBATuple,
+    DashPattern,
+    DashPatternWithOffset,
+)
 from seaborn._core.exceptions import PlotSpecError


 class Mappable:
-
-    def __init__(self, val: Any=None, depend: (str | None)=None, rc: (str |
-        None)=None, auto: bool=False, grouping: bool=True):
+    def __init__(
+        self,
+        val: Any = None,
+        depend: str | None = None,
+        rc: str | None = None,
+        auto: bool = False,
+        grouping: bool = True,
+    ):
         """
         Property that can be mapped from data or set directly, with flexible defaults.

@@ -39,6 +53,7 @@ class Mappable:
             assert depend in PROPERTIES
         if rc is not None:
             assert rc in mpl.rcParams
+
         self._val = val
         self._rc = rc
         self._depend = depend
@@ -48,27 +63,36 @@ class Mappable:
     def __repr__(self):
         """Nice formatting for when object appears in Mark init signature."""
         if self._val is not None:
-            s = f'<{repr(self._val)}>'
+            s = f"<{repr(self._val)}>"
         elif self._depend is not None:
-            s = f'<depend:{self._depend}>'
+            s = f"<depend:{self._depend}>"
         elif self._rc is not None:
-            s = f'<rc:{self._rc}>'
+            s = f"<rc:{self._rc}>"
         elif self._auto:
-            s = '<auto>'
+            s = "<auto>"
         else:
-            s = '<undefined>'
+            s = "<undefined>"
         return s

     @property
-    def depend(self) ->Any:
+    def depend(self) -> Any:
         """Return the name of the feature to source a default value from."""
-        pass
+        return self._depend

     @property
-    def default(self) ->Any:
+    def grouping(self) -> bool:
+        return self._grouping
+
+    @property
+    def default(self) -> Any:
         """Get the default value for this feature, or access the relevant rcParam."""
-        pass
+        if self._val is not None:
+            return self._val
+        elif self._rc is not None:
+            return mpl.rcParams.get(self._rc)
+

+# TODO where is the right place to put this kind of type aliasing?

 MappableBool = Union[bool, Mappable]
 MappableString = Union[str, Mappable]
@@ -80,10 +104,32 @@ MappableStyle = Union[str, DashPattern, DashPatternWithOffset, Mappable]
 @dataclass
 class Mark:
     """Base class for objects that visually represent data."""
+
     artist_kws: dict = field(default_factory=dict)

-    def _resolve(self, data: (DataFrame | dict[str, Any]), name: str,
-        scales: (dict[str, Scale] | None)=None) ->Any:
+    @property
+    def _mappable_props(self):
+        return {
+            f.name: getattr(self, f.name) for f in fields(self)
+            if isinstance(f.default, Mappable)
+        }
+
+    @property
+    def _grouping_props(self):
+        # TODO does it make sense to have variation within a Mark's
+        # properties about whether they are grouping?
+        return [
+            f.name for f in fields(self)
+            if isinstance(f.default, Mappable) and f.default.grouping
+        ]
+
+    # TODO make this method private? Would an extender ever need to call it directly?
+    def _resolve(
+        self,
+        data: DataFrame | dict[str, Any],
+        name: str,
+        scales: dict[str, Scale] | None = None,
+    ) -> Any:
         """Obtain default, specified, or mapped value for a named feature.

         Parameters
@@ -103,16 +149,102 @@ class Mark:
             of values with matching length).

         """
-        pass
+        feature = self._mappable_props[name]
+        prop = PROPERTIES.get(name, Property(name))
+        directly_specified = not isinstance(feature, Mappable)
+        return_multiple = isinstance(data, pd.DataFrame)
+        return_array = return_multiple and not name.endswith("style")
+
+        # Special case width because it needs to be resolved and added to the dataframe
+        # during layer prep (so the Move operations use it properly).
+        # TODO how does width *scaling* work, e.g. for violin width by count?
+        if name == "width":
+            directly_specified = directly_specified and name not in data
+
+        if directly_specified:
+            feature = prop.standardize(feature)
+            if return_multiple:
+                feature = [feature] * len(data)
+            if return_array:
+                feature = np.array(feature)
+            return feature
+
+        if name in data:
+            if scales is None or name not in scales:
+                # TODO Might this obviate the identity scale? Just don't add a scale?
+                feature = data[name]
+            else:
+                scale = scales[name]
+                value = data[name]
+                try:
+                    feature = scale(value)
+                except Exception as err:
+                    raise PlotSpecError._during("Scaling operation", name) from err
+
+            if return_array:
+                feature = np.asarray(feature)
+            return feature
+
+        if feature.depend is not None:
+            # TODO add source_func or similar to transform the source value?
+            # e.g. set linewidth as a proportion of pointsize?
+            return self._resolve(data, feature.depend, scales)

-    def _plot(self, split_generator: Callable[[], Generator], scales: dict[
-        str, Scale], orient: str) ->None:
+        default = prop.standardize(feature.default)
+        if return_multiple:
+            default = [default] * len(data)
+        if return_array:
+            default = np.array(default)
+        return default
+
+    def _infer_orient(self, scales: dict) -> str:  # TODO type scales
+
+        # TODO The original version of this (in seaborn._base) did more checking.
+        # Paring that down here for the prototype to see what restrictions make sense.
+
+        # TODO rethink this to map from scale type to "DV priority" and use that?
+        # e.g. Nominal > Discrete > Continuous
+
+        x = 0 if "x" not in scales else scales["x"]._priority
+        y = 0 if "y" not in scales else scales["y"]._priority
+
+        if y > x:
+            return "y"
+        else:
+            return "x"
+
+    def _plot(
+        self,
+        split_generator: Callable[[], Generator],
+        scales: dict[str, Scale],
+        orient: str,
+    ) -> None:
         """Main interface for creating a plot."""
-        pass
+        raise NotImplementedError()
+
+    def _legend_artist(
+        self, variables: list[str], value: Any, scales: dict[str, Scale],
+    ) -> Artist | None:
+
+        return None
+

+def resolve_properties(
+    mark: Mark, data: DataFrame, scales: dict[str, Scale]
+) -> dict[str, Any]:

-def resolve_color(mark: Mark, data: (DataFrame | dict), prefix: str='',
-    scales: (dict[str, Scale] | None)=None) ->(RGBATuple | ndarray):
+    props = {
+        name: mark._resolve(data, name, scales) for name in mark._mappable_props
+    }
+    return props
+
+
+def resolve_color(
+    mark: Mark,
+    data: DataFrame | dict,
+    prefix: str = "",
+    scales: dict[str, Scale] | None = None,
+) -> RGBATuple | ndarray:
     """
     Obtain a default, specified, or mapped value for a color feature.

@@ -133,4 +265,53 @@ def resolve_color(mark: Mark, data: (DataFrame | dict), prefix: str='',
         Support "color", "fillcolor", etc.

     """
-    pass
+    color = mark._resolve(data, f"{prefix}color", scales)
+
+    if f"{prefix}alpha" in mark._mappable_props:
+        alpha = mark._resolve(data, f"{prefix}alpha", scales)
+    else:
+        alpha = mark._resolve(data, "alpha", scales)
+
+    def visible(x, axis=None):
+        """Detect "invisible" colors to set alpha appropriately."""
+        # TODO First clause only needed to handle non-rgba arrays,
+        # which we are trying to handle upstream
+        return np.array(x).dtype.kind != "f" or np.isfinite(x).all(axis)
+
+    # Second check here catches vectors of strings with identity scale
+    # It could probably be handled better upstream. This is a tricky problem
+    if np.ndim(color) < 2 and all(isinstance(x, float) for x in color):
+        if len(color) == 4:
+            return mpl.colors.to_rgba(color)
+        alpha = alpha if visible(color) else np.nan
+        return mpl.colors.to_rgba(color, alpha)
+    else:
+        if np.ndim(color) == 2 and color.shape[1] == 4:
+            return mpl.colors.to_rgba_array(color)
+        alpha = np.where(visible(color, axis=1), alpha, np.nan)
+        return mpl.colors.to_rgba_array(color, alpha)
+
+    # TODO should we be implementing fill here too?
+    # (i.e. set fillalpha to 0 when fill=False)
+
+
+def document_properties(mark):
+
+    properties = [f.name for f in fields(mark) if isinstance(f.default, Mappable)]
+    text = [
+        "",
+        "    This mark defines the following properties:",
+        textwrap.fill(
+            ", ".join([f"|{p}|" for p in properties]),
+            width=78, initial_indent=" " * 8, subsequent_indent=" " * 8,
+        ),
+    ]
+
+    docstring_lines = mark.__doc__.split("\n")
+    new_docstring = "\n".join([
+        *docstring_lines[:2],
+        *text,
+        *docstring_lines[2:],
+    ])
+    mark.__doc__ = new_docstring
+    return mark
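
Illustration (not part of the patch): what the Mappable defaults defined throughout
these mark classes represent. A Mappable records how a property gets its value when it
is not set directly or mapped from data: a literal default, another property to depend
on, or an rcParams key.

    from seaborn._marks.base import Mappable

    print(Mappable("C0"))                          # <'C0'>
    print(Mappable(depend="color"))                # <depend:color>
    print(Mappable(rc="patch.linewidth"))          # <rc:patch.linewidth>
    print(Mappable(rc="patch.linewidth").default)  # current rcParams value, e.g. 1.0
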
diff --git a/seaborn/_marks/dot.py b/seaborn/_marks/dot.py
index c188ed0d..beef412d 100644
--- a/seaborn/_marks/dot.py
+++ b/seaborn/_marks/dot.py
@@ -1,8 +1,22 @@
 from __future__ import annotations
 from dataclasses import dataclass
+
 import numpy as np
 import matplotlib as mpl
-from seaborn._marks.base import Mark, Mappable, MappableBool, MappableFloat, MappableString, MappableColor, MappableStyle, resolve_properties, resolve_color, document_properties
+
+from seaborn._marks.base import (
+    Mark,
+    Mappable,
+    MappableBool,
+    MappableFloat,
+    MappableString,
+    MappableColor,
+    MappableStyle,
+    resolve_properties,
+    resolve_color,
+    document_properties,
+)
+
 from typing import TYPE_CHECKING
 if TYPE_CHECKING:
     from typing import Any
@@ -11,7 +25,82 @@ if TYPE_CHECKING:


 class DotBase(Mark):
-    pass
+
+    def _resolve_paths(self, data):
+
+        paths = []
+        path_cache = {}
+        marker = data["marker"]
+
+        def get_transformed_path(m):
+            return m.get_path().transformed(m.get_transform())
+
+        if isinstance(marker, mpl.markers.MarkerStyle):
+            return get_transformed_path(marker)
+
+        for m in marker:
+            if m not in path_cache:
+                path_cache[m] = get_transformed_path(m)
+            paths.append(path_cache[m])
+        return paths
+
+    def _resolve_properties(self, data, scales):
+
+        resolved = resolve_properties(self, data, scales)
+        resolved["path"] = self._resolve_paths(resolved)
+        resolved["size"] = resolved["pointsize"] ** 2
+
+        if isinstance(data, dict):  # Properties for single dot
+            filled_marker = resolved["marker"].is_filled()
+        else:
+            filled_marker = [m.is_filled() for m in resolved["marker"]]
+
+        resolved["fill"] = resolved["fill"] * filled_marker
+
+        return resolved
+
+    def _plot(self, split_gen, scales, orient):
+
+        # TODO Not backcompat with allowed (but nonfunctional) univariate plots
+        # (That should be solved upstream by defaulting to "" for unset x/y?)
+        # (Be mindful of xmin/xmax, etc!)
+
+        for _, data, ax in split_gen():
+
+            offsets = np.column_stack([data["x"], data["y"]])
+            data = self._resolve_properties(data, scales)
+
+            points = mpl.collections.PathCollection(
+                offsets=offsets,
+                paths=data["path"],
+                sizes=data["size"],
+                facecolors=data["facecolor"],
+                edgecolors=data["edgecolor"],
+                linewidths=data["linewidth"],
+                linestyles=data["edgestyle"],
+                transOffset=ax.transData,
+                transform=mpl.transforms.IdentityTransform(),
+                **self.artist_kws,
+            )
+            ax.add_collection(points)
+
+    def _legend_artist(
+        self, variables: list[str], value: Any, scales: dict[str, Scale],
+    ) -> Artist:
+
+        key = {v: value for v in variables}
+        res = self._resolve_properties(key, scales)
+
+        return mpl.collections.PathCollection(
+            paths=[res["path"]],
+            sizes=[res["size"]],
+            facecolors=[res["facecolor"]],
+            edgecolors=[res["edgecolor"]],
+            linewidths=[res["linewidth"]],
+            linestyles=[res["edgestyle"]],
+            transform=mpl.transforms.IdentityTransform(),
+            **self.artist_kws,
+        )


 @document_properties
@@ -29,16 +118,43 @@ class Dot(DotBase):
     .. include:: ../docstrings/objects.Dot.rst

     """
-    marker: MappableString = Mappable('o', grouping=False)
-    pointsize: MappableFloat = Mappable(6, grouping=False)
-    stroke: MappableFloat = Mappable(0.75, grouping=False)
-    color: MappableColor = Mappable('C0', grouping=False)
+    marker: MappableString = Mappable("o", grouping=False)
+    pointsize: MappableFloat = Mappable(6, grouping=False)  # TODO rcParam?
+    stroke: MappableFloat = Mappable(.75, grouping=False)  # TODO rcParam?
+    color: MappableColor = Mappable("C0", grouping=False)
     alpha: MappableFloat = Mappable(1, grouping=False)
     fill: MappableBool = Mappable(True, grouping=False)
-    edgecolor: MappableColor = Mappable(depend='color', grouping=False)
-    edgealpha: MappableFloat = Mappable(depend='alpha', grouping=False)
-    edgewidth: MappableFloat = Mappable(0.5, grouping=False)
-    edgestyle: MappableStyle = Mappable('-', grouping=False)
+    edgecolor: MappableColor = Mappable(depend="color", grouping=False)
+    edgealpha: MappableFloat = Mappable(depend="alpha", grouping=False)
+    edgewidth: MappableFloat = Mappable(.5, grouping=False)  # TODO rcParam?
+    edgestyle: MappableStyle = Mappable("-", grouping=False)
+
+    def _resolve_properties(self, data, scales):
+
+        resolved = super()._resolve_properties(data, scales)
+        filled = resolved["fill"]
+
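+        # Filled dots get their outline from `edgewidth`, while unfilled dots
+        # are drawn with the `stroke` width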
+        main_stroke = resolved["stroke"]
+        edge_stroke = resolved["edgewidth"]
+        resolved["linewidth"] = np.where(filled, edge_stroke, main_stroke)
+
+        main_color = resolve_color(self, data, "", scales)
+        edge_color = resolve_color(self, data, "edge", scales)
+
+        if not np.isscalar(filled):
+            # Expand dims to use in np.where with rgba arrays
+            filled = filled[:, None]
+        resolved["edgecolor"] = np.where(filled, edge_color, main_color)
+
+        filled = np.squeeze(filled)
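+        # Zero out the face alpha for unfilled markers so only the edge shows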
+        if isinstance(main_color, tuple):
+            # TODO handle this in resolve_color
+            main_color = tuple([*main_color[:3], main_color[3] * filled])
+        else:
+            main_color = np.c_[main_color[:, :3], main_color[:, 3] * filled]
+        resolved["facecolor"] = main_color
+
+        return resolved


 @document_properties
@@ -56,11 +172,29 @@ class Dots(DotBase):
     .. include:: ../docstrings/objects.Dots.rst

     """
-    marker: MappableString = Mappable(rc='scatter.marker', grouping=False)
-    pointsize: MappableFloat = Mappable(4, grouping=False)
-    stroke: MappableFloat = Mappable(0.75, grouping=False)
-    color: MappableColor = Mappable('C0', grouping=False)
-    alpha: MappableFloat = Mappable(1, grouping=False)
+    # TODO retype marker as MappableMarker
+    marker: MappableString = Mappable(rc="scatter.marker", grouping=False)
+    pointsize: MappableFloat = Mappable(4, grouping=False)  # TODO rcParam?
+    stroke: MappableFloat = Mappable(.75, grouping=False)  # TODO rcParam?
+    color: MappableColor = Mappable("C0", grouping=False)
+    alpha: MappableFloat = Mappable(1, grouping=False)  # TODO auto alpha?
     fill: MappableBool = Mappable(True, grouping=False)
-    fillcolor: MappableColor = Mappable(depend='color', grouping=False)
-    fillalpha: MappableFloat = Mappable(0.2, grouping=False)
+    fillcolor: MappableColor = Mappable(depend="color", grouping=False)
+    fillalpha: MappableFloat = Mappable(.2, grouping=False)
+
+    def _resolve_properties(self, data, scales):
+
+        resolved = super()._resolve_properties(data, scales)
+        resolved["linewidth"] = resolved.pop("stroke")
+        resolved["facecolor"] = resolve_color(self, data, "fill", scales)
+        resolved["edgecolor"] = resolve_color(self, data, "", scales)
+        resolved.setdefault("edgestyle", (0, None))
+
+        fc = resolved["facecolor"]
+        if isinstance(fc, tuple):
+            resolved["facecolor"] = fc[0], fc[1], fc[2], fc[3] * resolved["fill"]
+        else:
+            fc[:, 3] = fc[:, 3] * resolved["fill"]  # TODO Is inplace mod a problem?
+            resolved["facecolor"] = fc
+
+        return resolved
diff --git a/seaborn/_marks/line.py b/seaborn/_marks/line.py
index 9ba99240..a517f1b8 100644
--- a/seaborn/_marks/line.py
+++ b/seaborn/_marks/line.py
@@ -1,9 +1,20 @@
 from __future__ import annotations
 from dataclasses import dataclass
 from typing import ClassVar
+
 import numpy as np
 import matplotlib as mpl
-from seaborn._marks.base import Mark, Mappable, MappableFloat, MappableString, MappableColor, resolve_properties, resolve_color, document_properties
+
+from seaborn._marks.base import (
+    Mark,
+    Mappable,
+    MappableFloat,
+    MappableString,
+    MappableColor,
+    resolve_properties,
+    resolve_color,
+    document_properties,
+)


 @document_properties
@@ -22,17 +33,80 @@ class Path(Mark):
     .. include:: ../docstrings/objects.Path.rst

     """
-    color: MappableColor = Mappable('C0')
+    color: MappableColor = Mappable("C0")
     alpha: MappableFloat = Mappable(1)
-    linewidth: MappableFloat = Mappable(rc='lines.linewidth')
-    linestyle: MappableString = Mappable(rc='lines.linestyle')
-    marker: MappableString = Mappable(rc='lines.marker')
-    pointsize: MappableFloat = Mappable(rc='lines.markersize')
-    fillcolor: MappableColor = Mappable(depend='color')
-    edgecolor: MappableColor = Mappable(depend='color')
-    edgewidth: MappableFloat = Mappable(rc='lines.markeredgewidth')
+    linewidth: MappableFloat = Mappable(rc="lines.linewidth")
+    linestyle: MappableString = Mappable(rc="lines.linestyle")
+    marker: MappableString = Mappable(rc="lines.marker")
+    pointsize: MappableFloat = Mappable(rc="lines.markersize")
+    fillcolor: MappableColor = Mappable(depend="color")
+    edgecolor: MappableColor = Mappable(depend="color")
+    edgewidth: MappableFloat = Mappable(rc="lines.markeredgewidth")
+
     _sort: ClassVar[bool] = False

+    def _plot(self, split_gen, scales, orient):
+
+        for keys, data, ax in split_gen(keep_na=not self._sort):
+
+            vals = resolve_properties(self, keys, scales)
+            vals["color"] = resolve_color(self, keys, scales=scales)
+            vals["fillcolor"] = resolve_color(self, keys, prefix="fill", scales=scales)
+            vals["edgecolor"] = resolve_color(self, keys, prefix="edge", scales=scales)
+
+            if self._sort:
+                data = data.sort_values(orient, kind="mergesort")
+
+            artist_kws = self.artist_kws.copy()
+            self._handle_capstyle(artist_kws, vals)
+
+            line = mpl.lines.Line2D(
+                data["x"].to_numpy(),
+                data["y"].to_numpy(),
+                color=vals["color"],
+                linewidth=vals["linewidth"],
+                linestyle=vals["linestyle"],
+                marker=vals["marker"],
+                markersize=vals["pointsize"],
+                markerfacecolor=vals["fillcolor"],
+                markeredgecolor=vals["edgecolor"],
+                markeredgewidth=vals["edgewidth"],
+                **artist_kws,
+            )
+            ax.add_line(line)
+
+    def _legend_artist(self, variables, value, scales):
+
+        keys = {v: value for v in variables}
+        vals = resolve_properties(self, keys, scales)
+        vals["color"] = resolve_color(self, keys, scales=scales)
+        vals["fillcolor"] = resolve_color(self, keys, prefix="fill", scales=scales)
+        vals["edgecolor"] = resolve_color(self, keys, prefix="edge", scales=scales)
+
+        artist_kws = self.artist_kws.copy()
+        self._handle_capstyle(artist_kws, vals)
+
+        return mpl.lines.Line2D(
+            [], [],
+            color=vals["color"],
+            linewidth=vals["linewidth"],
+            linestyle=vals["linestyle"],
+            marker=vals["marker"],
+            markersize=vals["pointsize"],
+            markerfacecolor=vals["fillcolor"],
+            markeredgecolor=vals["edgecolor"],
+            markeredgewidth=vals["edgewidth"],
+            **artist_kws,
+        )
+
+    def _handle_capstyle(self, kws, vals):
+
+        # Work around for this matplotlib issue:
+        # https://github.com/matplotlib/matplotlib/issues/23437
+        if vals["linestyle"][1] is None:
+            capstyle = kws.get("solid_capstyle", mpl.rcParams["lines.solid_capstyle"])
+            kws["dash_capstyle"] = capstyle
+

 @document_properties
 @dataclass
@@ -68,15 +142,80 @@ class Paths(Mark):
     .. include:: ../docstrings/objects.Paths.rst

     """
-    color: MappableColor = Mappable('C0')
+    color: MappableColor = Mappable("C0")
     alpha: MappableFloat = Mappable(1)
-    linewidth: MappableFloat = Mappable(rc='lines.linewidth')
-    linestyle: MappableString = Mappable(rc='lines.linestyle')
+    linewidth: MappableFloat = Mappable(rc="lines.linewidth")
+    linestyle: MappableString = Mappable(rc="lines.linestyle")
+
     _sort: ClassVar[bool] = False

     def __post_init__(self):
-        self.artist_kws.setdefault('capstyle', mpl.rcParams[
-            'lines.solid_capstyle'])
+
+        # LineCollection artists have a capstyle property but don't source its value
+        # from the rc, so we do that manually here. Unfortunately, because we add
+        # only one LineCollection, we have to use the same capstyle for all lines
+        # even when they are dashed. It's a slight inconsistency, but looks fine IMO.
+        self.artist_kws.setdefault("capstyle", mpl.rcParams["lines.solid_capstyle"])
+
+    def _plot(self, split_gen, scales, orient):
+
+        line_data = {}
+        for keys, data, ax in split_gen(keep_na=not self._sort):
+
+            if ax not in line_data:
+                line_data[ax] = {
+                    "segments": [],
+                    "colors": [],
+                    "linewidths": [],
+                    "linestyles": [],
+                }
+
+            segments = self._setup_segments(data, orient)
+            line_data[ax]["segments"].extend(segments)
+            n = len(segments)
+
+            vals = resolve_properties(self, keys, scales)
+            vals["color"] = resolve_color(self, keys, scales=scales)
+
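+            # Repeat the group-level style properties once per segment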
+            line_data[ax]["colors"].extend([vals["color"]] * n)
+            line_data[ax]["linewidths"].extend([vals["linewidth"]] * n)
+            line_data[ax]["linestyles"].extend([vals["linestyle"]] * n)
+
+        for ax, ax_data in line_data.items():
+            lines = mpl.collections.LineCollection(**ax_data, **self.artist_kws)
+            # Handle datalim update manually
+            # https://github.com/matplotlib/matplotlib/issues/23129
+            ax.add_collection(lines, autolim=False)
+            if ax_data["segments"]:
+                xy = np.concatenate(ax_data["segments"])
+                ax.update_datalim(xy)
+
+    def _legend_artist(self, variables, value, scales):
+
+        key = resolve_properties(self, {v: value for v in variables}, scales)
+
+        artist_kws = self.artist_kws.copy()
+        capstyle = artist_kws.pop("capstyle")
+        artist_kws["solid_capstyle"] = capstyle
+        artist_kws["dash_capstyle"] = capstyle
+
+        return mpl.lines.Line2D(
+            [], [],
+            color=key["color"],
+            linewidth=key["linewidth"],
+            linestyle=key["linestyle"],
+            **artist_kws,
+        )
+
+    def _setup_segments(self, data, orient):
+
+        if self._sort:
+            data = data.sort_values(orient, kind="mergesort")
+
+        # Column stack to avoid block consolidation
+        xy = np.column_stack([data["x"], data["y"]])
+
+        return [xy]


 @document_properties
@@ -108,6 +247,19 @@ class Range(Paths):
     .. include:: ../docstrings/objects.Range.rst

     """
+    def _setup_segments(self, data, orient):
+
+        # TODO better checks on what variables we have
+        # TODO what if only one exists?
+        val = {"x": "y", "y": "x"}[orient]
+        if not set(data.columns) & {f"{val}min", f"{val}max"}:
+            agg = {f"{val}min": (val, "min"), f"{val}max": (val, "max")}
+            data = data.groupby(orient).agg(**agg).reset_index()
+
+        cols = [orient, f"{val}min", f"{val}max"]
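+        # Reshape to long form so that grouping on the orient value yields one
+        # two-point (min, max) segment per position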
+        data = data[cols].melt(orient, value_name=val)[["x", "y"]]
+        segments = [d.to_numpy() for _, d in data.groupby(orient)]
+        return segments


 @document_properties
@@ -121,4 +273,13 @@ class Dash(Paths):
     .. include:: ../docstrings/objects.Dash.rst

     """
-    width: MappableFloat = Mappable(0.8, grouping=False)
+    width: MappableFloat = Mappable(.8, grouping=False)
+
+    def _setup_segments(self, data, orient):
+
+        ori = ["x", "y"].index(orient)
+        xys = data[["x", "y"]].to_numpy().astype(float)
+        segments = np.stack([xys, xys], axis=1)
+        segments[:, 0, ori] -= data["width"] / 2
+        segments[:, 1, ori] += data["width"] / 2
+        return segments
diff --git a/seaborn/_marks/text.py b/seaborn/_marks/text.py
index 5c00231f..58d757c1 100644
--- a/seaborn/_marks/text.py
+++ b/seaborn/_marks/text.py
@@ -1,10 +1,21 @@
 from __future__ import annotations
 from collections import defaultdict
 from dataclasses import dataclass
+
 import numpy as np
 import matplotlib as mpl
 from matplotlib.transforms import ScaledTranslation
-from seaborn._marks.base import Mark, Mappable, MappableFloat, MappableString, MappableColor, resolve_properties, resolve_color, document_properties
+
+from seaborn._marks.base import (
+    Mark,
+    Mappable,
+    MappableFloat,
+    MappableString,
+    MappableColor,
+    resolve_properties,
+    resolve_color,
+    document_properties,
+)


 @document_properties
@@ -18,10 +29,48 @@ class Text(Mark):
     .. include:: ../docstrings/objects.Text.rst

     """
-    text: MappableString = Mappable('')
-    color: MappableColor = Mappable('k')
+    text: MappableString = Mappable("")
+    color: MappableColor = Mappable("k")
     alpha: MappableFloat = Mappable(1)
-    fontsize: MappableFloat = Mappable(rc='font.size')
-    halign: MappableString = Mappable('center')
-    valign: MappableString = Mappable('center_baseline')
+    fontsize: MappableFloat = Mappable(rc="font.size")
+    halign: MappableString = Mappable("center")
+    valign: MappableString = Mappable("center_baseline")
     offset: MappableFloat = Mappable(4)
+
+    def _plot(self, split_gen, scales, orient):
+
+        ax_data = defaultdict(list)
+
+        for keys, data, ax in split_gen():
+
+            vals = resolve_properties(self, keys, scales)
+            color = resolve_color(self, keys, "", scales)
+
+            halign = vals["halign"]
+            valign = vals["valign"]
+            fontsize = vals["fontsize"]
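+            # Convert the offset from points to inches for dpi_scale_trans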
+            offset = vals["offset"] / 72
+
+            offset_trans = ScaledTranslation(
+                {"right": -offset, "left": +offset}.get(halign, 0),
+                {"top": -offset, "bottom": +offset, "baseline": +offset}.get(valign, 0),
+                ax.figure.dpi_scale_trans,
+            )
+
+            for row in data.to_dict("records"):
+                artist = mpl.text.Text(
+                    x=row["x"],
+                    y=row["y"],
+                    text=str(row.get("text", vals["text"])),
+                    color=color,
+                    fontsize=fontsize,
+                    horizontalalignment=halign,
+                    verticalalignment=valign,
+                    transform=ax.transData + offset_trans,
+                    **self.artist_kws,
+                )
+                ax.add_artist(artist)
+                ax_data[ax].append([row["x"], row["y"]])
+
+        for ax, ax_vals in ax_data.items():
+            ax.update_datalim(np.array(ax_vals))
diff --git a/seaborn/_statistics.py b/seaborn/_statistics.py
index 2b81faf7..40346b02 100644
--- a/seaborn/_statistics.py
+++ b/seaborn/_statistics.py
@@ -34,15 +34,22 @@ try:
 except ImportError:
     from .external.kde import gaussian_kde
     _no_scipy = True
+
 from .algorithms import bootstrap
 from .utils import _check_argument


 class KDE:
     """Univariate and bivariate kernel density estimator."""
-
-    def __init__(self, *, bw_method=None, bw_adjust=1, gridsize=200, cut=3,
-        clip=None, cumulative=False):
+    def __init__(
+        self, *,
+        bw_method=None,
+        bw_adjust=1,
+        gridsize=200,
+        cut=3,
+        clip=None,
+        cumulative=False,
+    ):
         """Initialize the estimator with its parameters.

         Parameters
@@ -67,43 +74,118 @@ class KDE:
         """
         if clip is None:
             clip = None, None
+
         self.bw_method = bw_method
         self.bw_adjust = bw_adjust
         self.gridsize = gridsize
         self.cut = cut
         self.clip = clip
         self.cumulative = cumulative
+
         if cumulative and _no_scipy:
-            raise RuntimeError('Cumulative KDE evaluation requires scipy')
+            raise RuntimeError("Cumulative KDE evaluation requires scipy")
+
         self.support = None

     def _define_support_grid(self, x, bw, cut, clip, gridsize):
         """Create the grid of evaluation points depending for vector x."""
-        pass
+        clip_lo = -np.inf if clip[0] is None else clip[0]
+        clip_hi = +np.inf if clip[1] is None else clip[1]
+        gridmin = max(x.min() - bw * cut, clip_lo)
+        gridmax = min(x.max() + bw * cut, clip_hi)
+        return np.linspace(gridmin, gridmax, gridsize)

     def _define_support_univariate(self, x, weights):
         """Create a 1D grid of evaluation points."""
-        pass
+        kde = self._fit(x, weights)
+        bw = np.sqrt(kde.covariance.squeeze())
+        grid = self._define_support_grid(
+            x, bw, self.cut, self.clip, self.gridsize
+        )
+        return grid

     def _define_support_bivariate(self, x1, x2, weights):
         """Create a 2D grid of evaluation points."""
-        pass
+        clip = self.clip
+        if clip[0] is None or np.isscalar(clip[0]):
+            clip = (clip, clip)
+
+        kde = self._fit([x1, x2], weights)
+        bw = np.sqrt(np.diag(kde.covariance).squeeze())
+
+        grid1 = self._define_support_grid(
+            x1, bw[0], self.cut, clip[0], self.gridsize
+        )
+        grid2 = self._define_support_grid(
+            x2, bw[1], self.cut, clip[1], self.gridsize
+        )
+
+        return grid1, grid2

     def define_support(self, x1, x2=None, weights=None, cache=True):
         """Create the evaluation grid for a given data set."""
-        pass
+        if x2 is None:
+            support = self._define_support_univariate(x1, weights)
+        else:
+            support = self._define_support_bivariate(x1, x2, weights)
+
+        if cache:
+            self.support = support
+
+        return support

     def _fit(self, fit_data, weights=None):
         """Fit the scipy kde while adding bw_adjust logic and version check."""
-        pass
+        fit_kws = {"bw_method": self.bw_method}
+        if weights is not None:
+            fit_kws["weights"] = weights
+
+        kde = gaussian_kde(fit_data, **fit_kws)
+        kde.set_bandwidth(kde.factor * self.bw_adjust)
+
+        return kde

     def _eval_univariate(self, x, weights=None):
         """Fit and evaluate a univariate on univariate data."""
-        pass
+        support = self.support
+        if support is None:
+            support = self.define_support(x, cache=False)
+
+        kde = self._fit(x, weights)
+
+        if self.cumulative:
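+            # Integrate the density from the left edge of the support grid to
+            # each evaluation point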
+            s_0 = support[0]
+            density = np.array([
+                kde.integrate_box_1d(s_0, s_i) for s_i in support
+            ])
+        else:
+            density = kde(support)
+
+        return density, support

     def _eval_bivariate(self, x1, x2, weights=None):
         """Fit and evaluate a univariate on bivariate data."""
-        pass
+        support = self.support
+        if support is None:
+            support = self.define_support(x1, x2, cache=False)
+
+        kde = self._fit([x1, x2], weights)
+
+        if self.cumulative:
+
+            grid1, grid2 = support
+            density = np.zeros((grid1.size, grid2.size))
+            p0 = grid1.min(), grid2.min()
+            for i, xi in enumerate(grid1):
+                for j, xj in enumerate(grid2):
+                    density[i, j] = kde.integrate_box(p0, (xi, xj))
+
+        else:
+
+            xx1, xx2 = np.meshgrid(*support)
+            density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)
+
+        return density, support

     def __call__(self, x1, x2=None, weights=None):
         """Fit and evaluate on univariate or bivariate data."""
@@ -113,11 +195,19 @@ class KDE:
             return self._eval_bivariate(x1, x2, weights)


+# Note: we no longer use this for univariate histograms in histplot,
+# preferring _stats.Hist. We'll deprecate this once we have a bivariate Stat class.
 class Histogram:
     """Univariate and bivariate histogram estimator."""
-
-    def __init__(self, stat='count', bins='auto', binwidth=None, binrange=
-        None, discrete=False, cumulative=False):
+    def __init__(
+        self,
+        stat="count",
+        bins="auto",
+        binwidth=None,
+        binrange=None,
+        discrete=False,
+        cumulative=False,
+    ):
         """Initialize the estimator with its parameters.

         Parameters
@@ -148,33 +238,158 @@ class Histogram:
             If True, return the cumulative statistic.

         """
-        stat_choices = ['count', 'frequency', 'density', 'probability',
-            'proportion', 'percent']
-        _check_argument('stat', stat_choices, stat)
+        stat_choices = [
+            "count", "frequency", "density", "probability", "proportion", "percent",
+        ]
+        _check_argument("stat", stat_choices, stat)
+
         self.stat = stat
         self.bins = bins
         self.binwidth = binwidth
         self.binrange = binrange
         self.discrete = discrete
         self.cumulative = cumulative
+
         self.bin_kws = None

-    def _define_bin_edges(self, x, weights, bins, binwidth, binrange, discrete
-        ):
+    def _define_bin_edges(self, x, weights, bins, binwidth, binrange, discrete):
         """Inner function that takes bin parameters as arguments."""
-        pass
+        if binrange is None:
+            start, stop = x.min(), x.max()
+        else:
+            start, stop = binrange
+
+        if discrete:
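+            # Center unit-width bins on the (integer) data values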
+            bin_edges = np.arange(start - .5, stop + 1.5)
+        elif binwidth is not None:
+            step = binwidth
+            bin_edges = np.arange(start, stop + step, step)
+            # Handle roundoff error (maybe there is a less clumsy way?)
+            if bin_edges.max() < stop or len(bin_edges) < 2:
+                bin_edges = np.append(bin_edges, bin_edges.max() + step)
+        else:
+            bin_edges = np.histogram_bin_edges(
+                x, bins, binrange, weights,
+            )
+        return bin_edges

     def define_bin_params(self, x1, x2=None, weights=None, cache=True):
         """Given data, return numpy.histogram parameters to define bins."""
-        pass
+        if x2 is None:
+
+            bin_edges = self._define_bin_edges(
+                x1, weights, self.bins, self.binwidth, self.binrange, self.discrete,
+            )
+
+            if isinstance(self.bins, (str, Number)):
+                n_bins = len(bin_edges) - 1
+                bin_range = bin_edges.min(), bin_edges.max()
+                bin_kws = dict(bins=n_bins, range=bin_range)
+            else:
+                bin_kws = dict(bins=bin_edges)
+
+        else:
+
+            bin_edges = []
+            for i, x in enumerate([x1, x2]):
+
+                # Resolve out whether bin parameters are shared
+                # or specific to each variable
+
+                bins = self.bins
+                if not bins or isinstance(bins, (str, Number)):
+                    pass
+                elif isinstance(bins[i], str):
+                    bins = bins[i]
+                elif len(bins) == 2:
+                    bins = bins[i]
+
+                binwidth = self.binwidth
+                if binwidth is None:
+                    pass
+                elif not isinstance(binwidth, Number):
+                    binwidth = binwidth[i]
+
+                binrange = self.binrange
+                if binrange is None:
+                    pass
+                elif not isinstance(binrange[0], Number):
+                    binrange = binrange[i]
+
+                discrete = self.discrete
+                if not isinstance(discrete, bool):
+                    discrete = discrete[i]
+
+                # Define the bins for this variable
+
+                bin_edges.append(self._define_bin_edges(
+                    x, weights, bins, binwidth, binrange, discrete,
+                ))
+
+            bin_kws = dict(bins=tuple(bin_edges))
+
+        if cache:
+            self.bin_kws = bin_kws
+
+        return bin_kws

     def _eval_bivariate(self, x1, x2, weights):
         """Inner function for histogram of two variables."""
-        pass
+        bin_kws = self.bin_kws
+        if bin_kws is None:
+            bin_kws = self.define_bin_params(x1, x2, cache=False)
+
+        density = self.stat == "density"
+
+        hist, *bin_edges = np.histogram2d(
+            x1, x2, **bin_kws, weights=weights, density=density
+        )
+
+        area = np.outer(
+            np.diff(bin_edges[0]),
+            np.diff(bin_edges[1]),
+        )
+
+        if self.stat == "probability" or self.stat == "proportion":
+            hist = hist.astype(float) / hist.sum()
+        elif self.stat == "percent":
+            hist = hist.astype(float) / hist.sum() * 100
+        elif self.stat == "frequency":
+            hist = hist.astype(float) / area
+
+        if self.cumulative:
+            if self.stat in ["density", "frequency"]:
+                hist = (hist * area).cumsum(axis=0).cumsum(axis=1)
+            else:
+                hist = hist.cumsum(axis=0).cumsum(axis=1)
+
+        return hist, bin_edges

     def _eval_univariate(self, x, weights):
         """Inner function for histogram of one variable."""
-        pass
+        bin_kws = self.bin_kws
+        if bin_kws is None:
+            bin_kws = self.define_bin_params(x, weights=weights, cache=False)
+
+        density = self.stat == "density"
+        hist, bin_edges = np.histogram(
+            x, **bin_kws, weights=weights, density=density,
+        )
+
+        if self.stat == "probability" or self.stat == "proportion":
+            hist = hist.astype(float) / hist.sum()
+        elif self.stat == "percent":
+            hist = hist.astype(float) / hist.sum() * 100
+        elif self.stat == "frequency":
+            hist = hist.astype(float) / np.diff(bin_edges)
+
+        if self.cumulative:
+            if self.stat in ["density", "frequency"]:
+                hist = (hist * np.diff(bin_edges)).cumsum()
+            else:
+                hist = hist.cumsum()
+
+        return hist, bin_edges

     def __call__(self, x1, x2=None, weights=None):
         """Count the occurrences in each bin, maybe normalize."""
@@ -186,8 +401,7 @@ class Histogram:

 class ECDF:
     """Univariate empirical cumulative distribution estimator."""
-
-    def __init__(self, stat='proportion', complementary=False):
+    def __init__(self, stat="proportion", complementary=False):
         """Initialize the class with its parameters

         Parameters
@@ -198,17 +412,33 @@ class ECDF:
             If True, use the complementary CDF (1 - CDF)

         """
-        _check_argument('stat', ['count', 'percent', 'proportion'], stat)
+        _check_argument("stat", ["count", "percent", "proportion"], stat)
         self.stat = stat
         self.complementary = complementary

     def _eval_bivariate(self, x1, x2, weights):
         """Inner function for ECDF of two variables."""
-        pass
+        raise NotImplementedError("Bivariate ECDF is not implemented")

     def _eval_univariate(self, x, weights):
         """Inner function for ECDF of one variable."""
-        pass
+        sorter = x.argsort()
+        x = x[sorter]
+        weights = weights[sorter]
+        y = weights.cumsum()
+
+        if self.stat in ["percent", "proportion"]:
+            y = y / y.max()
+        if self.stat == "percent":
+            y = y * 100
+
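+        # Add an initial point at (-inf, 0) so the curve starts from zero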
+        x = np.r_[-np.inf, x]
+        y = np.r_[0, y]
+
+        if self.complementary:
+            y = y.max() - y
+
+        return y, x

     def __call__(self, x1, x2=None, weights=None):
         """Return proportion or count of observations below each sorted datapoint."""
@@ -217,6 +447,7 @@ class ECDF:
             weights = np.ones_like(x1)
         else:
             weights = np.asarray(weights)
+
         if x2 is None:
             return self._eval_univariate(x1, weights)
         else:
@@ -243,41 +474,50 @@ class EstimateAggregator:

         """
         self.estimator = estimator
+
         method, level = _validate_errorbar_arg(errorbar)
         self.error_method = method
         self.error_level = level
+
         self.boot_kws = boot_kws

     def __call__(self, data, var):
         """Aggregate over `var` column of `data` with estimate and error interval."""
         vals = data[var]
         if callable(self.estimator):
+            # You would think we could pass to vals.agg, and yet:
+            # https://github.com/mwaskom/seaborn/issues/2943
             estimate = self.estimator(vals)
         else:
             estimate = vals.agg(self.estimator)
+
+        # Options that produce no error bars
         if self.error_method is None:
             err_min = err_max = np.nan
         elif len(data) <= 1:
             err_min = err_max = np.nan
+
+        # Generic errorbars from user-supplied function
         elif callable(self.error_method):
             err_min, err_max = self.error_method(vals)
-        elif self.error_method == 'sd':
+
+        # Parametric options
+        elif self.error_method == "sd":
             half_interval = vals.std() * self.error_level
-            err_min, err_max = (estimate - half_interval, estimate +
-                half_interval)
-        elif self.error_method == 'se':
+            err_min, err_max = estimate - half_interval, estimate + half_interval
+        elif self.error_method == "se":
             half_interval = vals.sem() * self.error_level
-            err_min, err_max = (estimate - half_interval, estimate +
-                half_interval)
-        elif self.error_method == 'pi':
+            err_min, err_max = estimate - half_interval, estimate + half_interval
+
+        # Nonparametric options
+        elif self.error_method == "pi":
             err_min, err_max = _percentile_interval(vals, self.error_level)
-        elif self.error_method == 'ci':
-            units = data.get('units', None)
-            boots = bootstrap(vals, units=units, func=self.estimator, **
-                self.boot_kws)
+        elif self.error_method == "ci":
+            units = data.get("units", None)
+            boots = bootstrap(vals, units=units, func=self.estimator, **self.boot_kws)
             err_min, err_max = _percentile_interval(boots, self.error_level)
-        return pd.Series({var: estimate, f'{var}min': err_min, f'{var}max':
-            err_max})
+
+        return pd.Series({var: estimate, f"{var}min": err_min, f"{var}max": err_max})


 class WeightedAggregator:
@@ -298,32 +538,42 @@ class WeightedAggregator:
             Additional keywords are passed to bootstrap when error_method is "ci".

         """
-        if estimator != 'mean':
-            raise ValueError(
-                f"Weighted estimator must be 'mean', not {estimator!r}.")
+        if estimator != "mean":
+            # Note that, while other weighted estimators may make sense (e.g. median),
+            # I'm not aware of an implementation in our dependencies. We can add one
+            # in seaborn later, if there is sufficient interest. For now, limit to mean.
+            raise ValueError(f"Weighted estimator must be 'mean', not {estimator!r}.")
         self.estimator = estimator
+
         method, level = _validate_errorbar_arg(errorbar)
-        if method is not None and method != 'ci':
+        if method is not None and method != "ci":
+            # As with the estimator, weighted 'sd' or 'pi' error bars may make sense.
+            # But we'll keep things simple for now and limit to (bootstrap) CI.
             raise ValueError(f"Error bar method must be 'ci', not {method!r}.")
         self.error_method = method
         self.error_level = level
+
         self.boot_kws = boot_kws

     def __call__(self, data, var):
         """Aggregate over `var` column of `data` with estimate and error interval."""
         vals = data[var]
-        weights = data['weight']
+        weights = data["weight"]
+
         estimate = np.average(vals, weights=weights)
-        if self.error_method == 'ci' and len(data) > 1:
+
+        if self.error_method == "ci" and len(data) > 1:

             def error_func(x, w):
                 return np.average(x, weights=w)
+
             boots = bootstrap(vals, weights, func=error_func, **self.boot_kws)
             err_min, err_max = _percentile_interval(boots, self.error_level)
+
         else:
             err_min = err_max = np.nan
-        return pd.Series({var: estimate, f'{var}min': err_min, f'{var}max':
-            err_max})
+
+        return pd.Series({var: estimate, f"{var}min": err_min, f"{var}max": err_max})


 class LetterValues:
@@ -352,39 +602,97 @@ class LetterValues:
         https://vita.had.co.nz/papers/letter-value-plot.pdf

         """
-        k_options = ['tukey', 'proportion', 'trustworthy', 'full']
+        k_options = ["tukey", "proportion", "trustworthy", "full"]
         if isinstance(k_depth, str):
-            _check_argument('k_depth', k_options, k_depth)
+            _check_argument("k_depth", k_options, k_depth)
         elif not isinstance(k_depth, int):
             err = (
-                f'The `k_depth` parameter must be either an integer or string (one of {k_options}), not {k_depth!r}.'
-                )
+                "The `k_depth` parameter must be either an integer or string "
+                f"(one of {k_options}), not {k_depth!r}."
+            )
             raise TypeError(err)
+
         self.k_depth = k_depth
         self.outlier_prop = outlier_prop
         self.trust_alpha = trust_alpha

+    def _compute_k(self, n):
+
+        # Select the depth, i.e. number of boxes to draw, based on the method
+        if self.k_depth == "full":
+            # extend boxes to 100% of the data
+            k = int(np.log2(n)) + 1
+        elif self.k_depth == "tukey":
+            # This results in roughly 5-8 points in each tail
+            k = int(np.log2(n)) - 3
+        elif self.k_depth == "proportion":
+            k = int(np.log2(n)) - int(np.log2(n * self.outlier_prop)) + 1
+        elif self.k_depth == "trustworthy":
+            normal_quantile_func = np.vectorize(NormalDist().inv_cdf)
+            point_conf = 2 * normal_quantile_func(1 - self.trust_alpha / 2) ** 2
+            k = int(np.log2(n / point_conf)) + 1
+        else:
+            # Allow having k directly specified as input
+            k = int(self.k_depth)
+
+        return max(k, 1)
+
     def __call__(self, x):
         """Evaluate the letter values."""
         k = self._compute_k(len(x))
         exp = np.arange(k + 1, 1, -1), np.arange(2, k + 2)
         levels = k + 1 - np.concatenate([exp[0], exp[1][1:]])
         percentiles = 100 * np.concatenate([0.5 ** exp[0], 1 - 0.5 ** exp[1]])
-        if self.k_depth == 'full':
+        if self.k_depth == "full":
             percentiles[0] = 0
             percentiles[-1] = 100
         values = np.percentile(x, percentiles)
         fliers = np.asarray(x[(x < values.min()) | (x > values.max())])
         median = np.percentile(x, 50)
-        return {'k': k, 'levels': levels, 'percs': percentiles, 'values':
-            values, 'fliers': fliers, 'median': median}
+
+        return {
+            "k": k,
+            "levels": levels,
+            "percs": percentiles,
+            "values": values,
+            "fliers": fliers,
+            "median": median,
+        }


 def _percentile_interval(data, width):
     """Return a percentile interval from data of a given width."""
-    pass
+    edge = (100 - width) / 2
+    percentiles = edge, 100 - edge
+    return np.nanpercentile(data, percentiles)


 def _validate_errorbar_arg(arg):
     """Check type and value of errorbar argument and assign default level."""
-    pass
+    DEFAULT_LEVELS = {
+        "ci": 95,
+        "pi": 95,
+        "se": 1,
+        "sd": 1,
+    }
+
+    usage = "`errorbar` must be a callable, string, or (string, number) tuple"
+
+    if arg is None:
+        return None, None
+    elif callable(arg):
+        return arg, None
+    elif isinstance(arg, str):
+        method = arg
+        level = DEFAULT_LEVELS.get(method, None)
+    else:
+        try:
+            method, level = arg
+        except (ValueError, TypeError) as err:
+            raise err.__class__(usage) from err
+
+    _check_argument("errorbar", list(DEFAULT_LEVELS), method)
+    if level is not None and not isinstance(level, Number):
+        raise TypeError(usage)
+
+    return method, level
diff --git a/seaborn/_stats/aggregation.py b/seaborn/_stats/aggregation.py
index edd0bde5..7e7d6021 100644
--- a/seaborn/_stats/aggregation.py
+++ b/seaborn/_stats/aggregation.py
@@ -1,12 +1,17 @@
 from __future__ import annotations
 from dataclasses import dataclass
 from typing import ClassVar, Callable
+
 import pandas as pd
 from pandas import DataFrame
+
 from seaborn._core.scales import Scale
 from seaborn._core.groupby import GroupBy
 from seaborn._stats.base import Stat
-from seaborn._statistics import EstimateAggregator, WeightedAggregator
+from seaborn._statistics import (
+    EstimateAggregator,
+    WeightedAggregator,
+)
 from seaborn._core.typing import Vector


@@ -29,14 +34,21 @@ class Agg(Stat):
     .. include:: ../docstrings/objects.Agg.rst

     """
-    func: str | Callable[[Vector], float] = 'mean'
+    func: str | Callable[[Vector], float] = "mean"
+
     group_by_orient: ClassVar[bool] = True

-    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str,
-        scales: dict[str, Scale]) ->DataFrame:
-        var = {'x': 'y', 'y': 'x'}.get(orient)
-        res = groupby.agg(data, {var: self.func}).dropna(subset=[var]
-            ).reset_index(drop=True)
+    def __call__(
+        self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
+    ) -> DataFrame:
+
+        var = {"x": "y", "y": "x"}.get(orient)
+        res = (
+            groupby
+            .agg(data, {var: self.func})
+            .dropna(subset=[var])
+            .reset_index(drop=True)
+        )
         return res


@@ -72,23 +84,41 @@ class Est(Stat):
     .. include:: ../docstrings/objects.Est.rst

     """
-    func: str | Callable[[Vector], float] = 'mean'
-    errorbar: str | tuple[str, float] = ('ci', 95)
+    func: str | Callable[[Vector], float] = "mean"
+    errorbar: str | tuple[str, float] = ("ci", 95)
     n_boot: int = 1000
     seed: int | None = None
+
     group_by_orient: ClassVar[bool] = True

-    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str,
-        scales: dict[str, Scale]) ->DataFrame:
-        boot_kws = {'n_boot': self.n_boot, 'seed': self.seed}
-        if 'weight' in data:
+    def _process(
+        self, data: DataFrame, var: str, estimator: EstimateAggregator
+    ) -> DataFrame:
+        # Needed because GroupBy.apply assumes func is DataFrame -> DataFrame
+        # which we could probably make more general to allow Series return
+        res = estimator(data, var)
+        return pd.DataFrame([res])
+
+    def __call__(
+        self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
+    ) -> DataFrame:
+
+        boot_kws = {"n_boot": self.n_boot, "seed": self.seed}
+        if "weight" in data:
             engine = WeightedAggregator(self.func, self.errorbar, **boot_kws)
         else:
             engine = EstimateAggregator(self.func, self.errorbar, **boot_kws)
-        var = {'x': 'y', 'y': 'x'}[orient]
-        res = groupby.apply(data, self._process, var, engine).dropna(subset
-            =[var]).reset_index(drop=True)
-        res = res.fillna({f'{var}min': res[var], f'{var}max': res[var]})
+
+        var = {"x": "y", "y": "x"}[orient]
+        res = (
+            groupby
+            .apply(data, self._process, var, engine)
+            .dropna(subset=[var])
+            .reset_index(drop=True)
+        )
+
+        res = res.fillna({f"{var}min": res[var], f"{var}max": res[var]})
+
         return res


diff --git a/seaborn/_stats/base.py b/seaborn/_stats/base.py
index 4c8201ba..b80b2281 100644
--- a/seaborn/_stats/base.py
+++ b/seaborn/_stats/base.py
@@ -4,6 +4,7 @@ from collections.abc import Iterable
 from dataclasses import dataclass
 from typing import ClassVar, Any
 import warnings
+
 from typing import TYPE_CHECKING
 if TYPE_CHECKING:
     from pandas import DataFrame
@@ -14,18 +15,51 @@ if TYPE_CHECKING:
 @dataclass
 class Stat:
     """Base class for objects that apply statistical transformations."""
+
+    # The class supports a partial-function application pattern. The object is
+    # initialized with desired parameters and the result is a callable that
+    # accepts and returns dataframes.
+
+    # The statistical transformation logic should not add any state to the instance
+    # beyond what is defined with the initialization parameters.
+
+    # Subclasses can declare whether the orient dimension should be used in grouping
+    # TODO consider whether this should be a parameter. Motivating example:
+    # use the same KDE class for violin plots and univariate density estimation.
+    # In the former case, we would expect separate densities for each unique
+    # value on the orient axis, but we would not in the latter case.
     group_by_orient: ClassVar[bool] = False

-    def _check_param_one_of(self, param: str, options: Iterable[Any]) ->None:
+    def _check_param_one_of(self, param: str, options: Iterable[Any]) -> None:
         """Raise when parameter value is not one of a specified set."""
-        pass
+        value = getattr(self, param)
+        if value not in options:
+            *most, last = options
+            option_str = ", ".join(f"{x!r}" for x in most[:-1]) + f" or {last!r}"
+            err = " ".join([
+                f"The `{param}` parameter for `{self.__class__.__name__}` must be",
+                f"one of {option_str}; not {value!r}.",
+            ])
+            raise ValueError(err)

-    def _check_grouping_vars(self, param: str, data_vars: list[str],
-        stacklevel: int=2) ->None:
+    def _check_grouping_vars(
+        self, param: str, data_vars: list[str], stacklevel: int = 2,
+    ) -> None:
         """Warn if vars are named in parameter without being present in the data."""
-        pass
+        param_vars = getattr(self, param)
+        undefined = set(param_vars) - set(data_vars)
+        if undefined:
+            param = f"{self.__class__.__name__}.{param}"
+            names = ", ".join(f"{x!r}" for x in undefined)
+            msg = f"Undefined variable(s) passed for {param}: {names}."
+            warnings.warn(msg, stacklevel=stacklevel)

-    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str,
-        scales: dict[str, Scale]) ->DataFrame:
+    def __call__(
+        self,
+        data: DataFrame,
+        groupby: GroupBy,
+        orient: str,
+        scales: dict[str, Scale],
+    ) -> DataFrame:
         """Apply statistical transform to data subgroups and return combined result."""
         return data
diff --git a/seaborn/_stats/counting.py b/seaborn/_stats/counting.py
index b1bf2a2d..0c2fb7d4 100644
--- a/seaborn/_stats/counting.py
+++ b/seaborn/_stats/counting.py
@@ -1,12 +1,15 @@
 from __future__ import annotations
 from dataclasses import dataclass
 from typing import ClassVar
+
 import numpy as np
 import pandas as pd
 from pandas import DataFrame
+
 from seaborn._core.groupby import GroupBy
 from seaborn._core.scales import Scale
 from seaborn._stats.base import Stat
+
 from typing import TYPE_CHECKING
 if TYPE_CHECKING:
     from numpy.typing import ArrayLike
@@ -28,11 +31,17 @@ class Count(Stat):
     """
     group_by_orient: ClassVar[bool] = True

-    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str,
-        scales: dict[str, Scale]) ->DataFrame:
-        var = {'x': 'y', 'y': 'x'}[orient]
-        res = groupby.agg(data.assign(**{var: data[orient]}), {var: len}
-            ).dropna(subset=['x', 'y']).reset_index(drop=True)
+    def __call__(
+        self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
+    ) -> DataFrame:
+
+        var = {"x": "y", "y": "x"}[orient]
+        res = (
+            groupby
+            .agg(data.assign(**{var: data[orient]}), {var: len})
+            .dropna(subset=["x", "y"])
+            .reset_index(drop=True)
+        )
         return res


@@ -95,8 +104,8 @@ class Hist(Stat):
     .. include:: ../docstrings/objects.Hist.rst

     """
-    stat: str = 'count'
-    bins: str | int | ArrayLike = 'auto'
+    stat: str = "count"
+    bins: str | int | ArrayLike = "auto"
     binwidth: float | None = None
     binrange: tuple[float, float] | None = None
     common_norm: bool | list[str] = True
@@ -105,21 +114,94 @@ class Hist(Stat):
     discrete: bool = False

     def __post_init__(self):
-        stat_options = ['count', 'density', 'percent', 'probability',
-            'proportion', 'frequency']
-        self._check_param_one_of('stat', stat_options)

-    def _define_bin_edges(self, vals, weight, bins, binwidth, binrange,
-        discrete):
+        stat_options = [
+            "count", "density", "percent", "probability", "proportion", "frequency"
+        ]
+        self._check_param_one_of("stat", stat_options)
+
+    def _define_bin_edges(self, vals, weight, bins, binwidth, binrange, discrete):
         """Inner function that takes bin parameters as arguments."""
-        pass
+        vals = vals.replace(-np.inf, np.nan).replace(np.inf, np.nan).dropna()
+
+        if binrange is None:
+            start, stop = vals.min(), vals.max()
+        else:
+            start, stop = binrange
+
+        if discrete:
+            bin_edges = np.arange(start - .5, stop + 1.5)
+        else:
+            if binwidth is not None:
+                bins = int(round((stop - start) / binwidth))
+            bin_edges = np.histogram_bin_edges(vals, bins, binrange, weight)
+
+        # TODO warning or cap on too many bins?
+
+        return bin_edges

     def _define_bin_params(self, data, orient, scale_type):
         """Given data, return numpy.histogram parameters to define bins."""
-        pass
+        vals = data[orient]
+        weights = data.get("weight", None)
+
+        # TODO We'll want this for ordinal / discrete scales too
+        # (Do we need discrete as a parameter or just infer from scale?)
+        discrete = self.discrete or scale_type == "nominal"
+
+        bin_edges = self._define_bin_edges(
+            vals, weights, self.bins, self.binwidth, self.binrange, discrete,
+        )
+
+        if isinstance(self.bins, (str, int)):
+            n_bins = len(bin_edges) - 1
+            bin_range = bin_edges.min(), bin_edges.max()
+            bin_kws = dict(bins=n_bins, range=bin_range)
+        else:
+            bin_kws = dict(bins=bin_edges)
+
+        return bin_kws
+
+    def _get_bins_and_eval(self, data, orient, groupby, scale_type):
+
+        bin_kws = self._define_bin_params(data, orient, scale_type)
+        return groupby.apply(data, self._eval, orient, bin_kws)
+
+    def _eval(self, data, orient, bin_kws):
+
+        vals = data[orient]
+        weights = data.get("weight", None)
+
+        density = self.stat == "density"
+        hist, edges = np.histogram(vals, **bin_kws, weights=weights, density=density)
+
+        width = np.diff(edges)
+        center = edges[:-1] + width / 2
+
+        return pd.DataFrame({orient: center, "count": hist, "space": width})
+
+    def _normalize(self, data):
+
+        hist = data["count"]
+        if self.stat == "probability" or self.stat == "proportion":
+            hist = hist.astype(float) / hist.sum()
+        elif self.stat == "percent":
+            hist = hist.astype(float) / hist.sum() * 100
+        elif self.stat == "frequency":
+            hist = hist.astype(float) / data["space"]
+
+        if self.cumulative:
+            if self.stat in ["density", "frequency"]:
+                hist = (hist * data["space"]).cumsum()
+            else:
+                hist = hist.cumsum()
+
+        return data.assign(**{self.stat: hist})
+
+    def __call__(
+        self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
+    ) -> DataFrame:

-    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str,
-        scales: dict[str, Scale]) ->DataFrame:
         scale_type = scales[orient].__class__.__name__.lower()
         grouping_vars = [str(v) for v in data if v in groupby.order]
         if not grouping_vars or self.common_bins is True:
@@ -130,9 +212,12 @@ class Hist(Stat):
                 bin_groupby = GroupBy(grouping_vars)
             else:
                 bin_groupby = GroupBy(self.common_bins)
-                self._check_grouping_vars('common_bins', grouping_vars)
-            data = bin_groupby.apply(data, self._get_bins_and_eval, orient,
-                groupby, scale_type)
+                self._check_grouping_vars("common_bins", grouping_vars)
+
+            data = bin_groupby.apply(
+                data, self._get_bins_and_eval, orient, groupby, scale_type,
+            )
+
         if not grouping_vars or self.common_norm is True:
             data = self._normalize(data)
         else:
@@ -140,7 +225,8 @@ class Hist(Stat):
                 norm_groupby = GroupBy(grouping_vars)
             else:
                 norm_groupby = GroupBy(self.common_norm)
-                self._check_grouping_vars('common_norm', grouping_vars)
+                self._check_grouping_vars("common_norm", grouping_vars)
             data = norm_groupby.apply(data, self._normalize)
-        other = {'x': 'y', 'y': 'x'}[orient]
+
+        other = {"x": "y", "y": "x"}[orient]
         return data.assign(**{other: data[self.stat]})
diff --git a/seaborn/_stats/density.py b/seaborn/_stats/density.py
index 410c1a0b..e4613876 100644
--- a/seaborn/_stats/density.py
+++ b/seaborn/_stats/density.py
@@ -1,6 +1,7 @@
 from __future__ import annotations
 from dataclasses import dataclass
 from typing import Any, Callable
+
 import numpy as np
 from numpy import ndarray
 import pandas as pd
@@ -11,6 +12,7 @@ try:
 except ImportError:
     from seaborn.external.kde import gaussian_kde
     _no_scipy = True
+
 from seaborn._core.groupby import GroupBy
 from seaborn._core.scales import Scale
 from seaborn._stats.base import Stat
@@ -82,7 +84,7 @@ class KDE(Stat):

     """
     bw_adjust: float = 1
-    bw_method: str | float | Callable[[gaussian_kde], float] = 'scott'
+    bw_method: str | float | Callable[[gaussian_kde], float] = "scott"
     common_norm: bool | list[str] = True
     common_grid: bool | list[str] = True
     gridsize: int | None = 200
@@ -90,37 +92,92 @@ class KDE(Stat):
     cumulative: bool = False

     def __post_init__(self):
+
         if self.cumulative and _no_scipy:
-            raise RuntimeError('Cumulative KDE evaluation requires scipy')
+            raise RuntimeError("Cumulative KDE evaluation requires scipy")

-    def _check_var_list_or_boolean(self, param: str, grouping_vars: Any
-        ) ->None:
+    def _check_var_list_or_boolean(self, param: str, grouping_vars: Any) -> None:
         """Do input checks on grouping parameters."""
-        pass
-
-    def _fit(self, data: DataFrame, orient: str) ->gaussian_kde:
+        value = getattr(self, param)
+        if not (
+            isinstance(value, bool)
+            or (isinstance(value, list) and all(isinstance(v, str) for v in value))
+        ):
+            param_name = f"{self.__class__.__name__}.{param}"
+            raise TypeError(f"{param_name} must be a boolean or list of strings.")
+        self._check_grouping_vars(param, grouping_vars, stacklevel=3)
+
+    def _fit(self, data: DataFrame, orient: str) -> gaussian_kde:
         """Fit and return a KDE object."""
-        pass
+        # TODO need to handle singular data

-    def _get_support(self, data: DataFrame, orient: str) ->ndarray:
-        """Define the grid that the KDE will be evaluated on."""
-        pass
+        fit_kws: dict[str, Any] = {"bw_method": self.bw_method}
+        if "weight" in data:
+            fit_kws["weights"] = data["weight"]
+        kde = gaussian_kde(data[orient], **fit_kws)
+        kde.set_bandwidth(kde.factor * self.bw_adjust)
+
+        return kde

-    def _fit_and_evaluate(self, data: DataFrame, orient: str, support: ndarray
-        ) ->DataFrame:
+    def _get_support(self, data: DataFrame, orient: str) -> ndarray:
+        """Define the grid that the KDE will be evaluated on."""
+        if self.gridsize is None:
+            return data[orient].to_numpy()
+
+        kde = self._fit(data, orient)
+        bw = np.sqrt(kde.covariance.squeeze())
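+        # Extend the grid `cut` bandwidths beyond the data range on each side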
+        gridmin = data[orient].min() - bw * self.cut
+        gridmax = data[orient].max() + bw * self.cut
+        return np.linspace(gridmin, gridmax, self.gridsize)
+
+    def _fit_and_evaluate(
+        self, data: DataFrame, orient: str, support: ndarray
+    ) -> DataFrame:
         """Transform single group by fitting a KDE and evaluating on a support grid."""
-        pass
+        empty = pd.DataFrame(columns=[orient, "weight", "density"], dtype=float)
+        if len(data) < 2:
+            return empty
+        try:
+            kde = self._fit(data, orient)
+        except np.linalg.LinAlgError:
+            return empty
+
+        if self.cumulative:
+            s_0 = support[0]
+            density = np.array([kde.integrate_box_1d(s_0, s_i) for s_i in support])
+        else:
+            density = kde(support)

-    def _transform(self, data: DataFrame, orient: str, grouping_vars: list[str]
-        ) ->DataFrame:
-        """Transform multiple groups by fitting KDEs and evaluating."""
-        pass
+        weight = data["weight"].sum()
+        return pd.DataFrame({orient: support, "weight": weight, "density": density})

-    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str,
-        scales: dict[str, Scale]) ->DataFrame:
-        if 'weight' not in data:
+    def _transform(
+        self, data: DataFrame, orient: str, grouping_vars: list[str]
+    ) -> DataFrame:
+        """Transform multiple groups by fitting KDEs and evaluating."""
+        empty = pd.DataFrame(columns=[*data.columns, "density"], dtype=float)
+        if len(data) < 2:
+            return empty
+        try:
+            support = self._get_support(data, orient)
+        except np.linalg.LinAlgError:
+            return empty
+
+        grouping_vars = [x for x in grouping_vars if data[x].nunique() > 1]
+        if not grouping_vars:
+            return self._fit_and_evaluate(data, orient, support)
+        groupby = GroupBy(grouping_vars)
+        return groupby.apply(data, self._fit_and_evaluate, orient, support)
+
+    def __call__(
+        self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
+    ) -> DataFrame:
+
+        if "weight" not in data:
             data = data.assign(weight=1)
-        data = data.dropna(subset=[orient, 'weight'])
+        data = data.dropna(subset=[orient, "weight"])
+
+        # Transform each group separately
         grouping_vars = [str(v) for v in data if v in groupby.order]
         if not grouping_vars or self.common_grid is True:
             res = self._transform(data, orient, grouping_vars)
@@ -128,21 +185,30 @@ class KDE(Stat):
             if self.common_grid is False:
                 grid_vars = grouping_vars
             else:
-                self._check_var_list_or_boolean('common_grid', grouping_vars)
+                self._check_var_list_or_boolean("common_grid", grouping_vars)
                 grid_vars = [v for v in self.common_grid if v in grouping_vars]
-            res = GroupBy(grid_vars).apply(data, self._transform, orient,
-                grouping_vars)
+
+            res = (
+                GroupBy(grid_vars)
+                .apply(data, self._transform, orient, grouping_vars)
+            )
+
+        # Normalize, potentially within groups
         if not grouping_vars or self.common_norm is True:
-            res = res.assign(group_weight=data['weight'].sum())
+            res = res.assign(group_weight=data["weight"].sum())
         else:
             if self.common_norm is False:
                 norm_vars = grouping_vars
             else:
-                self._check_var_list_or_boolean('common_norm', grouping_vars)
+                self._check_var_list_or_boolean("common_norm", grouping_vars)
                 norm_vars = [v for v in self.common_norm if v in grouping_vars]
-            res = res.join(data.groupby(norm_vars)['weight'].sum().rename(
-                'group_weight'), on=norm_vars)
-        res['density'] *= res.eval('weight / group_weight')
-        value = {'x': 'y', 'y': 'x'}[orient]
-        res[value] = res['density']
-        return res.drop(['weight', 'group_weight'], axis=1)
+
+            res = res.join(
+                data.groupby(norm_vars)["weight"].sum().rename("group_weight"),
+                on=norm_vars,
+            )
+
+        res["density"] *= res.eval("weight / group_weight")
+        value = {"x": "y", "y": "x"}[orient]
+        res[value] = res["density"]
+        return res.drop(["weight", "group_weight"], axis=1)
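
The KDE stat above leans on scipy's gaussian_kde object. A minimal standalone sketch of the same fit/evaluate steps for a single group, outside the patch and with illustrative data and parameter values (the construction call is an assumption; the patch only shows the bandwidth adjustment and evaluation):

    import numpy as np
    from scipy.stats import gaussian_kde

    rng = np.random.default_rng(0)
    x = rng.normal(size=200)                      # one group of observations
    weights = np.ones_like(x)                     # uniform weights

    # Fit, applying a bandwidth adjustment as in the stat's _fit step
    kde = gaussian_kde(x, bw_method="scott", weights=weights)
    kde.set_bandwidth(kde.factor * 1.0)           # bw_adjust = 1

    # Build the support grid from the fitted bandwidth (cut=3, gridsize=200)
    bw = np.sqrt(kde.covariance.squeeze())
    support = np.linspace(x.min() - bw * 3, x.max() + bw * 3, 200)

    density = kde(support)                        # probability density
    cumulative = np.array(
        [kde.integrate_box_1d(support[0], s) for s in support]
    )                                             # used when cumulative=True
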
diff --git a/seaborn/_stats/order.py b/seaborn/_stats/order.py
index f80fd1f6..c37c0985 100644
--- a/seaborn/_stats/order.py
+++ b/seaborn/_stats/order.py
@@ -1,20 +1,37 @@
+
 from __future__ import annotations
 from dataclasses import dataclass
 from typing import ClassVar, cast
 try:
     from typing import Literal
 except ImportError:
-    from typing_extensions import Literal
+    from typing_extensions import Literal  # type: ignore
+
 import numpy as np
 from pandas import DataFrame
+
 from seaborn._core.scales import Scale
 from seaborn._core.groupby import GroupBy
 from seaborn._stats.base import Stat
 from seaborn.utils import _version_predates
-_MethodKind = Literal['inverted_cdf', 'averaged_inverted_cdf',
-    'closest_observation', 'interpolated_inverted_cdf', 'hazen', 'weibull',
-    'linear', 'median_unbiased', 'normal_unbiased', 'lower', 'higher',
-    'midpoint', 'nearest']
+
+
+# From https://github.com/numpy/numpy/blob/main/numpy/lib/function_base.pyi
+_MethodKind = Literal[
+    "inverted_cdf",
+    "averaged_inverted_cdf",
+    "closest_observation",
+    "interpolated_inverted_cdf",
+    "hazen",
+    "weibull",
+    "linear",
+    "median_unbiased",
+    "normal_unbiased",
+    "lower",
+    "higher",
+    "midpoint",
+    "nearest",
+]


 @dataclass
@@ -38,10 +55,24 @@ class Perc(Stat):

     """
     k: int | list[float] = 5
-    method: str = 'linear'
+    method: str = "linear"
+
     group_by_orient: ClassVar[bool] = True

-    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str,
-        scales: dict[str, Scale]) ->DataFrame:
-        var = {'x': 'y', 'y': 'x'}[orient]
+    def _percentile(self, data: DataFrame, var: str) -> DataFrame:
+
+        k = list(np.linspace(0, 100, self.k)) if isinstance(self.k, int) else self.k
+        method = cast(_MethodKind, self.method)
+        values = data[var].dropna()
+        if _version_predates(np, "1.22"):
+            res = np.percentile(values, k, interpolation=method)  # type: ignore
+        else:
+            res = np.percentile(values, k, method=method)
+        return DataFrame({var: res, "percentile": k})
+
+    def __call__(
+        self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
+    ) -> DataFrame:
+
+        var = {"x": "y", "y": "x"}[orient]
         return groupby.apply(data, self._percentile, var)
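
The Perc stat reduces each group to a set of percentiles via numpy. A standalone sketch of that reduction for the default of five evenly spaced percentiles, with made-up values and assuming numpy >= 1.22 for the `method=` keyword (older versions spell it `interpolation=`, as handled above):

    import numpy as np
    import pandas as pd

    values = pd.Series([3.1, 4.7, 2.2, 5.9, 4.0, 3.3])    # one group's data
    k = list(np.linspace(0, 100, 5))                      # [0, 25, 50, 75, 100]
    res = np.percentile(values.dropna(), k, method="linear")
    print(pd.DataFrame({"value": res, "percentile": k}))
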
diff --git a/seaborn/_stats/regression.py b/seaborn/_stats/regression.py
index d5681d04..9ec81a4e 100644
--- a/seaborn/_stats/regression.py
+++ b/seaborn/_stats/regression.py
@@ -1,7 +1,9 @@
 from __future__ import annotations
 from dataclasses import dataclass
+
 import numpy as np
 import pandas as pd
+
 from seaborn._stats.base import Stat


@@ -10,13 +12,39 @@ class PolyFit(Stat):
     """
     Fit a polynomial of the given order and resample data onto predicted curve.
     """
+    # This is a provisional class that is useful for building out functionality.
+    # It may or may not change substantially in form or dissappear as we think
+    # through the organization of the stats subpackage.
+
     order: int = 2
     gridsize: int = 100

+    def _fit_predict(self, data):
+
+        x = data["x"]
+        y = data["y"]
+        if x.nunique() <= self.order:
+            # TODO warn?
+            xx = yy = []
+        else:
+            p = np.polyfit(x, y, self.order)
+            xx = np.linspace(x.min(), x.max(), self.gridsize)
+            yy = np.polyval(p, xx)
+
+        return pd.DataFrame(dict(x=xx, y=yy))
+
+    # TODO we should have a way of identifying the method that will be applied
+    # and then only define __call__ on a base-class of stats with this pattern
+
     def __call__(self, data, groupby, orient, scales):
-        return groupby.apply(data.dropna(subset=['x', 'y']), self._fit_predict)
+
+        return (
+            groupby
+            .apply(data.dropna(subset=["x", "y"]), self._fit_predict)
+        )


 @dataclass
 class OLSFit(Stat):
+
     ...
diff --git a/seaborn/_testing.py b/seaborn/_testing.py
index b1953e9a..c6f821cb 100644
--- a/seaborn/_testing.py
+++ b/seaborn/_testing.py
@@ -2,5 +2,89 @@ import numpy as np
 import matplotlib as mpl
 from matplotlib.colors import to_rgb, to_rgba
 from numpy.testing import assert_array_equal
-USE_PROPS = ['alpha', 'edgecolor', 'facecolor', 'fill', 'hatch', 'height',
-    'linestyle', 'linewidth', 'paths', 'xy', 'xydata', 'sizes', 'zorder']
+
+
+USE_PROPS = [
+    "alpha",
+    "edgecolor",
+    "facecolor",
+    "fill",
+    "hatch",
+    "height",
+    "linestyle",
+    "linewidth",
+    "paths",
+    "xy",
+    "xydata",
+    "sizes",
+    "zorder",
+]
+
+
+def assert_artists_equal(list1, list2):
+
+    assert len(list1) == len(list2)
+    for a1, a2 in zip(list1, list2):
+        assert a1.__class__ == a2.__class__
+        prop1 = a1.properties()
+        prop2 = a2.properties()
+        for key in USE_PROPS:
+            if key not in prop1:
+                continue
+            v1 = prop1[key]
+            v2 = prop2[key]
+            if key == "paths":
+                for p1, p2 in zip(v1, v2):
+                    assert_array_equal(p1.vertices, p2.vertices)
+                    assert_array_equal(p1.codes, p2.codes)
+            elif key == "color":
+                v1 = mpl.colors.to_rgba(v1)
+                v2 = mpl.colors.to_rgba(v2)
+                assert v1 == v2
+            elif isinstance(v1, np.ndarray):
+                assert_array_equal(v1, v2)
+            else:
+                assert v1 == v2
+
+
+def assert_legends_equal(leg1, leg2):
+
+    assert leg1.get_title().get_text() == leg2.get_title().get_text()
+    for t1, t2 in zip(leg1.get_texts(), leg2.get_texts()):
+        assert t1.get_text() == t2.get_text()
+
+    assert_artists_equal(
+        leg1.get_patches(), leg2.get_patches(),
+    )
+    assert_artists_equal(
+        leg1.get_lines(), leg2.get_lines(),
+    )
+
+
+def assert_plots_equal(ax1, ax2, labels=True):
+
+    assert_artists_equal(ax1.patches, ax2.patches)
+    assert_artists_equal(ax1.lines, ax2.lines)
+    assert_artists_equal(ax1.collections, ax2.collections)
+
+    if labels:
+        assert ax1.get_xlabel() == ax2.get_xlabel()
+        assert ax1.get_ylabel() == ax2.get_ylabel()
+
+
+def assert_colors_equal(a, b, check_alpha=True):
+
+    def handle_array(x):
+
+        if isinstance(x, np.ndarray):
+            if x.ndim > 1:
+                x = np.unique(x, axis=0).squeeze()
+            if x.ndim > 1:
+                raise ValueError("Color arrays must be 1 dimensional")
+        return x
+
+    a = handle_array(a)
+    b = handle_array(b)
+
+    f = to_rgba if check_alpha else to_rgb
+    assert f(a) == f(b)
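
These helpers are intended for tests that build two figures the same way and assert that they match. A small illustrative usage, not taken from the test suite:

    import matplotlib.pyplot as plt
    from seaborn._testing import assert_plots_equal, assert_colors_equal

    f, (ax1, ax2) = plt.subplots(2)
    ax1.plot([0, 1], [0, 1], color="C0")
    ax2.plot([0, 1], [0, 1], color="C0")
    assert_plots_equal(ax1, ax2)                 # same artists, same properties

    assert_colors_equal("red", (1, 0, 0))        # compared after RGB(A) conversion
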
diff --git a/seaborn/algorithms.py b/seaborn/algorithms.py
index 2939e8bd..2e34b9dd 100644
--- a/seaborn/algorithms.py
+++ b/seaborn/algorithms.py
@@ -32,9 +32,89 @@ def bootstrap(*args, **kwargs):
         array of bootstrapped statistic values

     """
-    pass
+    # Ensure list of arrays are same length
+    if len(np.unique(list(map(len, args)))) > 1:
+        raise ValueError("All input arrays must have the same length")
+    n = len(args[0])
+
+    # Default keyword arguments
+    n_boot = kwargs.get("n_boot", 10000)
+    func = kwargs.get("func", "mean")
+    axis = kwargs.get("axis", None)
+    units = kwargs.get("units", None)
+    random_seed = kwargs.get("random_seed", None)
+    if random_seed is not None:
+        msg = "`random_seed` has been renamed to `seed` and will be removed"
+        warnings.warn(msg)
+    seed = kwargs.get("seed", random_seed)
+    if axis is None:
+        func_kwargs = dict()
+    else:
+        func_kwargs = dict(axis=axis)
+
+    # Initialize the resampler
+    if isinstance(seed, np.random.RandomState):
+        rng = seed
+    else:
+        rng = np.random.default_rng(seed)
+
+    # Coerce to arrays
+    args = list(map(np.asarray, args))
+    if units is not None:
+        units = np.asarray(units)
+
+    if isinstance(func, str):
+
+        # Allow named numpy functions
+        f = getattr(np, func)
+
+        # Try to use nan-aware version of function if necessary
+        missing_data = np.isnan(np.sum(np.column_stack(args)))
+
+        if missing_data and not func.startswith("nan"):
+            nanf = getattr(np, f"nan{func}", None)
+            if nanf is None:
+                msg = f"Data contain nans but no nan-aware version of `{func}` found"
+                warnings.warn(msg, UserWarning)
+            else:
+                f = nanf
+
+    else:
+        f = func
+
+    # Handle numpy changes
+    try:
+        integers = rng.integers
+    except AttributeError:
+        integers = rng.randint
+
+    # Do the bootstrap
+    if units is not None:
+        return _structured_bootstrap(args, n_boot, units, f,
+                                     func_kwargs, integers)
+
+    boot_dist = []
+    for i in range(int(n_boot)):
+        resampler = integers(0, n, n, dtype=np.intp)  # intp is indexing dtype
+        sample = [a.take(resampler, axis=0) for a in args]
+        boot_dist.append(f(*sample, **func_kwargs))
+    return np.array(boot_dist)


 def _structured_bootstrap(args, n_boot, units, func, func_kwargs, integers):
     """Resample units instead of datapoints."""
-    pass
+    unique_units = np.unique(units)
+    n_units = len(unique_units)
+
+    args = [[a[units == unit] for unit in unique_units] for a in args]
+
+    boot_dist = []
+    for i in range(int(n_boot)):
+        resampler = integers(0, n_units, n_units, dtype=np.intp)
+        sample = [[a[i] for i in resampler] for a in args]
+        lengths = map(len, sample[0])
+        resampler = [integers(0, n, n, dtype=np.intp) for n in lengths]
+        sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)] for a in sample]
+        sample = list(map(np.concatenate, sample))
+        boot_dist.append(func(*sample, **func_kwargs))
+    return np.array(boot_dist)
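
Stripped of the keyword handling and the nan-aware dispatch, the core of `bootstrap` is the resampling loop below; a standalone sketch with made-up data and a fixed seed:

    import numpy as np

    rng = np.random.default_rng(42)
    a = rng.normal(loc=10, scale=2, size=100)    # observed sample

    n = len(a)
    boot_dist = []
    for _ in range(1000):                        # n_boot resamples
        resampler = rng.integers(0, n, n, dtype=np.intp)   # indices with replacement
        boot_dist.append(np.mean(a.take(resampler, axis=0)))
    boot_dist = np.array(boot_dist)

    # e.g. a percentile bootstrap confidence interval for the mean
    ci = np.percentile(boot_dist, [2.5, 97.5])
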
diff --git a/seaborn/axisgrid.py b/seaborn/axisgrid.py
index 54c0052d..17d333bc 100644
--- a/seaborn/axisgrid.py
+++ b/seaborn/axisgrid.py
@@ -3,20 +3,35 @@ from itertools import product
 from inspect import signature
 import warnings
 from textwrap import dedent
+
 import numpy as np
 import pandas as pd
 import matplotlib as mpl
 import matplotlib.pyplot as plt
+
 from ._base import VectorPlotter, variable_type, categorical_order
 from ._core.data import handle_data_source
 from ._compat import share_axis, get_legend_handles
 from . import utils
-from .utils import adjust_legend_subtitles, set_hls_values, _check_argument, _draw_figure, _disable_autolayout
+from .utils import (
+    adjust_legend_subtitles,
+    set_hls_values,
+    _check_argument,
+    _draw_figure,
+    _disable_autolayout
+)
 from .palettes import color_palette, blend_palette
-from ._docstrings import DocstringComponents, _core_docs
-__all__ = ['FacetGrid', 'PairGrid', 'JointGrid', 'pairplot', 'jointplot']
-_param_docs = DocstringComponents.from_nested_components(core=_core_docs[
-    'params'])
+from ._docstrings import (
+    DocstringComponents,
+    _core_docs,
+)
+
+__all__ = ["FacetGrid", "PairGrid", "JointGrid", "pairplot", "jointplot"]
+
+
+_param_docs = DocstringComponents.from_nested_components(
+    core=_core_docs["params"],
+)


 class _BaseGrid:
@@ -24,17 +39,24 @@ class _BaseGrid:

     def set(self, **kwargs):
         """Set attributes on each subplot Axes."""
-        pass
+        for ax in self.axes.flat:
+            if ax is not None:  # Handle removed axes
+                ax.set(**kwargs)
+        return self

     @property
     def fig(self):
         """DEPRECATED: prefer the `figure` property."""
-        pass
+        # Grid.figure is preferred because it matches the Axes attribute name.
+        # But as the maintenance burden of having this property is minimal,
+        # let's be slow about formally deprecating it. For now just note its deprecation
+        # in the docstring; add a warning in version 0.13, and eventually remove it.
+        return self._figure

     @property
     def figure(self):
         """Access the :class:`matplotlib.figure.Figure` object underlying the grid."""
-        pass
+        return self._figure

     def apply(self, func, *args, **kwargs):
         """
@@ -48,7 +70,8 @@ class _BaseGrid:
         Added in v0.12.0.

         """
-        pass
+        func(self, *args, **kwargs)
+        return self

     def pipe(self, func, *args, **kwargs):
         """
@@ -62,7 +85,7 @@ class _BaseGrid:
         Added in v0.12.0.

         """
-        pass
+        return func(self, *args, **kwargs)

     def savefig(self, *args, **kwargs):
         """
@@ -72,7 +95,9 @@ class _BaseGrid:
         by default. Parameters are passed through to the matplotlib function.

         """
-        pass
+        kwargs = kwargs.copy()
+        kwargs.setdefault("bbox_inches", "tight")
+        self.figure.savefig(*args, **kwargs)


 class Grid(_BaseGrid):
@@ -81,16 +106,25 @@ class Grid(_BaseGrid):
     _legend_out = True

     def __init__(self):
+
         self._tight_layout_rect = [0, 0, 1, 1]
         self._tight_layout_pad = None
+
+        # This attribute is set externally and is a hack to handle newer functions that
+        # don't add proxy artists onto the Axes. We need an overall cleaner approach.
         self._extract_legend_handles = False

     def tight_layout(self, *args, **kwargs):
         """Call fig.tight_layout within rect that exclude the legend."""
-        pass
+        kwargs = kwargs.copy()
+        kwargs.setdefault("rect", self._tight_layout_rect)
+        if self._tight_layout_pad is not None:
+            kwargs.setdefault("pad", self._tight_layout_pad)
+        self._figure.tight_layout(*args, **kwargs)
+        return self

     def add_legend(self, legend_data=None, title=None, label_order=None,
-        adjust_subtitles=False, **kwargs):
+                   adjust_subtitles=False, **kwargs):
         """Draw a legend, maybe placing it outside axes and resizing the figure.

         Parameters
@@ -117,20 +151,138 @@ class Grid(_BaseGrid):
             Returns self for easy chaining.

         """
-        pass
+        # Find the data for the legend
+        if legend_data is None:
+            legend_data = self._legend_data
+        if label_order is None:
+            if self.hue_names is None:
+                label_order = list(legend_data.keys())
+            else:
+                label_order = list(map(utils.to_utf8, self.hue_names))
+
+        blank_handle = mpl.patches.Patch(alpha=0, linewidth=0)
+        handles = [legend_data.get(lab, blank_handle) for lab in label_order]
+        title = self._hue_var if title is None else title
+        title_size = mpl.rcParams["legend.title_fontsize"]
+
+        # Unpack nested labels from a hierarchical legend
+        labels = []
+        for entry in label_order:
+            if isinstance(entry, tuple):
+                _, label = entry
+            else:
+                label = entry
+            labels.append(label)
+
+        # Set default legend kwargs
+        kwargs.setdefault("scatterpoints", 1)
+
+        if self._legend_out:
+
+            kwargs.setdefault("frameon", False)
+            kwargs.setdefault("loc", "center right")
+
+            # Draw a full-figure legend outside the grid
+            figlegend = self._figure.legend(handles, labels, **kwargs)
+
+            self._legend = figlegend
+            figlegend.set_title(title, prop={"size": title_size})
+
+            if adjust_subtitles:
+                adjust_legend_subtitles(figlegend)
+
+            # Draw the plot to set the bounding boxes correctly
+            _draw_figure(self._figure)
+
+            # Calculate and set the new width of the figure so the legend fits
+            legend_width = figlegend.get_window_extent().width / self._figure.dpi
+            fig_width, fig_height = self._figure.get_size_inches()
+            self._figure.set_size_inches(fig_width + legend_width, fig_height)
+
+            # Draw the plot again to get the new transformations
+            _draw_figure(self._figure)
+
+            # Now calculate how much space we need on the right side
+            legend_width = figlegend.get_window_extent().width / self._figure.dpi
+            space_needed = legend_width / (fig_width + legend_width)
+            margin = .04 if self._margin_titles else .01
+            self._space_needed = margin + space_needed
+            right = 1 - self._space_needed
+
+            # Place the subplot axes to give space for the legend
+            self._figure.subplots_adjust(right=right)
+            self._tight_layout_rect[2] = right
+
+        else:
+            # Draw a legend in the first axis
+            ax = self.axes.flat[0]
+            kwargs.setdefault("loc", "best")
+
+            leg = ax.legend(handles, labels, **kwargs)
+            leg.set_title(title, prop={"size": title_size})
+            self._legend = leg
+
+            if adjust_subtitles:
+                adjust_legend_subtitles(leg)
+
+        return self

     def _update_legend_data(self, ax):
         """Extract the legend data from an axes object and save it."""
-        pass
+        data = {}
+
+        # Get data directly from the legend, which is necessary
+        # for newer functions that don't add labeled proxy artists
+        if ax.legend_ is not None and self._extract_legend_handles:
+            handles = get_legend_handles(ax.legend_)
+            labels = [t.get_text() for t in ax.legend_.texts]
+            data.update({label: handle for handle, label in zip(handles, labels)})
+
+        handles, labels = ax.get_legend_handles_labels()
+        data.update({label: handle for handle, label in zip(handles, labels)})
+
+        self._legend_data.update(data)
+
+        # Now clear the legend
+        ax.legend_ = None

     def _get_palette(self, data, hue, hue_order, palette):
         """Get a list of colors for the hue variable."""
-        pass
+        if hue is None:
+            palette = color_palette(n_colors=1)
+
+        else:
+            hue_names = categorical_order(data[hue], hue_order)
+            n_colors = len(hue_names)
+
+            # By default use either the current color palette or HUSL
+            if palette is None:
+                current_palette = utils.get_color_cycle()
+                if n_colors > len(current_palette):
+                    colors = color_palette("husl", n_colors)
+                else:
+                    colors = color_palette(n_colors=n_colors)
+
+            # Allow for palette to map from hue variable names
+            elif isinstance(palette, dict):
+                color_names = [palette[h] for h in hue_names]
+                colors = color_palette(color_names, n_colors)
+
+            # Otherwise act as if we just got a list of colors
+            else:
+                colors = color_palette(palette, n_colors)
+
+            palette = color_palette(colors, n_colors)
+
+        return palette

     @property
     def legend(self):
         """The :class:`matplotlib.legend.Legend` object, if present."""
-        pass
+        try:
+            return self._legend
+        except AttributeError:
+            return None

     def tick_params(self, axis='both', **kwargs):
         """Modify the ticks, tick labels, and gridlines.
@@ -149,81 +301,111 @@ class Grid(_BaseGrid):
             Returns self for easy chaining.

         """
-        pass
+        for ax in self.figure.axes:
+            ax.tick_params(axis=axis, **kwargs)
+        return self
+

+_facet_docs = dict(

-_facet_docs = dict(data=dedent(
-    """    data : DataFrame
+    data=dedent("""\
+    data : DataFrame
         Tidy ("long-form") dataframe where each column is a variable and each
-        row is an observation.    """
-    ), rowcol=dedent(
-    """    row, col : vectors or keys in ``data``
-        Variables that define subsets to plot on different facets.    """
-    ), rowcol_order=dedent(
-    """    {row,col}_order : vector of strings
+        row is an observation.\
+    """),
+    rowcol=dedent("""\
+    row, col : vectors or keys in ``data``
+        Variables that define subsets to plot on different facets.\
+    """),
+    rowcol_order=dedent("""\
+    {row,col}_order : vector of strings
         Specify the order in which levels of the ``row`` and/or ``col`` variables
-        appear in the grid of subplots.    """
-    ), col_wrap=dedent(
-    """    col_wrap : int
+        appear in the grid of subplots.\
+    """),
+    col_wrap=dedent("""\
+    col_wrap : int
         "Wrap" the column variable at this width, so that the column facets
-        span multiple rows. Incompatible with a ``row`` facet.    """
-    ), share_xy=dedent(
-    """    share{x,y} : bool, 'col', or 'row' optional
+        span multiple rows. Incompatible with a ``row`` facet.\
+    """),
+    share_xy=dedent("""\
+    share{x,y} : bool, 'col', or 'row' optional
         If true, the facets will share y axes across columns and/or x axes
-        across rows.    """
-    ), height=dedent(
-    """    height : scalar
-        Height (in inches) of each facet. See also: ``aspect``.    """
-    ), aspect=dedent(
-    """    aspect : scalar
+        across rows.\
+    """),
+    height=dedent("""\
+    height : scalar
+        Height (in inches) of each facet. See also: ``aspect``.\
+    """),
+    aspect=dedent("""\
+    aspect : scalar
         Aspect ratio of each facet, so that ``aspect * height`` gives the width
-        of each facet in inches.    """
-    ), palette=dedent(
-    """    palette : palette name, list, or dict
+        of each facet in inches.\
+    """),
+    palette=dedent("""\
+    palette : palette name, list, or dict
         Colors to use for the different levels of the ``hue`` variable. Should
         be something that can be interpreted by :func:`color_palette`, or a
-        dictionary mapping hue levels to matplotlib colors.    """
-    ), legend_out=dedent(
-    """    legend_out : bool
+        dictionary mapping hue levels to matplotlib colors.\
+    """),
+    legend_out=dedent("""\
+    legend_out : bool
         If ``True``, the figure size will be extended, and the legend will be
-        drawn outside the plot on the center right.    """
-    ), margin_titles=dedent(
-    """    margin_titles : bool
+        drawn outside the plot on the center right.\
+    """),
+    margin_titles=dedent("""\
+    margin_titles : bool
         If ``True``, the titles for the row variable are drawn to the right of
         the last column. This option is experimental and may not work in all
-        cases.    """
-    ), facet_kws=dedent(
-    """    facet_kws : dict
+        cases.\
+    """),
+    facet_kws=dedent("""\
+    facet_kws : dict
         Additional parameters passed to :class:`FacetGrid`.
-    """
-    ))
+    """),
+)


 class FacetGrid(Grid):
     """Multi-plot grid for plotting conditional relationships."""

-    def __init__(self, data, *, row=None, col=None, hue=None, col_wrap=None,
+    def __init__(
+        self, data, *,
+        row=None, col=None, hue=None, col_wrap=None,
         sharex=True, sharey=True, height=3, aspect=1, palette=None,
         row_order=None, col_order=None, hue_order=None, hue_kws=None,
-        dropna=False, legend_out=True, despine=True, margin_titles=False,
-        xlim=None, ylim=None, subplot_kws=None, gridspec_kws=None):
+        dropna=False, legend_out=True, despine=True,
+        margin_titles=False, xlim=None, ylim=None, subplot_kws=None,
+        gridspec_kws=None,
+    ):
+
         super().__init__()
         data = handle_data_source(data)
+
+        # Determine the hue facet layer information
         hue_var = hue
         if hue is None:
             hue_names = None
         else:
             hue_names = categorical_order(data[hue], hue_order)
+
         colors = self._get_palette(data, hue, hue_order, palette)
+
+        # Set up the lists of names for the row and column facet variables
         if row is None:
             row_names = []
         else:
             row_names = categorical_order(data[row], row_order)
+
         if col is None:
             col_names = []
         else:
             col_names = categorical_order(data[col], col_order)
+
+        # Additional dict of kwarg -> list of values for mapping the hue var
         hue_kws = hue_kws if hue_kws is not None else {}
+
+        # Make a boolean mask that is True anywhere there is an NA
+        # value in one of the faceting variables, but only if dropna is True
         none_na = np.zeros(len(data), bool)
         if dropna:
             row_na = none_na if row is None else data[row].isnull()
@@ -232,33 +414,53 @@ class FacetGrid(Grid):
             not_na = ~(row_na | col_na | hue_na)
         else:
             not_na = ~none_na
+
+        # Compute the grid shape
         ncol = 1 if col is None else len(col_names)
         nrow = 1 if row is None else len(row_names)
         self._n_facets = ncol * nrow
+
         self._col_wrap = col_wrap
         if col_wrap is not None:
             if row is not None:
-                err = 'Cannot use `row` and `col_wrap` together.'
+                err = "Cannot use `row` and `col_wrap` together."
                 raise ValueError(err)
             ncol = col_wrap
             nrow = int(np.ceil(len(col_names) / col_wrap))
         self._ncol = ncol
         self._nrow = nrow
-        figsize = ncol * height * aspect, nrow * height
+
+        # Calculate the base figure size
+        # This can get stretched later by a legend
+        # TODO this doesn't account for axis labels
+        figsize = (ncol * height * aspect, nrow * height)
+
+        # Validate some inputs
         if col_wrap is not None:
             margin_titles = False
+
+        # Build the subplot keyword dictionary
         subplot_kws = {} if subplot_kws is None else subplot_kws.copy()
         gridspec_kws = {} if gridspec_kws is None else gridspec_kws.copy()
         if xlim is not None:
-            subplot_kws['xlim'] = xlim
+            subplot_kws["xlim"] = xlim
         if ylim is not None:
-            subplot_kws['ylim'] = ylim
+            subplot_kws["ylim"] = ylim
+
+        # --- Initialize the subplot grid
+
         with _disable_autolayout():
             fig = plt.figure(figsize=figsize)
+
         if col_wrap is None:
-            kwargs = dict(squeeze=False, sharex=sharex, sharey=sharey,
-                subplot_kw=subplot_kws, gridspec_kw=gridspec_kws)
+
+            kwargs = dict(squeeze=False,
+                          sharex=sharex, sharey=sharey,
+                          subplot_kw=subplot_kws,
+                          gridspec_kw=gridspec_kws)
+
             axes = fig.subplots(nrow, ncol, **kwargs)
+
             if col is None and row is None:
                 axes_dict = {}
             elif col is None:
@@ -268,32 +470,48 @@ class FacetGrid(Grid):
             else:
                 facet_product = product(row_names, col_names)
                 axes_dict = dict(zip(facet_product, axes.flat))
+
         else:
+
+            # If wrapping the col variable we need to make the grid ourselves
             if gridspec_kws:
-                warnings.warn('`gridspec_kws` ignored when using `col_wrap`')
+                warnings.warn("`gridspec_kws` ignored when using `col_wrap`")
+
             n_axes = len(col_names)
             axes = np.empty(n_axes, object)
             axes[0] = fig.add_subplot(nrow, ncol, 1, **subplot_kws)
             if sharex:
-                subplot_kws['sharex'] = axes[0]
+                subplot_kws["sharex"] = axes[0]
             if sharey:
-                subplot_kws['sharey'] = axes[0]
+                subplot_kws["sharey"] = axes[0]
             for i in range(1, n_axes):
                 axes[i] = fig.add_subplot(nrow, ncol, i + 1, **subplot_kws)
+
             axes_dict = dict(zip(col_names, axes))
+
+        # --- Set up the class attributes
+
+        # Attributes that are part of the public API but accessed through
+        # a property so that Sphinx adds them to the auto class doc
         self._figure = fig
         self._axes = axes
         self._axes_dict = axes_dict
         self._legend = None
+
+        # Public attributes that aren't explicitly documented
+        # (It's not obvious that having them be public was a good idea)
         self.data = data
         self.row_names = row_names
         self.col_names = col_names
         self.hue_names = hue_names
         self.hue_kws = hue_kws
+
+        # Next the private variables
         self._nrow = nrow
         self._row_var = row
         self._ncol = ncol
         self._col_var = col
+
         self._margin_titles = margin_titles
         self._margin_titles_texts = []
         self._col_wrap = col_wrap
@@ -307,24 +525,31 @@ class FacetGrid(Grid):
         self._sharey = sharey
         self._dropna = dropna
         self._not_na = not_na
+
+        # --- Make the axes look good
+
         self.set_titles()
         self.tight_layout()
+
         if despine:
             self.despine()
+
         if sharex in [True, 'col']:
             for ax in self._not_bottom_axes:
                 for label in ax.get_xticklabels():
                     label.set_visible(False)
                 ax.xaxis.offsetText.set_visible(False)
                 ax.xaxis.label.set_visible(False)
+
         if sharey in [True, 'row']:
             for ax in self._not_left_axes:
                 for label in ax.get_yticklabels():
                     label.set_visible(False)
                 ax.yaxis.offsetText.set_visible(False)
                 ax.yaxis.label.set_visible(False)
-    __init__.__doc__ = dedent(
-        """        Initialize the matplotlib figure and FacetGrid object.
+
+    __init__.__doc__ = dedent("""\
+        Initialize the matplotlib figure and FacetGrid object.

         This class maps a dataset onto multiple axes arrayed in a grid of rows
         and columns that correspond to *levels* of variables in the dataset.
@@ -413,8 +638,7 @@ class FacetGrid(Grid):

         .. include:: ../docstrings/FacetGrid.rst

-        """
-        ).format(**_facet_docs)
+        """).format(**_facet_docs)

     def facet_data(self):
         """Generator for name indices and data subsets for each facet.
@@ -429,7 +653,32 @@ class FacetGrid(Grid):
             is None.

         """
-        pass
+        data = self.data
+
+        # Construct masks for the row variable
+        if self.row_names:
+            row_masks = [data[self._row_var] == n for n in self.row_names]
+        else:
+            row_masks = [np.repeat(True, len(self.data))]
+
+        # Construct masks for the column variable
+        if self.col_names:
+            col_masks = [data[self._col_var] == n for n in self.col_names]
+        else:
+            col_masks = [np.repeat(True, len(self.data))]
+
+        # Construct masks for the hue variable
+        if self.hue_names:
+            hue_masks = [data[self._hue_var] == n for n in self.hue_names]
+        else:
+            hue_masks = [np.repeat(True, len(self.data))]
+
+        # Here is the main generator loop
+        for (i, row), (j, col), (k, hue) in product(enumerate(row_masks),
+                                                    enumerate(col_masks),
+                                                    enumerate(hue_masks)):
+            data_ijk = data[row & col & hue & self._not_na]
+            yield (i, j, k), data_ijk

     def map(self, func, *args, **kwargs):
         """Apply a plotting function to each facet's subset of the data.
@@ -454,7 +703,64 @@ class FacetGrid(Grid):
             Returns self.

         """
-        pass
+        # If color was a keyword argument, grab it here
+        kw_color = kwargs.pop("color", None)
+
+        # How we use the function depends on where it comes from
+        func_module = str(getattr(func, "__module__", ""))
+
+        # Check for categorical plots without order information
+        if func_module == "seaborn.categorical":
+            if "order" not in kwargs:
+                warning = ("Using the {} function without specifying "
+                           "`order` is likely to produce an incorrect "
+                           "plot.".format(func.__name__))
+                warnings.warn(warning)
+            if len(args) == 3 and "hue_order" not in kwargs:
+                warning = ("Using the {} function without specifying "
+                           "`hue_order` is likely to produce an incorrect "
+                           "plot.".format(func.__name__))
+                warnings.warn(warning)
+
+        # Iterate over the data subsets
+        for (row_i, col_j, hue_k), data_ijk in self.facet_data():
+
+            # If this subset is null, move on
+            if not data_ijk.values.size:
+                continue
+
+            # Get the current axis
+            modify_state = not func_module.startswith("seaborn")
+            ax = self.facet_axis(row_i, col_j, modify_state)
+
+            # Decide what color to plot with
+            kwargs["color"] = self._facet_color(hue_k, kw_color)
+
+            # Insert the other hue aesthetics if appropriate
+            for kw, val_list in self.hue_kws.items():
+                kwargs[kw] = val_list[hue_k]
+
+            # Insert a label in the keyword arguments for the legend
+            if self._hue_var is not None:
+                kwargs["label"] = utils.to_utf8(self.hue_names[hue_k])
+
+            # Get the actual data we are going to plot with
+            plot_data = data_ijk[list(args)]
+            if self._dropna:
+                plot_data = plot_data.dropna()
+            plot_args = [v for k, v in plot_data.items()]
+
+            # Some matplotlib functions don't handle pandas objects correctly
+            if func_module.startswith("matplotlib"):
+                plot_args = [v.values for v in plot_args]
+
+            # Draw the plot
+            self._facet_plot(func, ax, plot_args, kwargs)
+
+        # Finalize the annotations and layout
+        self._finalize_grid(args[:2])
+
+        return self

     def map_dataframe(self, func, *args, **kwargs):
         """Like ``.map`` but passes args as strings and inserts data in kwargs.
@@ -484,43 +790,158 @@ class FacetGrid(Grid):
             Returns self.

         """
-        pass
+
+        # If color was a keyword argument, grab it here
+        kw_color = kwargs.pop("color", None)
+
+        # Iterate over the data subsets
+        for (row_i, col_j, hue_k), data_ijk in self.facet_data():
+
+            # If this subset is null, move on
+            if not data_ijk.values.size:
+                continue
+
+            # Get the current axis
+            modify_state = not str(func.__module__).startswith("seaborn")
+            ax = self.facet_axis(row_i, col_j, modify_state)
+
+            # Decide what color to plot with
+            kwargs["color"] = self._facet_color(hue_k, kw_color)
+
+            # Insert the other hue aesthetics if appropriate
+            for kw, val_list in self.hue_kws.items():
+                kwargs[kw] = val_list[hue_k]
+
+            # Insert a label in the keyword arguments for the legend
+            if self._hue_var is not None:
+                kwargs["label"] = self.hue_names[hue_k]
+
+            # Stick the facet dataframe into the kwargs
+            if self._dropna:
+                data_ijk = data_ijk.dropna()
+            kwargs["data"] = data_ijk
+
+            # Draw the plot
+            self._facet_plot(func, ax, args, kwargs)
+
+        # For axis labels, prefer to use positional args for backcompat
+        # but also extract the x/y kwargs and use if no corresponding arg
+        axis_labels = [kwargs.get("x", None), kwargs.get("y", None)]
+        for i, val in enumerate(args[:2]):
+            axis_labels[i] = val
+        self._finalize_grid(axis_labels)
+
+        return self
+
+    def _facet_color(self, hue_index, kw_color):
+
+        color = self._colors[hue_index]
+        if kw_color is not None:
+            return kw_color
+        elif color is not None:
+            return color
+
+    def _facet_plot(self, func, ax, plot_args, plot_kwargs):
+
+        # Draw the plot
+        if str(func.__module__).startswith("seaborn"):
+            plot_kwargs = plot_kwargs.copy()
+            semantics = ["x", "y", "hue", "size", "style"]
+            for key, val in zip(semantics, plot_args):
+                plot_kwargs[key] = val
+            plot_args = []
+            plot_kwargs["ax"] = ax
+        func(*plot_args, **plot_kwargs)
+
+        # Sort out the supporting information
+        self._update_legend_data(ax)

     def _finalize_grid(self, axlabels):
         """Finalize the annotations and layout."""
-        pass
+        self.set_axis_labels(*axlabels)
+        self.tight_layout()

     def facet_axis(self, row_i, col_j, modify_state=True):
         """Make the axis identified by these indices active and return it."""
-        pass
+
+        # Calculate the actual indices of the axes to plot on
+        if self._col_wrap is not None:
+            ax = self.axes.flat[col_j]
+        else:
+            ax = self.axes[row_i, col_j]
+
+        # Get a reference to the axes object we want, and make it active
+        if modify_state:
+            plt.sca(ax)
+        return ax

     def despine(self, **kwargs):
         """Remove axis spines from the facets."""
-        pass
+        utils.despine(self._figure, **kwargs)
+        return self

-    def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **
-        kwargs):
+    def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):
         """Set axis labels on the left column and bottom row of the grid."""
-        pass
+        if x_var is not None:
+            self._x_var = x_var
+            self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)
+        if y_var is not None:
+            self._y_var = y_var
+            self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)
+
+        return self

     def set_xlabels(self, label=None, clear_inner=True, **kwargs):
         """Label the x axis on the bottom row of the grid."""
-        pass
+        if label is None:
+            label = self._x_var
+        for ax in self._bottom_axes:
+            ax.set_xlabel(label, **kwargs)
+        if clear_inner:
+            for ax in self._not_bottom_axes:
+                ax.set_xlabel("")
+        return self

     def set_ylabels(self, label=None, clear_inner=True, **kwargs):
         """Label the y axis on the left column of the grid."""
-        pass
+        if label is None:
+            label = self._y_var
+        for ax in self._left_axes:
+            ax.set_ylabel(label, **kwargs)
+        if clear_inner:
+            for ax in self._not_left_axes:
+                ax.set_ylabel("")
+        return self

     def set_xticklabels(self, labels=None, step=None, **kwargs):
         """Set x axis tick labels of the grid."""
-        pass
+        for ax in self.axes.flat:
+            curr_ticks = ax.get_xticks()
+            ax.set_xticks(curr_ticks)
+            if labels is None:
+                curr_labels = [label.get_text() for label in ax.get_xticklabels()]
+                if step is not None:
+                    xticks = ax.get_xticks()[::step]
+                    curr_labels = curr_labels[::step]
+                    ax.set_xticks(xticks)
+                ax.set_xticklabels(curr_labels, **kwargs)
+            else:
+                ax.set_xticklabels(labels, **kwargs)
+        return self

     def set_yticklabels(self, labels=None, **kwargs):
         """Set y axis tick labels on the left column of the grid."""
-        pass
+        for ax in self.axes.flat:
+            curr_ticks = ax.get_yticks()
+            ax.set_yticks(curr_ticks)
+            if labels is None:
+                curr_labels = [label.get_text() for label in ax.get_yticklabels()]
+                ax.set_yticklabels(curr_labels, **kwargs)
+            else:
+                ax.set_yticklabels(labels, **kwargs)
+        return self

-    def set_titles(self, template=None, row_template=None, col_template=
-        None, **kwargs):
+    def set_titles(self, template=None, row_template=None, col_template=None, **kwargs):
         """Draw titles either above each facet or on the grid margins.

         Parameters
@@ -542,10 +963,76 @@ class FacetGrid(Grid):
             Returns self.

         """
-        pass
-
-    def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws
-        ):
+        args = dict(row_var=self._row_var, col_var=self._col_var)
+        kwargs["size"] = kwargs.pop("size", mpl.rcParams["axes.labelsize"])
+
+        # Establish default templates
+        if row_template is None:
+            row_template = "{row_var} = {row_name}"
+        if col_template is None:
+            col_template = "{col_var} = {col_name}"
+        if template is None:
+            if self._row_var is None:
+                template = col_template
+            elif self._col_var is None:
+                template = row_template
+            else:
+                template = " | ".join([row_template, col_template])
+
+        row_template = utils.to_utf8(row_template)
+        col_template = utils.to_utf8(col_template)
+        template = utils.to_utf8(template)
+
+        if self._margin_titles:
+
+            # Remove any existing title texts
+            for text in self._margin_titles_texts:
+                text.remove()
+            self._margin_titles_texts = []
+
+            if self.row_names is not None:
+                # Draw the row titles on the right edge of the grid
+                for i, row_name in enumerate(self.row_names):
+                    ax = self.axes[i, -1]
+                    args.update(dict(row_name=row_name))
+                    title = row_template.format(**args)
+                    text = ax.annotate(
+                        title, xy=(1.02, .5), xycoords="axes fraction",
+                        rotation=270, ha="left", va="center",
+                        **kwargs
+                    )
+                    self._margin_titles_texts.append(text)
+
+            if self.col_names is not None:
+                # Draw the column titles as normal titles
+                for j, col_name in enumerate(self.col_names):
+                    args.update(dict(col_name=col_name))
+                    title = col_template.format(**args)
+                    self.axes[0, j].set_title(title, **kwargs)
+
+            return self
+
+        # Otherwise title each facet with all the necessary information
+        if (self._row_var is not None) and (self._col_var is not None):
+            for i, row_name in enumerate(self.row_names):
+                for j, col_name in enumerate(self.col_names):
+                    args.update(dict(row_name=row_name, col_name=col_name))
+                    title = template.format(**args)
+                    self.axes[i, j].set_title(title, **kwargs)
+        elif self.row_names is not None and len(self.row_names):
+            for i, row_name in enumerate(self.row_names):
+                args.update(dict(row_name=row_name))
+                title = template.format(**args)
+                self.axes[i, 0].set_title(title, **kwargs)
+        elif self.col_names is not None and len(self.col_names):
+            for i, col_name in enumerate(self.col_names):
+                args.update(dict(col_name=col_name))
+                title = template.format(**args)
+                # Index the flat array so col_wrap works
+                self.axes.flat[i].set_title(title, **kwargs)
+        return self
+
+    def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):
         """Add a reference line(s) to each facet.

         Parameters
@@ -568,17 +1055,34 @@ class FacetGrid(Grid):
             Returns ``self`` for easy method chaining.

         """
-        pass
+        line_kws['color'] = color
+        line_kws['linestyle'] = linestyle
+
+        if x is not None:
+            self.map(plt.axvline, x=x, **line_kws)
+
+        if y is not None:
+            self.map(plt.axhline, y=y, **line_kws)
+
+        return self
+
+    # ------ Properties that are part of the public API and documented by Sphinx

     @property
     def axes(self):
         """An array of the :class:`matplotlib.axes.Axes` objects in the grid."""
-        pass
+        return self._axes

     @property
     def ax(self):
         """The :class:`matplotlib.axes.Axes` when no faceting variables are assigned."""
-        pass
+        if self.axes.shape == (1, 1):
+            return self.axes[0, 0]
+        else:
+            err = (
+                "Use the `.axes` attribute when facet variables are assigned."
+            )
+            raise AttributeError(err)

     @property
     def axes_dict(self):
@@ -589,32 +1093,85 @@ class FacetGrid(Grid):
         assigned, each key is a ``({row_level}, {col_level})`` tuple.

         """
-        pass
+        return self._axes_dict
+
+    # ------ Private properties, that require some computation to get

     @property
     def _inner_axes(self):
         """Return a flat array of the inner axes."""
-        pass
+        if self._col_wrap is None:
+            return self.axes[:-1, 1:].flat
+        else:
+            axes = []
+            n_empty = self._nrow * self._ncol - self._n_facets
+            for i, ax in enumerate(self.axes):
+                append = (
+                    i % self._ncol
+                    and i < (self._ncol * (self._nrow - 1))
+                    and i < (self._ncol * (self._nrow - 1) - n_empty)
+                )
+                if append:
+                    axes.append(ax)
+            return np.array(axes, object).flat

     @property
     def _left_axes(self):
         """Return a flat array of the left column of axes."""
-        pass
+        if self._col_wrap is None:
+            return self.axes[:, 0].flat
+        else:
+            axes = []
+            for i, ax in enumerate(self.axes):
+                if not i % self._ncol:
+                    axes.append(ax)
+            return np.array(axes, object).flat

     @property
     def _not_left_axes(self):
         """Return a flat array of axes that aren't on the left column."""
-        pass
+        if self._col_wrap is None:
+            return self.axes[:, 1:].flat
+        else:
+            axes = []
+            for i, ax in enumerate(self.axes):
+                if i % self._ncol:
+                    axes.append(ax)
+            return np.array(axes, object).flat

     @property
     def _bottom_axes(self):
         """Return a flat array of the bottom row of axes."""
-        pass
+        if self._col_wrap is None:
+            return self.axes[-1, :].flat
+        else:
+            axes = []
+            n_empty = self._nrow * self._ncol - self._n_facets
+            for i, ax in enumerate(self.axes):
+                append = (
+                    i >= (self._ncol * (self._nrow - 1))
+                    or i >= (self._ncol * (self._nrow - 1) - n_empty)
+                )
+                if append:
+                    axes.append(ax)
+            return np.array(axes, object).flat

     @property
     def _not_bottom_axes(self):
         """Return a flat array of axes that aren't on the bottom row."""
-        pass
+        if self._col_wrap is None:
+            return self.axes[:-1, :].flat
+        else:
+            axes = []
+            n_empty = self._nrow * self._ncol - self._n_facets
+            for i, ax in enumerate(self.axes):
+                append = (
+                    i < (self._ncol * (self._nrow - 1))
+                    and i < (self._ncol * (self._nrow - 1) - n_empty)
+                )
+                if append:
+                    axes.append(ax)
+            return np.array(axes, object).flat


 class PairGrid(Grid):
@@ -631,11 +1188,11 @@ class PairGrid(Grid):
     See the :ref:`tutorial <grid_tutorial>` for more information.

     """
-
-    def __init__(self, data, *, hue=None, vars=None, x_vars=None, y_vars=
-        None, hue_order=None, palette=None, hue_kws=None, corner=False,
-        diag_sharey=True, height=2.5, aspect=1, layout_pad=0.5, despine=
-        True, dropna=False):
+    def __init__(
+        self, data, *, hue=None, vars=None, x_vars=None, y_vars=None,
+        hue_order=None, palette=None, hue_kws=None, corner=False, diag_sharey=True,
+        height=2.5, aspect=1, layout_pad=.5, despine=True, dropna=False,
+    ):
         """Initialize the plot figure and PairGrid object.

         Parameters
@@ -686,8 +1243,11 @@ class PairGrid(Grid):
         .. include:: ../docstrings/PairGrid.rst

         """
+
         super().__init__()
         data = handle_data_source(data)
+
+        # Sort out the variables that define the grid
         numeric_cols = self._find_numeric_cols(data)
         if hue in numeric_cols:
             numeric_cols.remove(hue)
@@ -698,52 +1258,88 @@ class PairGrid(Grid):
             x_vars = numeric_cols
         if y_vars is None:
             y_vars = numeric_cols
+
         if np.isscalar(x_vars):
             x_vars = [x_vars]
         if np.isscalar(y_vars):
             y_vars = [y_vars]
+
         self.x_vars = x_vars = list(x_vars)
         self.y_vars = y_vars = list(y_vars)
         self.square_grid = self.x_vars == self.y_vars
+
         if not x_vars:
-            raise ValueError('No variables found for grid columns.')
+            raise ValueError("No variables found for grid columns.")
         if not y_vars:
-            raise ValueError('No variables found for grid rows.')
+            raise ValueError("No variables found for grid rows.")
+
+        # Create the figure and the array of subplots
         figsize = len(x_vars) * height * aspect, len(y_vars) * height
+
         with _disable_autolayout():
             fig = plt.figure(figsize=figsize)
-        axes = fig.subplots(len(y_vars), len(x_vars), sharex='col', sharey=
-            'row', squeeze=False)
+
+        axes = fig.subplots(len(y_vars), len(x_vars),
+                            sharex="col", sharey="row",
+                            squeeze=False)
+
+        # Possibly remove upper axes to make a corner grid
+        # Note: setting up the axes is usually the most time-intensive part
+        # of using the PairGrid. We are foregoing the speed improvement that
+        # we would get by just not setting up the hidden axes so that we can
+        # avoid implementing fig.subplots ourselves. But worth thinking about.
         self._corner = corner
         if corner:
             hide_indices = np.triu_indices_from(axes, 1)
             for i, j in zip(*hide_indices):
                 axes[i, j].remove()
                 axes[i, j] = None
+
         self._figure = fig
         self.axes = axes
         self.data = data
+
+        # Save what we are going to do with the diagonal
         self.diag_sharey = diag_sharey
         self.diag_vars = None
         self.diag_axes = None
+
         self._dropna = dropna
+
+        # Label the axes
         self._add_axis_labels()
+
+        # Sort out the hue variable
         self._hue_var = hue
         if hue is None:
-            self.hue_names = hue_order = ['_nolegend_']
-            self.hue_vals = pd.Series(['_nolegend_'] * len(data), index=
-                data.index)
+            self.hue_names = hue_order = ["_nolegend_"]
+            self.hue_vals = pd.Series(["_nolegend_"] * len(data),
+                                      index=data.index)
         else:
+            # We need hue_order and hue_names because the former is used to control
+            # the order of drawing and the latter is used to control the order of
+            # the legend. hue_names can become string-typed while hue_order must
+            # retain the type of the input data. This is messy but results from
+            # the fact that PairGrid can implement the hue-mapping logic itself
+            # (and was originally written exclusively that way) but now can delegate
+            # to the axes-level functions, while always handling legend creation.
+            # See GH2307
             hue_names = hue_order = categorical_order(data[hue], hue_order)
             if dropna:
+                # Filter NA from the list of unique hue names
                 hue_names = list(filter(pd.notnull, hue_names))
             self.hue_names = hue_names
             self.hue_vals = data[hue]
+
+        # Additional dict of kwarg -> list of values for mapping the hue var
         self.hue_kws = hue_kws if hue_kws is not None else {}
+
         self._orig_palette = palette
         self._hue_order = hue_order
         self.palette = self._get_palette(data, hue, hue_order, palette)
         self._legend_data = {}
+
+        # Make the plot look nice
         for ax in axes[:-1, :].flat:
             if ax is None:
                 continue
@@ -751,6 +1347,7 @@ class PairGrid(Grid):
                 label.set_visible(False)
             ax.xaxis.offsetText.set_visible(False)
             ax.xaxis.label.set_visible(False)
+
         for ax in axes[:, 1:].flat:
             if ax is None:
                 continue
@@ -758,7 +1355,8 @@ class PairGrid(Grid):
                 label.set_visible(False)
             ax.yaxis.offsetText.set_visible(False)
             ax.yaxis.label.set_visible(False)
-        self._tight_layout_rect = [0.01, 0.01, 0.99, 0.99]
+
+        self._tight_layout_rect = [.01, .01, .99, .99]
         self._tight_layout_pad = layout_pad
         self._despine = despine
         if despine:
@@ -776,7 +1374,11 @@ class PairGrid(Grid):
             called ``color`` and  ``label``.

         """
-        pass
+        row_indices, col_indices = np.indices(self.axes.shape)
+        indices = zip(row_indices.flat, col_indices.flat)
+        self._map_bivariate(func, indices, **kwargs)
+
+        return self

     def map_lower(self, func, **kwargs):
         """Plot with a bivariate function on the lower diagonal subplots.
@@ -789,7 +1391,9 @@ class PairGrid(Grid):
             called ``color`` and  ``label``.

         """
-        pass
+        indices = zip(*np.tril_indices_from(self.axes, -1))
+        self._map_bivariate(func, indices, **kwargs)
+        return self

     def map_upper(self, func, **kwargs):
         """Plot with a bivariate function on the upper diagonal subplots.
@@ -802,7 +1406,9 @@ class PairGrid(Grid):
             called ``color`` and  ``label``.

         """
-        pass
+        indices = zip(*np.triu_indices_from(self.axes, 1))
+        self._map_bivariate(func, indices, **kwargs)
+        return self

     def map_offdiag(self, func, **kwargs):
         """Plot with a bivariate function on the off-diagonal subplots.
@@ -815,7 +1421,18 @@ class PairGrid(Grid):
             called ``color`` and  ``label``.

         """
-        pass
+        if self.square_grid:
+            self.map_lower(func, **kwargs)
+            if not self._corner:
+                self.map_upper(func, **kwargs)
+        else:
+            indices = []
+            for i, y_var in enumerate(self.y_vars):
+                for j, x_var in enumerate(self.x_vars):
+                    if x_var != y_var:
+                        indices.append((i, j))
+            self._map_bivariate(func, indices, **kwargs)
+        return self

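# Illustrative usage sketch (not part of the patch) for the PairGrid mapping
# methods restored above; it assumes a small synthetic DataFrame and the public
# seaborn axes-level functions.
import numpy as np
import pandas as pd
import seaborn as sns

rng = np.random.default_rng(0)
df = pd.DataFrame(rng.normal(size=(60, 3)), columns=["a", "b", "c"])

g = sns.PairGrid(df)
g.map_upper(sns.scatterplot)   # bivariate scatter on the upper triangle
g.map_lower(sns.kdeplot)       # bivariate density on the lower triangle
# g.map_offdiag(...) would target both triangles in a single call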
     def map_diag(self, func, **kwargs):
         """Plot with a univariate function on each diagonal subplot.
@@ -828,31 +1445,235 @@ class PairGrid(Grid):
             called ``color`` and  ``label``.

         """
-        pass
+        # Add special diagonal axes for the univariate plot
+        if self.diag_axes is None:
+            diag_vars = []
+            diag_axes = []
+            for i, y_var in enumerate(self.y_vars):
+                for j, x_var in enumerate(self.x_vars):
+                    if x_var == y_var:
+
+                        # Make the density axes
+                        diag_vars.append(x_var)
+                        ax = self.axes[i, j]
+                        diag_ax = ax.twinx()
+                        diag_ax.set_axis_off()
+                        diag_axes.append(diag_ax)
+
+                        # Work around matplotlib bug
+                        # https://github.com/matplotlib/matplotlib/issues/15188
+                        if not plt.rcParams.get("ytick.left", True):
+                            for tick in ax.yaxis.majorTicks:
+                                tick.tick1line.set_visible(False)
+
+                        # Remove main y axis from density axes in a corner plot
+                        if self._corner:
+                            ax.yaxis.set_visible(False)
+                            if self._despine:
+                                utils.despine(ax=ax, left=True)
+                            # TODO add optional density ticks (on the right)
+                            # when drawing a corner plot?
+
+            if self.diag_sharey and diag_axes:
+                for ax in diag_axes[1:]:
+                    share_axis(diag_axes[0], ax, "y")
+
+            self.diag_vars = diag_vars
+            self.diag_axes = diag_axes
+
+        if "hue" not in signature(func).parameters:
+            return self._map_diag_iter_hue(func, **kwargs)
+
+        # Loop over diagonal variables and axes, making one plot in each
+        for var, ax in zip(self.diag_vars, self.diag_axes):
+
+            plot_kwargs = kwargs.copy()
+            if str(func.__module__).startswith("seaborn"):
+                plot_kwargs["ax"] = ax
+            else:
+                plt.sca(ax)
+
+            vector = self.data[var]
+            if self._hue_var is not None:
+                hue = self.data[self._hue_var]
+            else:
+                hue = None
+
+            if self._dropna:
+                not_na = vector.notna()
+                if hue is not None:
+                    not_na &= hue.notna()
+                vector = vector[not_na]
+                if hue is not None:
+                    hue = hue[not_na]
+
+            plot_kwargs.setdefault("hue", hue)
+            plot_kwargs.setdefault("hue_order", self._hue_order)
+            plot_kwargs.setdefault("palette", self._orig_palette)
+            func(x=vector, **plot_kwargs)
+            ax.legend_ = None
+
+        self._add_axis_labels()
+        return self

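# Illustrative sketch (not part of the patch): univariate plots on the twinned
# diagonal axes created by map_diag, with bivariate plots everywhere else.
import numpy as np
import pandas as pd
import seaborn as sns

rng = np.random.default_rng(1)
df = pd.DataFrame(rng.normal(size=(60, 3)), columns=["x1", "x2", "x3"])

g = sns.PairGrid(df, diag_sharey=False)
g.map_diag(sns.histplot)        # histogram on each diagonal subplot
g.map_offdiag(sns.scatterplot)  # scatter on every off-diagonal subplot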
     def _map_diag_iter_hue(self, func, **kwargs):
         """Put marginal plot on each diagonal axes, iterating over hue."""
-        pass
+        # Plot on each of the diagonal axes
+        fixed_color = kwargs.pop("color", None)
+
+        for var, ax in zip(self.diag_vars, self.diag_axes):
+            hue_grouped = self.data[var].groupby(self.hue_vals, observed=True)
+
+            plot_kwargs = kwargs.copy()
+            if str(func.__module__).startswith("seaborn"):
+                plot_kwargs["ax"] = ax
+            else:
+                plt.sca(ax)
+
+            for k, label_k in enumerate(self._hue_order):
+
+                # Attempt to get data for this level, allowing for empty
+                try:
+                    data_k = hue_grouped.get_group(label_k)
+                except KeyError:
+                    data_k = pd.Series([], dtype=float)
+
+                if fixed_color is None:
+                    color = self.palette[k]
+                else:
+                    color = fixed_color
+
+                if self._dropna:
+                    data_k = utils.remove_na(data_k)
+
+                if str(func.__module__).startswith("seaborn"):
+                    func(x=data_k, label=label_k, color=color, **plot_kwargs)
+                else:
+                    func(data_k, label=label_k, color=color, **plot_kwargs)
+
+        self._add_axis_labels()
+
+        return self

     def _map_bivariate(self, func, indices, **kwargs):
         """Draw a bivariate plot on the indicated axes."""
-        pass
+        # This is a hack to handle the fact that new distribution plots don't add
+        # their artists onto the axes. This is probably superior in general, but
+        # we'll need a better way to handle it in the axisgrid functions.
+        from .distributions import histplot, kdeplot
+        if func is histplot or func is kdeplot:
+            self._extract_legend_handles = True
+
+        kws = kwargs.copy()  # Use copy as we insert other kwargs
+        for i, j in indices:
+            x_var = self.x_vars[j]
+            y_var = self.y_vars[i]
+            ax = self.axes[i, j]
+            if ax is None:  # i.e. we are in corner mode
+                continue
+            self._plot_bivariate(x_var, y_var, ax, func, **kws)
+        self._add_axis_labels()
+
+        if "hue" in signature(func).parameters:
+            self.hue_names = list(self._legend_data)

     def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):
         """Draw a bivariate plot on the specified axes."""
-        pass
+        if "hue" not in signature(func).parameters:
+            self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)
+            return
+
+        kwargs = kwargs.copy()
+        if str(func.__module__).startswith("seaborn"):
+            kwargs["ax"] = ax
+        else:
+            plt.sca(ax)
+
+        if x_var == y_var:
+            axes_vars = [x_var]
+        else:
+            axes_vars = [x_var, y_var]
+
+        if self._hue_var is not None and self._hue_var not in axes_vars:
+            axes_vars.append(self._hue_var)
+
+        data = self.data[axes_vars]
+        if self._dropna:
+            data = data.dropna()
+
+        x = data[x_var]
+        y = data[y_var]
+        if self._hue_var is None:
+            hue = None
+        else:
+            hue = data.get(self._hue_var)
+
+        if "hue" not in kwargs:
+            kwargs.update({
+                "hue": hue, "hue_order": self._hue_order, "palette": self._orig_palette,
+            })
+        func(x=x, y=y, **kwargs)
+
+        self._update_legend_data(ax)

     def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):
         """Draw a bivariate plot while iterating over hue subsets."""
-        pass
+        kwargs = kwargs.copy()
+        if str(func.__module__).startswith("seaborn"):
+            kwargs["ax"] = ax
+        else:
+            plt.sca(ax)
+
+        if x_var == y_var:
+            axes_vars = [x_var]
+        else:
+            axes_vars = [x_var, y_var]
+
+        hue_grouped = self.data.groupby(self.hue_vals, observed=True)
+        for k, label_k in enumerate(self._hue_order):
+
+            kws = kwargs.copy()
+
+            # Attempt to get data for this level, allowing for empty
+            try:
+                data_k = hue_grouped.get_group(label_k)
+            except KeyError:
+                data_k = pd.DataFrame(columns=axes_vars,
+                                      dtype=float)
+
+            if self._dropna:
+                data_k = data_k[axes_vars].dropna()
+
+            x = data_k[x_var]
+            y = data_k[y_var]
+
+            for kw, val_list in self.hue_kws.items():
+                kws[kw] = val_list[k]
+            kws.setdefault("color", self.palette[k])
+            if self._hue_var is not None:
+                kws["label"] = label_k
+
+            if str(func.__module__).startswith("seaborn"):
+                func(x=x, y=y, **kws)
+            else:
+                func(x, y, **kws)
+
+        self._update_legend_data(ax)

     def _add_axis_labels(self):
         """Add labels to the left and bottom Axes."""
-        pass
+        for ax, label in zip(self.axes[-1, :], self.x_vars):
+            ax.set_xlabel(label)
+        for ax, label in zip(self.axes[:, 0], self.y_vars):
+            ax.set_ylabel(label)

     def _find_numeric_cols(self, data):
         """Find which variables in a DataFrame are numeric."""
-        pass
+        numeric_cols = []
+        for col in data:
+            if variable_type(data[col]) == "numeric":
+                numeric_cols.append(col)
+        return numeric_cols


 class JointGrid(_BaseGrid):
@@ -863,22 +1684,34 @@ class JointGrid(_BaseGrid):

     """

-    def __init__(self, data=None, *, x=None, y=None, hue=None, height=6,
-        ratio=5, space=0.2, palette=None, hue_order=None, hue_norm=None,
-        dropna=False, xlim=None, ylim=None, marginal_ticks=False):
+    def __init__(
+        self, data=None, *,
+        x=None, y=None, hue=None,
+        height=6, ratio=5, space=.2,
+        palette=None, hue_order=None, hue_norm=None,
+        dropna=False, xlim=None, ylim=None, marginal_ticks=False,
+    ):
+
+        # Set up the subplot grid
         f = plt.figure(figsize=(height, height))
         gs = plt.GridSpec(ratio + 1, ratio + 1)
+
         ax_joint = f.add_subplot(gs[1:, :-1])
         ax_marg_x = f.add_subplot(gs[0, :-1], sharex=ax_joint)
         ax_marg_y = f.add_subplot(gs[1:, -1], sharey=ax_joint)
+
         self._figure = f
         self.ax_joint = ax_joint
         self.ax_marg_x = ax_marg_x
         self.ax_marg_y = ax_marg_y
+
+        # Turn off tick visibility for the measure axis on the marginal plots
         plt.setp(ax_marg_x.get_xticklabels(), visible=False)
         plt.setp(ax_marg_y.get_yticklabels(), visible=False)
         plt.setp(ax_marg_x.get_xticklabels(minor=True), visible=False)
         plt.setp(ax_marg_y.get_yticklabels(minor=True), visible=False)
+
+        # Turn off the ticks on the density axis for the marginal plots
         if not marginal_ticks:
             plt.setp(ax_marg_x.yaxis.get_majorticklines(), visible=False)
             plt.setp(ax_marg_x.yaxis.get_minorticklines(), visible=False)
@@ -890,8 +1723,12 @@ class JointGrid(_BaseGrid):
             plt.setp(ax_marg_y.get_xticklabels(minor=True), visible=False)
             ax_marg_x.yaxis.grid(False)
             ax_marg_y.xaxis.grid(False)
+
+        # Process the input variables
         p = VectorPlotter(data=data, variables=dict(x=x, y=y, hue=hue))
         plot_data = p.plot_data.loc[:, p.plot_data.notna().any()]
+
+        # Possibly drop NA
         if dropna:
             plot_data = plot_data.dropna()

@@ -900,19 +1737,25 @@ class JointGrid(_BaseGrid):
             if vector is not None:
                 vector = vector.rename(p.variables.get(var, None))
             return vector
-        self.x = get_var('x')
-        self.y = get_var('y')
-        self.hue = get_var('hue')
-        for axis in 'xy':
+
+        self.x = get_var("x")
+        self.y = get_var("y")
+        self.hue = get_var("hue")
+
+        for axis in "xy":
             name = p.variables.get(axis, None)
             if name is not None:
-                getattr(ax_joint, f'set_{axis}label')(name)
+                getattr(ax_joint, f"set_{axis}label")(name)
+
         if xlim is not None:
             ax_joint.set_xlim(xlim)
         if ylim is not None:
             ax_joint.set_ylim(ylim)
-        self._hue_params = dict(palette=palette, hue_order=hue_order,
-            hue_norm=hue_norm)
+
+        # Store the semantic mapping parameters for axes-level functions
+        self._hue_params = dict(palette=palette, hue_order=hue_order, hue_norm=hue_norm)
+
+        # Make the grid look nice
         utils.despine(f)
         if not marginal_ticks:
             utils.despine(ax=ax_marg_x, left=True)
@@ -925,7 +1768,10 @@ class JointGrid(_BaseGrid):

     def _inject_kwargs(self, func, kws, params):
         """Add params to kws if they are accepted by func."""
-        pass
+        func_params = signature(func).parameters
+        for key, val in params.items():
+            if key in func_params:
+                kws.setdefault(key, val)

     def plot(self, joint_func, marginal_func, **kwargs):
         """Draw the plot by passing functions for joint and marginal axes.
@@ -949,7 +1795,9 @@ class JointGrid(_BaseGrid):
             Returns ``self`` for easy method chaining.

         """
-        pass
+        self.plot_marginals(marginal_func, **kwargs)
+        self.plot_joint(joint_func, **kwargs)
+        return self

     def plot_joint(self, func, **kwargs):
         """Draw a bivariate plot on the joint axes of the grid.
@@ -971,7 +1819,21 @@ class JointGrid(_BaseGrid):
             Returns ``self`` for easy method chaining.

         """
-        pass
+        kwargs = kwargs.copy()
+        if str(func.__module__).startswith("seaborn"):
+            kwargs["ax"] = self.ax_joint
+        else:
+            plt.sca(self.ax_joint)
+        if self.hue is not None:
+            kwargs["hue"] = self.hue
+            self._inject_kwargs(func, kwargs, self._hue_params)
+
+        if str(func.__module__).startswith("seaborn"):
+            func(x=self.x, y=self.y, **kwargs)
+        else:
+            func(self.x, self.y, **kwargs)
+
+        return self

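# Sketch (not part of the patch) of the two branches above: a seaborn function
# receives ax=self.ax_joint, while a plain matplotlib function is drawn on the
# current axes selected with plt.sca().
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

rng = np.random.default_rng(2)
df = pd.DataFrame({"x": rng.normal(size=200), "y": rng.normal(size=200)})

g = sns.JointGrid(data=df, x="x", y="y")
g.plot_joint(sns.kdeplot, fill=True)  # seaborn branch: kwargs["ax"] is set
g.plot_joint(plt.scatter, s=5)        # matplotlib branch: func(self.x, self.y, ...)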
     def plot_marginals(self, func, **kwargs):
         """Draw univariate plots on each marginal axes.
@@ -994,10 +1856,50 @@ class JointGrid(_BaseGrid):
             Returns ``self`` for easy method chaining.

         """
-        pass
+        seaborn_func = (
+            str(func.__module__).startswith("seaborn")
+            # deprecated distplot has a legacy API, special case it
+            and not func.__name__ == "distplot"
+        )
+        func_params = signature(func).parameters
+        kwargs = kwargs.copy()
+        if self.hue is not None:
+            kwargs["hue"] = self.hue
+            self._inject_kwargs(func, kwargs, self._hue_params)
+
+        if "legend" in func_params:
+            kwargs.setdefault("legend", False)
+
+        if "orientation" in func_params:
+            # e.g. plt.hist
+            orient_kw_x = {"orientation": "vertical"}
+            orient_kw_y = {"orientation": "horizontal"}
+        elif "vertical" in func_params:
+            # e.g. sns.distplot (also how did this get backwards?)
+            orient_kw_x = {"vertical": False}
+            orient_kw_y = {"vertical": True}
+
+        if seaborn_func:
+            func(x=self.x, ax=self.ax_marg_x, **kwargs)
+        else:
+            plt.sca(self.ax_marg_x)
+            func(self.x, **orient_kw_x, **kwargs)
+
+        if seaborn_func:
+            func(y=self.y, ax=self.ax_marg_y, **kwargs)
+        else:
+            plt.sca(self.ax_marg_y)
+            func(self.y, **orient_kw_y, **kwargs)

-    def refline(self, *, x=None, y=None, joint=True, marginal=True, color=
-        '.5', linestyle='--', **line_kws):
+        self.ax_marg_x.yaxis.get_label().set_visible(False)
+        self.ax_marg_y.xaxis.get_label().set_visible(False)
+
+        return self
+
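# Sketch (not part of the patch) of the orientation handling above: plt.hist
# exposes an `orientation` parameter, so the x marginal is drawn vertically and
# the y marginal horizontally.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

rng = np.random.default_rng(3)
df = pd.DataFrame({"x": rng.normal(size=150), "y": rng.normal(size=150)})

g = sns.JointGrid(data=df, x="x", y="y")
g.plot_joint(sns.scatterplot)
g.plot_marginals(plt.hist, bins=15)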
+    def refline(
+        self, *, x=None, y=None, joint=True, marginal=True,
+        color='.5', linestyle='--', **line_kws
+    ):
         """Add a reference line(s) to joint and/or marginal axes.

         Parameters
@@ -1021,9 +1923,24 @@ class JointGrid(_BaseGrid):
             Returns ``self`` for easy method chaining.

         """
-        pass
+        line_kws['color'] = color
+        line_kws['linestyle'] = linestyle
+
+        if x is not None:
+            if joint:
+                self.ax_joint.axvline(x, **line_kws)
+            if marginal:
+                self.ax_marg_x.axvline(x, **line_kws)

-    def set_axis_labels(self, xlabel='', ylabel='', **kwargs):
+        if y is not None:
+            if joint:
+                self.ax_joint.axhline(y, **line_kws)
+            if marginal:
+                self.ax_marg_y.axhline(y, **line_kws)
+
+        return self
+
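# Sketch (not part of the patch): reference lines added to both the joint axes
# and the corresponding marginal axes.
import numpy as np
import pandas as pd
import seaborn as sns

rng = np.random.default_rng(4)
df = pd.DataFrame({"x": rng.normal(size=100), "y": rng.normal(size=100)})

g = sns.JointGrid(data=df, x="x", y="y")
g.plot(sns.scatterplot, sns.histplot)
g.refline(x=0, y=0)  # dashed gray lines by default (color=".5", linestyle="--")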
+    def set_axis_labels(self, xlabel="", ylabel="", **kwargs):
         """Set axis labels on the bivariate axes.

         Parameters
@@ -1042,11 +1959,13 @@ class JointGrid(_BaseGrid):
             Returns ``self`` for easy method chaining.

         """
-        pass
+        self.ax_joint.set_xlabel(xlabel, **kwargs)
+        self.ax_joint.set_ylabel(ylabel, **kwargs)
+        return self


-JointGrid.__init__.__doc__ = (
-    """Set up the grid of subplots and store data internally for easy plotting.
+JointGrid.__init__.__doc__ = """\
+Set up the grid of subplots and store data internally for easy plotting.

 Parameters
 ----------
@@ -1082,14 +2001,20 @@ Examples

 .. include:: ../docstrings/JointGrid.rst

-"""
-    .format(params=_param_docs, seealso=_core_docs['seealso']))
+""".format(
+    params=_param_docs,
+    seealso=_core_docs["seealso"],
+)


-def pairplot(data, *, hue=None, hue_order=None, palette=None, vars=None,
-    x_vars=None, y_vars=None, kind='scatter', diag_kind='auto', markers=
-    None, height=2.5, aspect=1, corner=False, dropna=False, plot_kws=None,
-    diag_kws=None, grid_kws=None, size=None):
+def pairplot(
+    data, *,
+    hue=None, hue_order=None, palette=None,
+    vars=None, x_vars=None, y_vars=None,
+    kind="scatter", diag_kind="auto", markers=None,
+    height=2.5, aspect=1, corner=False, dropna=False,
+    plot_kws=None, diag_kws=None, grid_kws=None, size=None,
+):
     """Plot pairwise relationships in a dataset.

     By default, this function will create a grid of Axes such that each numeric
@@ -1164,11 +2089,260 @@ def pairplot(data, *, hue=None, hue_order=None, palette=None, vars=None,
     .. include:: ../docstrings/pairplot.rst

     """
-    pass
+    # Avoid circular import
+    from .distributions import histplot, kdeplot
+
+    # Handle deprecations
+    if size is not None:
+        height = size
+        msg = ("The `size` parameter has been renamed to `height`; "
+               "please update your code.")
+        warnings.warn(msg, UserWarning)
+
+    if not isinstance(data, pd.DataFrame):
+        raise TypeError(
+            f"'data' must be pandas DataFrame object, not: {type(data)}")
+
+    plot_kws = {} if plot_kws is None else plot_kws.copy()
+    diag_kws = {} if diag_kws is None else diag_kws.copy()
+    grid_kws = {} if grid_kws is None else grid_kws.copy()
+
+    # Resolve "auto" diag kind
+    if diag_kind == "auto":
+        if hue is None:
+            diag_kind = "kde" if kind == "kde" else "hist"
+        else:
+            diag_kind = "hist" if kind == "hist" else "kde"
+
+    # Set up the PairGrid
+    grid_kws.setdefault("diag_sharey", diag_kind == "hist")
+    grid = PairGrid(data, vars=vars, x_vars=x_vars, y_vars=y_vars, hue=hue,
+                    hue_order=hue_order, palette=palette, corner=corner,
+                    height=height, aspect=aspect, dropna=dropna, **grid_kws)
+
+    # Add the markers here as PairGrid has figured out how many levels of the
+    # hue variable are needed and we don't want to duplicate that process
+    if markers is not None:
+        if kind == "reg":
+            # Needed until regplot supports style
+            if grid.hue_names is None:
+                n_markers = 1
+            else:
+                n_markers = len(grid.hue_names)
+            if not isinstance(markers, list):
+                markers = [markers] * n_markers
+            if len(markers) != n_markers:
+                raise ValueError("markers must be a singleton or a list of "
+                                 "markers for each level of the hue variable")
+            grid.hue_kws = {"marker": markers}
+        elif kind == "scatter":
+            if isinstance(markers, str):
+                plot_kws["marker"] = markers
+            elif hue is not None:
+                plot_kws["style"] = data[hue]
+                plot_kws["markers"] = markers
+
+    # Draw the marginal plots on the diagonal
+    diag_kws = diag_kws.copy()
+    diag_kws.setdefault("legend", False)
+    if diag_kind == "hist":
+        grid.map_diag(histplot, **diag_kws)
+    elif diag_kind == "kde":
+        diag_kws.setdefault("fill", True)
+        diag_kws.setdefault("warn_singular", False)
+        grid.map_diag(kdeplot, **diag_kws)
+
+    # Maybe plot on the off-diagonals
+    if diag_kind is not None:
+        plotter = grid.map_offdiag
+    else:
+        plotter = grid.map
+
+    if kind == "scatter":
+        from .relational import scatterplot  # Avoid circular import
+        plotter(scatterplot, **plot_kws)
+    elif kind == "reg":
+        from .regression import regplot  # Avoid circular import
+        plotter(regplot, **plot_kws)
+    elif kind == "kde":
+        from .distributions import kdeplot  # Avoid circular import
+        plot_kws.setdefault("warn_singular", False)
+        plotter(kdeplot, **plot_kws)
+    elif kind == "hist":
+        from .distributions import histplot  # Avoid circular import
+        plotter(histplot, **plot_kws)
+
+    # Add a legend
+    if hue is not None:
+        grid.add_legend()
+
+    grid.tight_layout()
+
+    return grid
+
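# Figure-level usage sketch (not part of the patch); the hue assignment
# exercises the PairGrid legend machinery, and corner=True drops the upper
# triangle of axes.
import numpy as np
import pandas as pd
import seaborn as sns

rng = np.random.default_rng(5)
df = pd.DataFrame({
    "a": rng.normal(size=90),
    "b": rng.normal(size=90),
    "group": np.repeat(["u", "v", "w"], 30),
})
grid = sns.pairplot(df, hue="group", kind="scatter", diag_kind="kde", corner=True)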
+
+def jointplot(
+    data=None, *, x=None, y=None, hue=None, kind="scatter",
+    height=6, ratio=5, space=.2, dropna=False, xlim=None, ylim=None,
+    color=None, palette=None, hue_order=None, hue_norm=None, marginal_ticks=False,
+    joint_kws=None, marginal_kws=None,
+    **kwargs
+):
+    # Avoid circular imports
+    from .relational import scatterplot
+    from .regression import regplot, residplot
+    from .distributions import histplot, kdeplot, _freedman_diaconis_bins
+
+    if kwargs.pop("ax", None) is not None:
+        msg = "Ignoring `ax`; jointplot is a figure-level function."
+        warnings.warn(msg, UserWarning, stacklevel=2)
+
+    # Set up empty default kwarg dicts
+    joint_kws = {} if joint_kws is None else joint_kws.copy()
+    joint_kws.update(kwargs)
+    marginal_kws = {} if marginal_kws is None else marginal_kws.copy()
+
+    # Handle deprecations of distplot-specific kwargs
+    distplot_keys = [
+        "rug", "fit", "hist_kws", "norm_hist" "hist_kws", "rug_kws",
+    ]
+    unused_keys = []
+    for key in distplot_keys:
+        if key in marginal_kws:
+            unused_keys.append(key)
+            marginal_kws.pop(key)
+    if unused_keys and kind != "kde":
+        msg = (
+            "The marginal plotting function has changed to `histplot`,"
+            " which does not accept the following argument(s): {}."
+        ).format(", ".join(unused_keys))
+        warnings.warn(msg, UserWarning)
+
+    # Validate the plot kind
+    plot_kinds = ["scatter", "hist", "hex", "kde", "reg", "resid"]
+    _check_argument("kind", plot_kinds, kind)
+
+    # Raise early if using `hue` with a kind that does not support it
+    if hue is not None and kind in ["hex", "reg", "resid"]:
+        msg = f"Use of `hue` with `kind='{kind}'` is not currently supported."
+        raise ValueError(msg)
+
+    # Make a colormap based off the plot color
+    # (Currently used only for kind="hex")
+    if color is None:
+        color = "C0"
+    color_rgb = mpl.colors.colorConverter.to_rgb(color)
+    colors = [set_hls_values(color_rgb, l=val) for val in np.linspace(1, 0, 12)]
+    cmap = blend_palette(colors, as_cmap=True)
+
+    # Matplotlib's hexbin plot is not na-robust
+    if kind == "hex":
+        dropna = True
+
+    # Initialize the JointGrid object
+    grid = JointGrid(
+        data=data, x=x, y=y, hue=hue,
+        palette=palette, hue_order=hue_order, hue_norm=hue_norm,
+        dropna=dropna, height=height, ratio=ratio, space=space,
+        xlim=xlim, ylim=ylim, marginal_ticks=marginal_ticks,
+    )
+
+    if grid.hue is not None:
+        marginal_kws.setdefault("legend", False)
+
+    # Plot the data using the grid
+    if kind.startswith("scatter"):
+
+        joint_kws.setdefault("color", color)
+        grid.plot_joint(scatterplot, **joint_kws)
+
+        if grid.hue is None:
+            marg_func = histplot
+        else:
+            marg_func = kdeplot
+            marginal_kws.setdefault("warn_singular", False)
+            marginal_kws.setdefault("fill", True)
+
+        marginal_kws.setdefault("color", color)
+        grid.plot_marginals(marg_func, **marginal_kws)
+
+    elif kind.startswith("hist"):
+
+        # TODO process pair parameters for bins, etc. and pass
+        # to both joint and marginal plots
+
+        joint_kws.setdefault("color", color)
+        grid.plot_joint(histplot, **joint_kws)
+
+        marginal_kws.setdefault("kde", False)
+        marginal_kws.setdefault("color", color)
+
+        marg_x_kws = marginal_kws.copy()
+        marg_y_kws = marginal_kws.copy()
+
+        pair_keys = "bins", "binwidth", "binrange"
+        for key in pair_keys:
+            if isinstance(joint_kws.get(key), tuple):
+                x_val, y_val = joint_kws[key]
+                marg_x_kws.setdefault(key, x_val)
+                marg_y_kws.setdefault(key, y_val)
+
+        histplot(data=data, x=x, hue=hue, **marg_x_kws, ax=grid.ax_marg_x)
+        histplot(data=data, y=y, hue=hue, **marg_y_kws, ax=grid.ax_marg_y)
+
+    elif kind.startswith("kde"):
+
+        joint_kws.setdefault("color", color)
+        joint_kws.setdefault("warn_singular", False)
+        grid.plot_joint(kdeplot, **joint_kws)
+
+        marginal_kws.setdefault("color", color)
+        if "fill" in joint_kws:
+            marginal_kws.setdefault("fill", joint_kws["fill"])
+
+        grid.plot_marginals(kdeplot, **marginal_kws)
+
+    elif kind.startswith("hex"):
+
+        x_bins = min(_freedman_diaconis_bins(grid.x), 50)
+        y_bins = min(_freedman_diaconis_bins(grid.y), 50)
+        gridsize = int(np.mean([x_bins, y_bins]))
+
+        joint_kws.setdefault("gridsize", gridsize)
+        joint_kws.setdefault("cmap", cmap)
+        grid.plot_joint(plt.hexbin, **joint_kws)
+
+        marginal_kws.setdefault("kde", False)
+        marginal_kws.setdefault("color", color)
+        grid.plot_marginals(histplot, **marginal_kws)
+
+    elif kind.startswith("reg"):
+
+        marginal_kws.setdefault("color", color)
+        marginal_kws.setdefault("kde", True)
+        grid.plot_marginals(histplot, **marginal_kws)
+
+        joint_kws.setdefault("color", color)
+        grid.plot_joint(regplot, **joint_kws)
+
+    elif kind.startswith("resid"):
+
+        joint_kws.setdefault("color", color)
+        grid.plot_joint(residplot, **joint_kws)
+
+        x, y = grid.ax_joint.collections[0].get_offsets().T
+        marginal_kws.setdefault("color", color)
+        histplot(x=x, hue=hue, ax=grid.ax_marg_x, **marginal_kws)
+        histplot(y=y, hue=hue, ax=grid.ax_marg_y, **marginal_kws)
+
+    # Make the main axes active in the matplotlib state machine
+    plt.sca(grid.ax_joint)
+
+    return grid

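# Usage sketch (not part of the patch) for two of the supported kinds; "hex"
# forces dropna=True and uses the blended colormap built from `color` above.
import numpy as np
import pandas as pd
import seaborn as sns

rng = np.random.default_rng(6)
df = pd.DataFrame({"x": rng.normal(size=300), "y": rng.normal(size=300)})

sns.jointplot(data=df, x="x", y="y", kind="hex")
sns.jointplot(data=df, x="x", y="y", kind="kde", fill=True)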

-jointplot.__doc__ = (
-    """Draw a plot of two variables with bivariate and univariate graphs.
+jointplot.__doc__ = """\
+Draw a plot of two variables with bivariate and univariate graphs.

 This function provides a convenient interface to the :class:`JointGrid`
 class, with several canned plot kinds. This is intended to be a fairly
@@ -1220,6 +2394,8 @@ Examples

 .. include:: ../docstrings/jointplot.rst

-"""
-    .format(params=_param_docs, returns=_core_docs['returns'], seealso=
-    _core_docs['seealso']))
+""".format(
+    params=_param_docs,
+    returns=_core_docs["returns"],
+    seealso=_core_docs["seealso"],
+)
diff --git a/seaborn/categorical.py b/seaborn/categorical.py
index b0ff7288..a43c085b 100644
--- a/seaborn/categorical.py
+++ b/seaborn/categorical.py
@@ -3,62 +3,131 @@ from textwrap import dedent
 import warnings
 from colorsys import rgb_to_hls
 from functools import partial
+
 import numpy as np
 import pandas as pd
+
 import matplotlib as mpl
 from matplotlib.cbook import normalize_kwargs
 from matplotlib.collections import PatchCollection
 from matplotlib.markers import MarkerStyle
 from matplotlib.patches import Rectangle
 import matplotlib.pyplot as plt
+
 from seaborn._core.typing import default, deprecated
 from seaborn._base import VectorPlotter, infer_orient, categorical_order
 from seaborn._stats.density import KDE
 from seaborn import utils
-from seaborn.utils import desaturate, _check_argument, _draw_figure, _default_color, _get_patch_legend_artist, _get_transform_functions, _scatter_legend_artist, _version_predates
+from seaborn.utils import (
+    desaturate,
+    _check_argument,
+    _draw_figure,
+    _default_color,
+    _get_patch_legend_artist,
+    _get_transform_functions,
+    _scatter_legend_artist,
+    _version_predates,
+)
 from seaborn._compat import groupby_apply_include_groups
-from seaborn._statistics import EstimateAggregator, LetterValues, WeightedAggregator
+from seaborn._statistics import (
+    EstimateAggregator,
+    LetterValues,
+    WeightedAggregator,
+)
 from seaborn.palettes import light_palette
 from seaborn.axisgrid import FacetGrid, _facet_docs
-__all__ = ['catplot', 'stripplot', 'swarmplot', 'boxplot', 'violinplot',
-    'boxenplot', 'pointplot', 'barplot', 'countplot']
+
+
+__all__ = [
+    "catplot",
+    "stripplot", "swarmplot",
+    "boxplot", "violinplot", "boxenplot",
+    "pointplot", "barplot", "countplot",
+]


 class _CategoricalPlotter(VectorPlotter):
-    wide_structure = {'x': '@columns', 'y': '@values', 'hue': '@columns'}
-    flat_structure = {'y': '@values'}
-    _legend_attributes = ['color']

-    def __init__(self, data=None, variables={}, order=None, orient=None,
-        require_numeric=False, color=None, legend='auto'):
+    wide_structure = {"x": "@columns", "y": "@values", "hue": "@columns"}
+    flat_structure = {"y": "@values"}
+
+    _legend_attributes = ["color"]
+
+    def __init__(
+        self,
+        data=None,
+        variables={},
+        order=None,
+        orient=None,
+        require_numeric=False,
+        color=None,
+        legend="auto",
+    ):
+
         super().__init__(data=data, variables=variables)
-        if self.input_format == 'wide' and orient in ['h', 'y']:
-            self.plot_data = self.plot_data.rename(columns={'x': 'y', 'y': 'x'}
-                )
+
+        # This method takes care of some bookkeeping that is necessary because the
+        # original categorical plots (prior to the 2021 refactor) had some rules that
+        # don't fit exactly into VectorPlotter logic. It may be wise to have a second
+        # round of refactoring that moves the logic deeper, but this will keep things
+        # relatively sensible for now.
+
+        # For wide data, orient determines assignment to x/y differently from the
+        # default VectorPlotter rules. If we do decide to make orient part of the
+        # _base variable assignment, we'll want to figure out how to express that.
+        if self.input_format == "wide" and orient in ["h", "y"]:
+            self.plot_data = self.plot_data.rename(columns={"x": "y", "y": "x"})
             orig_variables = set(self.variables)
-            orig_x = self.variables.pop('x', None)
-            orig_y = self.variables.pop('y', None)
-            orig_x_type = self.var_types.pop('x', None)
-            orig_y_type = self.var_types.pop('y', None)
-            if 'x' in orig_variables:
-                self.variables['y'] = orig_x
-                self.var_types['y'] = orig_x_type
-            if 'y' in orig_variables:
-                self.variables['x'] = orig_y
-                self.var_types['x'] = orig_y_type
-        if (self.input_format == 'wide' and 'hue' in self.variables and 
-            color is not None):
-            self.plot_data.drop('hue', axis=1)
-            self.variables.pop('hue')
-        self.orient = infer_orient(x=self.plot_data.get('x', None), y=self.
-            plot_data.get('y', None), orient=orient, require_numeric=False)
+            orig_x = self.variables.pop("x", None)
+            orig_y = self.variables.pop("y", None)
+            orig_x_type = self.var_types.pop("x", None)
+            orig_y_type = self.var_types.pop("y", None)
+            if "x" in orig_variables:
+                self.variables["y"] = orig_x
+                self.var_types["y"] = orig_x_type
+            if "y" in orig_variables:
+                self.variables["x"] = orig_y
+                self.var_types["x"] = orig_y_type
+
+        # Initially there was more special code for wide-form data where plots were
+        # multi-colored by default and then either palette or color could be used.
+        # We want to provide backwards compatibility for this behavior in a relatively
+        # simply way, so we delete the hue information when color is specified.
+        if (
+            self.input_format == "wide"
+            and "hue" in self.variables
+            and color is not None
+        ):
+            self.plot_data.drop("hue", axis=1)
+            self.variables.pop("hue")
+
+        # The concept of an "orientation" is important to the original categorical
+        # plots, but there's no provision for it in VectorPlotter, so we need it here.
+        # Note that it could be useful for the other functions in at least two ways
+        # (orienting a univariate distribution plot from long-form data and selecting
+        # the aggregation axis in lineplot), so we may want to eventually refactor it.
+        self.orient = infer_orient(
+            x=self.plot_data.get("x", None),
+            y=self.plot_data.get("y", None),
+            orient=orient,
+            require_numeric=False,
+        )
+
         self.legend = legend
+
+        # Short-circuit in the case of an empty plot
         if not self.has_xy_data:
             return
+
+        # Categorical plots can be "univariate" in which case they get an anonymous
+        # category label on the opposite axis. Note: this duplicates code in the core
+        # scale_categorical function. We need to do it here because of the next line.
         if self.orient not in self.variables:
             self.variables[self.orient] = None
-            self.var_types[self.orient] = 'categorical'
-            self.plot_data[self.orient] = ''
+            self.var_types[self.orient] = "categorical"
+            self.plot_data[self.orient] = ""
+
+        # Categorical variables have discrete levels that we need to track
         cat_levels = categorical_order(self.plot_data[self.orient], order)
         self.var_levels[self.orient] = cat_levels

@@ -70,92 +139,1253 @@ class _CategoricalPlotter(VectorPlotter):
         It can be removed after completion of the work.

         """
-        pass
+        # The original categorical functions applied a palette to the categorical axis
+        # by default. We want to require an explicit hue mapping, to be more consistent
+        # with how things work elsewhere now. I don't think there's any good way to
+        # do this gently -- because it's triggered by the default value of hue=None,
+        # users would always get a warning, unless we introduce some sentinel "default"
+        # argument for this change. That's possible, but asking users to set `hue=None`
+        # on every call is annoying.
+        # We are keeping the logic for implementing the old behavior in with the current
+        # system so that (a) we can punt on that decision and (b) we can ensure that
+        # refactored code passes old tests.
+        default_behavior = color is None or palette is not None
+        if force_hue and "hue" not in self.variables and default_behavior:
+            self._redundant_hue = True
+            self.plot_data["hue"] = self.plot_data[self.orient]
+            self.variables["hue"] = self.variables[self.orient]
+            self.var_types["hue"] = "categorical"
+            hue_order = self.var_levels[self.orient]
+
+            # Because we convert the categorical axis variable to string,
+            # we need to update a dictionary palette too
+            if isinstance(palette, dict):
+                palette = {str(k): v for k, v in palette.items()}
+
+        else:
+            if "hue" in self.variables:
+                redundant = (self.plot_data["hue"] == self.plot_data[self.orient]).all()
+            else:
+                redundant = False
+            self._redundant_hue = redundant
+
+        # Previously, categorical plots had a trick where color= could seed the palette.
+        # Because that's an explicit parameterization, we are going to give it one
+        # release cycle with a warning before removing.
+        if "hue" in self.variables and palette is None and color is not None:
+            if not isinstance(color, str):
+                color = mpl.colors.to_hex(color)
+            palette = f"dark:{color}"
+            msg = (
+                "\n\nSetting a gradient palette using color= is deprecated and will be "
+                f"removed in v0.14.0. Set `palette='{palette}'` for the same effect.\n"
+            )
+            warnings.warn(msg, FutureWarning, stacklevel=3)
+
+        return palette, hue_order

     def _palette_without_hue_backcompat(self, palette, hue_order):
         """Provide one cycle where palette= implies hue= when not provided"""
-        pass
+        if "hue" not in self.variables and palette is not None:
+            msg = (
+                "\n\nPassing `palette` without assigning `hue` is deprecated "
+                f"and will be removed in v0.14.0. Assign the `{self.orient}` variable "
+                "to `hue` and set `legend=False` for the same effect.\n"
+            )
+            warnings.warn(msg, FutureWarning, stacklevel=3)
+
+            self.legend = False
+            self.plot_data["hue"] = self.plot_data[self.orient]
+            self.variables["hue"] = self.variables.get(self.orient)
+            self.var_types["hue"] = self.var_types.get(self.orient)
+
+            hue_order = self.var_levels.get(self.orient)
+            self._var_levels.pop("hue", None)
+
+        return hue_order

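# Illustrative trigger (not part of the patch) for the backcompat path above:
# passing palette= without hue= emits the FutureWarning and temporarily maps the
# categorical variable to hue so the old multi-color behavior is preserved.
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.DataFrame({
    "day": ["Mon", "Tue", "Wed"] * 10,
    "value": np.random.default_rng(0).normal(size=30),
})
sns.boxplot(data=df, x="day", y="value", palette="Set2")  # warns, colors by day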
     def _point_kwargs_backcompat(self, scale, join, kwargs):
         """Provide two cycles where scale= and join= work, but redirect to kwargs."""
-        pass
+        if scale is not deprecated:
+            lw = mpl.rcParams["lines.linewidth"] * 1.8 * scale
+            mew = lw * .75
+            ms = lw * 2
+
+            msg = (
+                "\n\n"
+                "The `scale` parameter is deprecated and will be removed in v0.15.0. "
+                "You can now control the size of each plot element using matplotlib "
+                "`Line2D` parameters (e.g., `linewidth`, `markersize`, etc.)."
+                "\n"
+            )
+            warnings.warn(msg, stacklevel=3)
+            kwargs.update(linewidth=lw, markeredgewidth=mew, markersize=ms)
+
+        if join is not deprecated:
+            msg = (
+                "\n\n"
+                "The `join` parameter is deprecated and will be removed in v0.15.0."
+            )
+            if not join:
+                msg += (
+                    " You can remove the line between points with `linestyle='none'`."
+                )
+                kwargs.update(linestyle="")
+            msg += "\n"
+            warnings.warn(msg, stacklevel=3)

     def _err_kws_backcompat(self, err_kws, errcolor, errwidth, capsize):
         """Provide two cycles where existing signature-level err_kws are handled."""
-        pass
-
-    def _violin_scale_backcompat(self, scale, scale_hue, density_norm,
-        common_norm):
+        def deprecate_err_param(name, key, val):
+            if val is deprecated:
+                return
+            suggest = f"err_kws={{'{key}': {val!r}}}"
+            msg = (
+                f"\n\nThe `{name}` parameter is deprecated. And will be removed "
+                f"in v0.15.0. Pass `{suggest}` instead.\n"
+            )
+            warnings.warn(msg, FutureWarning, stacklevel=4)
+            err_kws[key] = val
+
+        if errcolor is not None:
+            deprecate_err_param("errcolor", "color", errcolor)
+        deprecate_err_param("errwidth", "linewidth", errwidth)
+
+        if capsize is None:
+            capsize = 0
+            msg = (
+                "\n\nPassing `capsize=None` is deprecated and will be removed "
+                "in v0.15.0. Pass `capsize=0` to disable caps.\n"
+            )
+            warnings.warn(msg, FutureWarning, stacklevel=3)
+
+        return err_kws, capsize
+
+    def _violin_scale_backcompat(self, scale, scale_hue, density_norm, common_norm):
         """Provide two cycles of backcompat for scale kwargs"""
-        pass
+        if scale is not deprecated:
+            density_norm = scale
+            msg = (
+                "\n\nThe `scale` parameter has been renamed and will be removed "
+                f"in v0.15.0. Pass `density_norm={scale!r}` for the same effect."
+            )
+            warnings.warn(msg, FutureWarning, stacklevel=3)
+
+        if scale_hue is not deprecated:
+            common_norm = scale_hue
+            msg = (
+                "\n\nThe `scale_hue` parameter has been replaced and will be removed "
+                f"in v0.15.0. Pass `common_norm={not scale_hue}` for the same effect."
+            )
+            warnings.warn(msg, FutureWarning, stacklevel=3)
+
+        return density_norm, common_norm

     def _violin_bw_backcompat(self, bw, bw_method):
         """Provide two cycles of backcompat for violin bandwidth parameterization."""
-        pass
+        if bw is not deprecated:
+            bw_method = bw
+            msg = dedent(f"""\n
+                The `bw` parameter is deprecated in favor of `bw_method`/`bw_adjust`.
+                Setting `bw_method={bw!r}`, but please see docs for the new parameters
+                and update your code. This will become an error in seaborn v0.15.0.
+            """)
+            warnings.warn(msg, FutureWarning, stacklevel=3)
+        return bw_method

     def _boxen_scale_backcompat(self, scale, width_method):
         """Provide two cycles of backcompat for scale kwargs"""
-        pass
+        if scale is not deprecated:
+            width_method = scale
+            msg = (
+                "\n\nThe `scale` parameter has been renamed to `width_method` and "
+                f"will be removed in v0.15. Pass `width_method={scale!r}"
+            )
+            if scale == "area":
+                msg += ", but note that the result for 'area' will appear different."
+            else:
+                msg += " for the same effect."
+            warnings.warn(msg, FutureWarning, stacklevel=3)
+
+        return width_method

     def _complement_color(self, color, base_color, hue_map):
         """Allow a color to be set automatically using a basis of comparison."""
-        pass
+        if color == "gray":
+            msg = (
+                'Use "auto" to set automatic grayscale colors. From v0.14.0, '
+                '"gray" will default to matplotlib\'s definition.'
+            )
+            warnings.warn(msg, FutureWarning, stacklevel=3)
+            color = "auto"
+        elif color is None or color is default:
+            color = "auto"
+
+        if color != "auto":
+            return color
+
+        if hue_map.lookup_table is None:
+            if base_color is None:
+                return None
+            basis = [mpl.colors.to_rgb(base_color)]
+        else:
+            basis = [mpl.colors.to_rgb(c) for c in hue_map.lookup_table.values()]
+        unique_colors = np.unique(basis, axis=0)
+        light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]
+        lum = min(light_vals) * .6
+        return (lum, lum, lum)

     def _map_prop_with_hue(self, name, value, fallback, plot_kws):
         """Support pointplot behavior of modifying the marker/linestyle with hue."""
-        pass
+        if value is default:
+            value = plot_kws.pop(name, fallback)
+
+        if "hue" in self.variables:
+            levels = self._hue_map.levels
+            if isinstance(value, list):
+                mapping = {k: v for k, v in zip(levels, value)}
+            else:
+                mapping = {k: value for k in levels}
+        else:
+            mapping = {None: value}
+
+        return mapping

     def _adjust_cat_axis(self, ax, axis):
         """Set ticks and limits for a categorical variable."""
-        pass
+        # Note: in theory, this could happen in _attach for all categorical axes
+        # But two reasons not to do that:
+        # - If it happens before plotting, autoscaling messes up the plot limits
+        # - It would change existing plots from other seaborn functions
+        if self.var_types[axis] != "categorical":
+            return
+
+        # If both x/y data are empty, the correct way to set up the plot is
+        # somewhat undefined; because we don't add null category data to the plot in
+        # this case we don't *have* a categorical axis (yet), so best to just bail.
+        if self.plot_data[axis].empty:
+            return
+
+        # We can infer the total number of categories (including those from previous
+        # plots that are not part of the plot we are currently making) from the number
+        # of ticks, which matplotlib sets up while doing unit conversion. This feels
+        # slightly risky, as if we are relying on something that may be a matplotlib
+        # implementation detail. But I cannot think of a better way to keep track of
+        # the state from previous categorical calls (see GH2516 for context)
+        n = len(getattr(ax, f"get_{axis}ticks")())
+
+        if axis == "x":
+            ax.xaxis.grid(False)
+            ax.set_xlim(-.5, n - .5, auto=None)
+        else:
+            ax.yaxis.grid(False)
+            # Note limits that correspond to previously-inverted y axis
+            ax.set_ylim(n - .5, -.5, auto=None)

     def _dodge_needed(self):
         """Return True when use of `hue` would cause overlaps."""
-        pass
+        groupers = list({self.orient, "col", "row"} & set(self.variables))
+        if "hue" in self.variables:
+            orient = self.plot_data[groupers].value_counts()
+            paired = self.plot_data[[*groupers, "hue"]].value_counts()
+            return orient.size != paired.size
+        return False

     def _dodge(self, keys, data):
         """Apply a dodge transform to coordinates in place."""
-        pass
+        if "hue" not in self.variables:
+            # Short-circuit if hue variable was not assigned
+            # We could potentially warn when hue=None, dodge=True, user may be confused
+            # But I think it's fine to just treat it as a no-op.
+            return
+        hue_idx = self._hue_map.levels.index(keys["hue"])
+        n = len(self._hue_map.levels)
+        data["width"] /= n
+
+        full_width = data["width"] * n
+        offset = data["width"] * hue_idx + data["width"] / 2 - full_width / 2
+        data[self.orient] += offset

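# Standalone numeric sketch (not part of the patch) of the dodge arithmetic
# above: with 3 hue levels and unit width, each element narrows to 1/3 and the
# group of offsets stays centered on the original categorical position.
import numpy as np

n_levels, width = 3, 1.0
widths = np.full(n_levels, width / n_levels)
offsets = widths * np.arange(n_levels) + widths / 2 - width / 2
print(offsets)  # approximately [-0.333, 0.0, 0.333]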
-    def _invert_scale(self, ax, data, vars=('x', 'y')):
+    def _invert_scale(self, ax, data, vars=("x", "y")):
         """Undo scaling after computation so data are plotted correctly."""
-        pass
+        for var in vars:
+            _, inv = _get_transform_functions(ax, var[0])
+            if var == self.orient and "width" in data:
+                hw = data["width"] / 2
+                data["edge"] = inv(data[var] - hw)
+                data["width"] = inv(data[var] + hw) - data["edge"].to_numpy()
+            for suf in ["", "min", "max"]:
+                if (col := f"{var}{suf}") in data:
+                    data[col] = inv(data[col])
+
+    def _configure_legend(self, ax, func, common_kws=None, semantic_kws=None):
+        if self.legend == "auto":
+            show_legend = not self._redundant_hue and self.input_format != "wide"
+        else:
+            show_legend = bool(self.legend)
+        if show_legend:
+            self.add_legend_data(ax, func, common_kws, semantic_kws=semantic_kws)
+            handles, _ = ax.get_legend_handles_labels()
+            if handles:
+                ax.legend(title=self.legend_title)

     @property
     def _native_width(self):
         """Return unit of width separating categories on native numeric scale."""
-        pass
+        # Categorical data always have a unit width
+        if self.var_types[self.orient] == "categorical":
+            return 1
+
+        # Otherwise, define the width as the smallest space between observations
+        unique_values = np.unique(self.comp_data[self.orient])
+        if len(unique_values) > 1:
+            native_width = np.nanmin(np.diff(unique_values))
+        else:
+            native_width = 1
+        return native_width

     def _nested_offsets(self, width, dodge):
         """Return offsets for each hue level for dodged plots."""
-        pass
+        offsets = None
+        if "hue" in self.variables and self._hue_map.levels is not None:
+            n_levels = len(self._hue_map.levels)
+            if dodge:
+                each_width = width / n_levels
+                offsets = np.linspace(0, width - each_width, n_levels)
+                offsets -= offsets.mean()
+            else:
+                offsets = np.zeros(n_levels)
+        return offsets
+
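# Standalone sketch (not part of the patch) of the offset computation in
# _nested_offsets for a dodged plot with width 0.8 and 4 hue levels.
import numpy as np

width, n_levels = 0.8, 4
each_width = width / n_levels
offsets = np.linspace(0, width - each_width, n_levels)
offsets -= offsets.mean()
print(offsets)  # [-0.3, -0.1, 0.1, 0.3]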
+    # Note that the plotting methods here aim (in most cases) to produce the
+    # exact same artists as the original (pre 0.12) version of the code, so
+    # there is some weirdness that might not otherwise be clean or make sense in
+    # this context, such as adding empty artists for combinations of variables
+    # with no observations
+
+    def plot_strips(
+        self,
+        jitter,
+        dodge,
+        color,
+        plot_kws,
+    ):
+
+        width = .8 * self._native_width
+        offsets = self._nested_offsets(width, dodge)
+
+        if jitter is True:
+            jlim = 0.1
+        else:
+            jlim = float(jitter)
+        if "hue" in self.variables and dodge and self._hue_map.levels is not None:
+            jlim /= len(self._hue_map.levels)
+        jlim *= self._native_width
+        jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)
+
+        iter_vars = [self.orient]
+        if dodge:
+            iter_vars.append("hue")
+
+        ax = self.ax
+        dodge_move = jitter_move = 0
+
+        if "marker" in plot_kws and not MarkerStyle(plot_kws["marker"]).is_filled():
+            plot_kws.pop("edgecolor", None)
+
+        for sub_vars, sub_data in self.iter_data(iter_vars,
+                                                 from_comp_data=True,
+                                                 allow_empty=True):
+
+            ax = self._get_axes(sub_vars)
+
+            if offsets is not None and (offsets != 0).any():
+                dodge_move = offsets[sub_data["hue"].map(self._hue_map.levels.index)]
+
+            jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0
+
+            adjusted_data = sub_data[self.orient] + dodge_move + jitter_move
+            sub_data[self.orient] = adjusted_data
+            self._invert_scale(ax, sub_data)
+
+            points = ax.scatter(sub_data["x"], sub_data["y"], color=color, **plot_kws)
+            if "hue" in self.variables:
+                points.set_facecolors(self._hue_map(sub_data["hue"]))
+
+        self._configure_legend(ax, _scatter_legend_artist, common_kws=plot_kws)
+
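# User-level sketch (not part of the patch) that exercises the jitter and dodge
# handling in plot_strips through the public stripplot function.
import numpy as np
import pandas as pd
import seaborn as sns

rng = np.random.default_rng(7)
df = pd.DataFrame({
    "day": np.repeat(["Mon", "Tue", "Wed"], 40),
    "value": rng.normal(size=120),
    "group": np.tile(["A", "B"], 60),
})
sns.stripplot(data=df, x="day", y="value", hue="group", dodge=True, jitter=.2)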
+    def plot_swarms(
+        self,
+        dodge,
+        color,
+        warn_thresh,
+        plot_kws,
+    ):
+
+        width = .8 * self._native_width
+        offsets = self._nested_offsets(width, dodge)
+
+        iter_vars = [self.orient]
+        if dodge:
+            iter_vars.append("hue")
+
+        ax = self.ax
+        point_collections = {}
+        dodge_move = 0
+
+        if "marker" in plot_kws and not MarkerStyle(plot_kws["marker"]).is_filled():
+            plot_kws.pop("edgecolor", None)
+
+        for sub_vars, sub_data in self.iter_data(iter_vars,
+                                                 from_comp_data=True,
+                                                 allow_empty=True):
+
+            ax = self._get_axes(sub_vars)
+
+            if offsets is not None:
+                dodge_move = offsets[sub_data["hue"].map(self._hue_map.levels.index)]
+
+            if not sub_data.empty:
+                sub_data[self.orient] = sub_data[self.orient] + dodge_move
+
+            self._invert_scale(ax, sub_data)
+
+            points = ax.scatter(sub_data["x"], sub_data["y"], color=color, **plot_kws)
+            if "hue" in self.variables:
+                points.set_facecolors(self._hue_map(sub_data["hue"]))
+
+            if not sub_data.empty:
+                point_collections[(ax, sub_data[self.orient].iloc[0])] = points
+
+        beeswarm = Beeswarm(width=width, orient=self.orient, warn_thresh=warn_thresh)
+        for (ax, center), points in point_collections.items():
+            if points.get_offsets().shape[0] > 1:
+
+                def draw(points, renderer, *, center=center):
+
+                    beeswarm(points, center)
+
+                    if self.orient == "y":
+                        scalex = False
+                        scaley = ax.get_autoscaley_on()
+                    else:
+                        scalex = ax.get_autoscalex_on()
+                        scaley = False
+
+                    # This prevents us from undoing the nice categorical axis limits
+                    # set in _adjust_cat_axis, because that method currently leaves
+                    # the autoscale flag in its original setting. It may be better
+                    # to disable autoscaling there to avoid needing to do this.
+                    fixed_scale = self.var_types[self.orient] == "categorical"
+                    ax.update_datalim(points.get_datalim(ax.transData))
+                    if not fixed_scale and (scalex or scaley):
+                        ax.autoscale_view(scalex=scalex, scaley=scaley)
+
+                    super(points.__class__, points).draw(renderer)
+
+                points.draw = draw.__get__(points)
+
+        _draw_figure(ax.figure)
+        self._configure_legend(ax, _scatter_legend_artist, plot_kws)
+
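# User-level sketch (not part of the patch) for plot_swarms via swarmplot; the
# Beeswarm adjustment runs lazily in the patched draw() method above.
import numpy as np
import pandas as pd
import seaborn as sns

rng = np.random.default_rng(8)
df = pd.DataFrame({
    "cat": np.repeat(["a", "b"], 50),
    "val": rng.normal(size=100),
})
sns.swarmplot(data=df, x="cat", y="val", size=3)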
+    def plot_boxes(
+        self,
+        width,
+        dodge,
+        gap,
+        fill,
+        whis,
+        color,
+        linecolor,
+        linewidth,
+        fliersize,
+        plot_kws,  # TODO rename user_kws?
+    ):
+
+        iter_vars = ["hue"]
+        value_var = {"x": "y", "y": "x"}[self.orient]
+
+        def get_props(element, artist=mpl.lines.Line2D):
+            return normalize_kwargs(plot_kws.pop(f"{element}props", {}), artist)
+
+        if not fill and linewidth is None:
+            linewidth = mpl.rcParams["lines.linewidth"]
+        bootstrap = plot_kws.pop("bootstrap", mpl.rcParams["boxplot.bootstrap"])
+        plot_kws.setdefault("shownotches", plot_kws.pop("notch", False))
+
+        box_artist = mpl.patches.Rectangle if fill else mpl.lines.Line2D
+        props = {
+            "box": get_props("box", box_artist),
+            "median": get_props("median"),
+            "whisker": get_props("whisker"),
+            "flier": get_props("flier"),
+            "cap": get_props("cap"),
+        }
+
+        props["median"].setdefault("solid_capstyle", "butt")
+        props["whisker"].setdefault("solid_capstyle", "butt")
+        props["flier"].setdefault("markersize", fliersize)
+
+        ax = self.ax
+
+        for sub_vars, sub_data in self.iter_data(iter_vars,
+                                                 from_comp_data=True,
+                                                 allow_empty=False):
+
+            ax = self._get_axes(sub_vars)
+
+            grouped = sub_data.groupby(self.orient)[value_var]
+            positions = sorted(sub_data[self.orient].unique().astype(float))
+            value_data = [x.to_numpy() for _, x in grouped]
+            stats = pd.DataFrame(mpl.cbook.boxplot_stats(value_data, whis=whis,
+                                                         bootstrap=bootstrap))
+
+            orig_width = width * self._native_width
+            data = pd.DataFrame({self.orient: positions, "width": orig_width})
+            if dodge:
+                self._dodge(sub_vars, data)
+            if gap:
+                data["width"] *= 1 - gap
+            capwidth = plot_kws.get("capwidths", 0.5 * data["width"])
+
+            self._invert_scale(ax, data)
+            _, inv = _get_transform_functions(ax, value_var)
+            for stat in ["mean", "med", "q1", "q3", "cilo", "cihi", "whislo", "whishi"]:
+                stats[stat] = inv(stats[stat])
+            stats["fliers"] = stats["fliers"].map(inv)
+
+            linear_orient_scale = getattr(ax, f"get_{self.orient}scale")() == "linear"
+
+            maincolor = self._hue_map(sub_vars["hue"]) if "hue" in sub_vars else color
+            if fill:
+                boxprops = {
+                    "facecolor": maincolor, "edgecolor": linecolor, **props["box"]
+                }
+                medianprops = {"color": linecolor, **props["median"]}
+                whiskerprops = {"color": linecolor, **props["whisker"]}
+                flierprops = {"markeredgecolor": linecolor, **props["flier"]}
+                capprops = {"color": linecolor, **props["cap"]}
+            else:
+                boxprops = {"color": maincolor, **props["box"]}
+                medianprops = {"color": maincolor, **props["median"]}
+                whiskerprops = {"color": maincolor, **props["whisker"]}
+                flierprops = {"markeredgecolor": maincolor, **props["flier"]}
+                capprops = {"color": maincolor, **props["cap"]}
+
+            if linewidth is not None:
+                for prop_dict in [boxprops, medianprops, whiskerprops, capprops]:
+                    prop_dict.setdefault("linewidth", linewidth)
+
+            default_kws = dict(
+                bxpstats=stats.to_dict("records"),
+                positions=data[self.orient],
+                # Set width to 0 to avoid going out of domain
+                widths=data["width"] if linear_orient_scale else 0,
+                patch_artist=fill,
+                vert=self.orient == "x",
+                manage_ticks=False,
+                boxprops=boxprops,
+                medianprops=medianprops,
+                whiskerprops=whiskerprops,
+                flierprops=flierprops,
+                capprops=capprops,
+                # Added in matplotlib 3.6.0; see below
+                # capwidths=capwidth,
+                **(
+                    {} if _version_predates(mpl, "3.6.0")
+                    else {"capwidths": capwidth}
+                )
+            )
+            boxplot_kws = {**default_kws, **plot_kws}
+            artists = ax.bxp(**boxplot_kws)
+
+            # Reset artist widths after adding so everything stays positive
+            ori_idx = ["x", "y"].index(self.orient)
+
+            if not linear_orient_scale:
+                for i, box in enumerate(data.to_dict("records")):
+                    p0 = box["edge"]
+                    p1 = box["edge"] + box["width"]
+
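+                    # Along the orient axis, vertex 0 and vertices from 3 on
+                    # sit at one box edge; vertices 1-2 sit at the other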
+                    if artists["boxes"]:
+                        box_artist = artists["boxes"][i]
+                        if fill:
+                            box_verts = box_artist.get_path().vertices.T
+                        else:
+                            box_verts = box_artist.get_data()
+                        box_verts[ori_idx][0] = p0
+                        box_verts[ori_idx][3:] = p0
+                        box_verts[ori_idx][1:3] = p1
+                        if not fill:
+                            # When fill is True, the vertices were already
+                            # modified in place; only the Line2D needs set_data
+                            box_artist.set_data(box_verts)
+                        ax.update_datalim(
+                            np.transpose(box_verts),
+                            updatex=self.orient == "x",
+                            updatey=self.orient == "y",
+                        )
+
+                    if artists["medians"]:
+                        verts = artists["medians"][i].get_xydata().T
+                        verts[ori_idx][:] = p0, p1
+                        artists["medians"][i].set_data(verts)
+
+                    if artists["caps"]:
+                        f_fwd, f_inv = _get_transform_functions(ax, self.orient)
+                        for line in artists["caps"][2 * i:2 * i + 2]:
+                            p0 = f_inv(f_fwd(box[self.orient]) - capwidth[i] / 2)
+                            p1 = f_inv(f_fwd(box[self.orient]) + capwidth[i] / 2)
+                            verts = line.get_xydata().T
+                            verts[ori_idx][:] = p0, p1
+                            line.set_data(verts)
+
+            ax.add_container(BoxPlotContainer(artists))
+
+        legend_artist = _get_patch_legend_artist(fill)
+        self._configure_legend(ax, legend_artist, boxprops)
+
+    def plot_boxens(
+        self,
+        width,
+        dodge,
+        gap,
+        fill,
+        color,
+        linecolor,
+        linewidth,
+        width_method,
+        k_depth,
+        outlier_prop,
+        trust_alpha,
+        showfliers,
+        box_kws,
+        flier_kws,
+        line_kws,
+        plot_kws,
+    ):
+
+        iter_vars = [self.orient, "hue"]
+        value_var = {"x": "y", "y": "x"}[self.orient]
+
+        estimator = LetterValues(k_depth, outlier_prop, trust_alpha)
+
+        width_method_options = ["exponential", "linear", "area"]
+        _check_argument("width_method", width_method_options, width_method)
+
+        box_kws = plot_kws if box_kws is None else {**plot_kws, **box_kws}
+        flier_kws = {} if flier_kws is None else flier_kws.copy()
+        line_kws = {} if line_kws is None else line_kws.copy()
+
+        if linewidth is None:
+            if fill:
+                linewidth = 0.5 * mpl.rcParams["lines.linewidth"]
+            else:
+                linewidth = mpl.rcParams["lines.linewidth"]
+
+        ax = self.ax
+
+        for sub_vars, sub_data in self.iter_data(iter_vars,
+                                                 from_comp_data=True,
+                                                 allow_empty=False):
+
+            ax = self._get_axes(sub_vars)
+            _, inv_ori = _get_transform_functions(ax, self.orient)
+            _, inv_val = _get_transform_functions(ax, value_var)
+
+            # Statistics
+            lv_data = estimator(sub_data[value_var])
+            n = lv_data["k"] * 2 - 1
+            vals = lv_data["values"]
+
+            pos_data = pd.DataFrame({
+                self.orient: [sub_vars[self.orient]],
+                "width": [width * self._native_width],
+            })
+            if dodge:
+                self._dodge(sub_vars, pos_data)
+            if gap:
+                pos_data["width"] *= 1 - gap
+
+            # Letter-value boxes
+            levels = lv_data["levels"]
+            exponent = (levels - 1 - lv_data["k"]).astype(float)
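+            # Relative box widths for each letter-value level; these are
+            # normalized below to the allotted width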
+            if width_method == "linear":
+                rel_widths = levels + 1
+            elif width_method == "exponential":
+                rel_widths = 2 ** exponent
+            elif width_method == "area":
+                tails = levels < (lv_data["k"] - 1)
+                rel_widths = 2 ** (exponent - tails) / np.diff(lv_data["values"])
+
+            center = pos_data[self.orient].item()
+            widths = rel_widths / rel_widths.max() * pos_data["width"].item()
+
+            box_vals = inv_val(vals)
+            box_pos = inv_ori(center - widths / 2)
+            box_heights = inv_val(vals[1:]) - inv_val(vals[:-1])
+            box_widths = inv_ori(center + widths / 2) - inv_ori(center - widths / 2)
+
+            maincolor = self._hue_map(sub_vars["hue"]) if "hue" in sub_vars else color
+            flier_colors = {
+                "facecolor": "none", "edgecolor": ".45" if fill else maincolor
+            }
+            if fill:
+                cmap = light_palette(maincolor, as_cmap=True)
+                boxcolors = cmap(2 ** ((exponent + 2) / 3))
+            else:
+                boxcolors = maincolor
+
+            boxen = []
+            for i in range(n):
+                if self.orient == "x":
+                    xy = (box_pos[i], box_vals[i])
+                    w, h = (box_widths[i], box_heights[i])
+                else:
+                    xy = (box_vals[i], box_pos[i])
+                    w, h = (box_heights[i], box_widths[i])
+                boxen.append(Rectangle(xy, w, h))
+
+            if fill:
+                box_colors = {"facecolors": boxcolors, "edgecolors": linecolor}
+            else:
+                box_colors = {"facecolors": "none", "edgecolors": boxcolors}
+
+            collection_kws = {**box_colors, "linewidth": linewidth, **box_kws}
+            ax.add_collection(PatchCollection(boxen, **collection_kws), autolim=False)
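+            # Track data limits along the value axis only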
+            ax.update_datalim(
+                np.column_stack([box_vals, box_vals]),
+                updatex=self.orient == "y",
+                updatey=self.orient == "x",
+            )
+
+            # Median line
+            med = lv_data["median"]
+            hw = pos_data["width"].item() / 2
+            if self.orient == "x":
+                x, y = inv_ori([center - hw, center + hw]), inv_val([med, med])
+            else:
+                x, y = inv_val([med, med]), inv_ori([center - hw, center + hw])
+            default_kws = {
+                "color": linecolor if fill else maincolor,
+                "solid_capstyle": "butt",
+                "linewidth": 1.25 * linewidth,
+            }
+            ax.plot(x, y, **{**default_kws, **line_kws})
+
+            # Outliers ("fliers")
+            if showfliers:
+                vals = inv_val(lv_data["fliers"])
+                pos = np.full(len(vals), inv_ori(pos_data[self.orient].item()))
+                x, y = (pos, vals) if self.orient == "x" else (vals, pos)
+                ax.scatter(x, y, **{**flier_colors, "s": 25, **flier_kws})
+
+        ax.autoscale_view(scalex=self.orient == "y", scaley=self.orient == "x")
+
+        legend_artist = _get_patch_legend_artist(fill)
+        common_kws = {**box_kws, "linewidth": linewidth, "edgecolor": linecolor}
+        self._configure_legend(ax, legend_artist, common_kws)
+
+    def plot_violins(
+        self,
+        width,
+        dodge,
+        gap,
+        split,
+        color,
+        fill,
+        linecolor,
+        linewidth,
+        inner,
+        density_norm,
+        common_norm,
+        kde_kws,
+        inner_kws,
+        plot_kws,
+    ):
+
+        iter_vars = [self.orient, "hue"]
+        value_var = {"x": "y", "y": "x"}[self.orient]
+
+        inner_options = ["box", "quart", "stick", "point", None]
+        _check_argument("inner", inner_options, inner, prefix=True)
+        _check_argument("density_norm", ["area", "count", "width"], density_norm)
+
+        if linewidth is None:
+            if fill:
+                linewidth = 1.25 * mpl.rcParams["patch.linewidth"]
+            else:
+                linewidth = mpl.rcParams["lines.linewidth"]
+
+        if inner is not None and inner.startswith("box"):
+            box_width = inner_kws.pop("box_width", linewidth * 4.5)
+            whis_width = inner_kws.pop("whis_width", box_width / 3)
+            marker = inner_kws.pop("marker", "_" if self.orient == "x" else "|")
+
+        kde = KDE(**kde_kws)
+        ax = self.ax
+        violin_data = []
+
+        # Iterate through all the data splits once to compute the KDEs
+        for sub_vars, sub_data in self.iter_data(iter_vars,
+                                                 from_comp_data=True,
+                                                 allow_empty=False):
+
+            sub_data["weight"] = sub_data.get("weights", 1)
+            stat_data = kde._transform(sub_data, value_var, [])
+
+            maincolor = self._hue_map(sub_vars["hue"]) if "hue" in sub_vars else color
+            if not fill:
+                linecolor = maincolor
+                maincolor = "none"
+            default_kws = dict(
+                facecolor=maincolor,
+                edgecolor=linecolor,
+                linewidth=linewidth,
+            )
+
+            violin_data.append({
+                "position": sub_vars[self.orient],
+                "observations": sub_data[value_var],
+                "density": stat_data["density"],
+                "support": stat_data[value_var],
+                "kwargs": {**default_kws, **plot_kws},
+                "sub_vars": sub_vars,
+                "ax": self._get_axes(sub_vars),
+            })
+
+        # Once we've computed all the KDEs, get statistics for normalization
+        def vars_to_key(sub_vars):
+            return tuple((k, v) for k, v in sub_vars.items() if k != self.orient)
+
+        norm_keys = [vars_to_key(violin["sub_vars"]) for violin in violin_data]
+        if common_norm:
+            common_max_density = np.nanmax([v["density"].max() for v in violin_data])
+            common_max_count = np.nanmax([len(v["observations"]) for v in violin_data])
+            max_density = {key: common_max_density for key in norm_keys}
+            max_count = {key: common_max_count for key in norm_keys}
+        else:
+            with warnings.catch_warnings():
+                # Ignore warning when all violins are singular; it's not important
+                warnings.filterwarnings('ignore', "All-NaN (slice|axis) encountered")
+                max_density = {
+                    key: np.nanmax([
+                        v["density"].max() for v in violin_data
+                        if vars_to_key(v["sub_vars"]) == key
+                    ]) for key in norm_keys
+                }
+            max_count = {
+                key: np.nanmax([
+                    len(v["observations"]) for v in violin_data
+                    if vars_to_key(v["sub_vars"]) == key
+                ]) for key in norm_keys
+            }
+
+        real_width = width * self._native_width
+
+        # Now iterate through the violins again to apply the normalization and plot
+        for violin in violin_data:
+
+            index = pd.RangeIndex(0, max(len(violin["support"]), 1))
+            data = pd.DataFrame({
+                self.orient: violin["position"],
+                value_var: violin["support"],
+                "density": violin["density"],
+                "width": real_width,
+            }, index=index)
+
+            if dodge:
+                self._dodge(violin["sub_vars"], data)
+            if gap:
+                data["width"] *= 1 - gap
+
+            # Normalize the density across the distribution(s) and relative to the width
+            norm_key = vars_to_key(violin["sub_vars"])
+            hw = data["width"] / 2
+            peak_density = violin["density"].max()
+            if np.isnan(peak_density):
+                span = 1
+            elif density_norm == "area":
+                span = data["density"] / max_density[norm_key]
+            elif density_norm == "count":
+                count = len(violin["observations"])
+                span = data["density"] / peak_density * (count / max_count[norm_key])
+            elif density_norm == "width":
+                span = data["density"] / peak_density
+            span = span * hw * (2 if split else 1)
+
+            # Handle split violins (i.e. asymmetric spans)
+            right_side = (
+                0 if "hue" not in self.variables
+                else self._hue_map.levels.index(violin["sub_vars"]["hue"]) % 2
+            )
+            if split:
+                offsets = (hw, span - hw) if right_side else (span - hw, hw)
+            else:
+                offsets = span, span
+
+            ax = violin["ax"]
+            _, invx = _get_transform_functions(ax, "x")
+            _, invy = _get_transform_functions(ax, "y")
+            inv_pos = {"x": invx, "y": invy}[self.orient]
+            inv_val = {"x": invx, "y": invy}[value_var]
+
+            linecolor = violin["kwargs"]["edgecolor"]
+
+            # Handle singular datasets (one or more observations with no variance)
+            if np.isnan(peak_density):
+                pos = data[self.orient].iloc[0]
+                val = violin["observations"].mean()
+                if self.orient == "x":
+                    x, y = [pos - offsets[0], pos + offsets[1]], [val, val]
+                else:
+                    x, y = [val, val], [pos - offsets[0], pos + offsets[1]]
+                ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)
+                continue
+
+            # Plot the main violin body
+            plot_func = {"x": ax.fill_betweenx, "y": ax.fill_between}[self.orient]
+            plot_func(
+                inv_val(data[value_var]),
+                inv_pos(data[self.orient] - offsets[0]),
+                inv_pos(data[self.orient] + offsets[1]),
+                **violin["kwargs"]
+            )
+
+            # Get the observations and adjust their position for the inner artists
+            obs = violin["observations"]
+            pos_dict = {self.orient: violin["position"], "width": real_width}
+            if dodge:
+                self._dodge(violin["sub_vars"], pos_dict)
+            if gap:
+                pos_dict["width"] *= (1 - gap)
+
+            # --- Plot the inner components
+            if inner is None:
+                continue
+
+            elif inner.startswith("point"):
+                pos = np.array([pos_dict[self.orient]] * len(obs))
+                if split:
+                    pos += (-1 if right_side else 1) * pos_dict["width"] / 2
+                x, y = (pos, obs) if self.orient == "x" else (obs, pos)
+                kws = {
+                    "color": linecolor,
+                    "edgecolor": linecolor,
+                    "s": (linewidth * 2) ** 2,
+                    "zorder": violin["kwargs"].get("zorder", 2) + 1,
+                    **inner_kws,
+                }
+                ax.scatter(invx(x), invy(y), **kws)
+
+            elif inner.startswith("stick"):
+                pos0 = np.interp(obs, data[value_var], data[self.orient] - offsets[0])
+                pos1 = np.interp(obs, data[value_var], data[self.orient] + offsets[1])
+                pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])
+                val_pts = np.stack([inv_val(obs), inv_val(obs)])
+                segments = np.stack([pos_pts, val_pts]).transpose(2, 1, 0)
+                if self.orient == "y":
+                    segments = segments[:, :, ::-1]
+                kws = {
+                    "color": linecolor,
+                    "linewidth": linewidth / 2,
+                    **inner_kws,
+                }
+                lines = mpl.collections.LineCollection(segments, **kws)
+                ax.add_collection(lines, autolim=False)
+
+            elif inner.startswith("quart"):
+                stats = np.percentile(obs, [25, 50, 75])
+                pos0 = np.interp(stats, data[value_var], data[self.orient] - offsets[0])
+                pos1 = np.interp(stats, data[value_var], data[self.orient] + offsets[1])
+                pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])
+                val_pts = np.stack([inv_val(stats), inv_val(stats)])
+                segments = np.stack([pos_pts, val_pts]).transpose(2, 0, 1)
+                if self.orient == "y":
+                    segments = segments[:, ::-1, :]
+                dashes = [(1.25, .75), (2.5, 1), (1.25, .75)]
+                for i, segment in enumerate(segments):
+                    kws = {
+                        "color": linecolor,
+                        "linewidth": linewidth,
+                        "dashes": dashes[i],
+                        **inner_kws,
+                    }
+                    ax.plot(*segment, **kws)
+
+            elif inner.startswith("box"):
+                stats = mpl.cbook.boxplot_stats(obs)[0]
+                pos = np.array(pos_dict[self.orient])
+                if split:
+                    pos += (-1 if right_side else 1) * pos_dict["width"] / 2
+                pos = [pos, pos], [pos, pos], [pos]
+                val = (
+                    [stats["whislo"], stats["whishi"]],
+                    [stats["q1"], stats["q3"]],
+                    [stats["med"]]
+                )
+                if self.orient == "x":
+                    (x0, x1, x2), (y0, y1, y2) = pos, val
+                else:
+                    (x0, x1, x2), (y0, y1, y2) = val, pos
+
+                if split:
+                    offset = (1 if right_side else -1) * box_width / 72 / 2
+                    dx, dy = (offset, 0) if self.orient == "x" else (0, -offset)
+                    trans = ax.transData + mpl.transforms.ScaledTranslation(
+                        dx, dy, ax.figure.dpi_scale_trans,
+                    )
+                else:
+                    trans = ax.transData
+                line_kws = {
+                    "color": linecolor,
+                    "transform": trans,
+                    **inner_kws,
+                    "linewidth": whis_width,
+                }
+                ax.plot(invx(x0), invy(y0), **line_kws)
+                line_kws["linewidth"] = box_width
+                ax.plot(invx(x1), invy(y1), **line_kws)
+                dot_kws = {
+                    "marker": marker,
+                    "markersize": box_width / 1.2,
+                    "markeredgewidth": box_width / 5,
+                    "transform": trans,
+                    **inner_kws,
+                    "markeredgecolor": "w",
+                    "markerfacecolor": "w",
+                    "color": linecolor,  # simplify tests
+                }
+                ax.plot(invx(x2), invy(y2), **dot_kws)
+
+        legend_artist = _get_patch_legend_artist(fill)
+        common_kws = {**plot_kws, "linewidth": linewidth, "edgecolor": linecolor}
+        self._configure_legend(ax, legend_artist, common_kws)
+
+    def plot_points(
+        self,
+        aggregator,
+        markers,
+        linestyles,
+        dodge,
+        color,
+        capsize,
+        err_kws,
+        plot_kws,
+    ):
+
+        agg_var = {"x": "y", "y": "x"}[self.orient]
+        iter_vars = ["hue"]
+
+        plot_kws = normalize_kwargs(plot_kws, mpl.lines.Line2D)
+        plot_kws.setdefault("linewidth", mpl.rcParams["lines.linewidth"] * 1.8)
+        plot_kws.setdefault("markeredgewidth", plot_kws["linewidth"] * 0.75)
+        plot_kws.setdefault("markersize", plot_kws["linewidth"] * np.sqrt(2 * np.pi))
+
+        markers = self._map_prop_with_hue("marker", markers, "o", plot_kws)
+        linestyles = self._map_prop_with_hue("linestyle", linestyles, "-", plot_kws)
+
+        base_positions = self.var_levels[self.orient]
+        if self.var_types[self.orient] == "categorical":
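+            # Span the full range of observed category codes so every
+            # ordinal position is included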
+            min_cat_val = int(self.comp_data[self.orient].min())
+            max_cat_val = int(self.comp_data[self.orient].max())
+            base_positions = [i for i in range(min_cat_val, max_cat_val + 1)]
+
+        n_hue_levels = 0 if self._hue_map.levels is None else len(self._hue_map.levels)
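+        # A bare dodge=True requests a default offset scaled by the
+        # number of hue levels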
+        if dodge is True:
+            dodge = .025 * n_hue_levels
+
+        ax = self.ax
+
+        for sub_vars, sub_data in self.iter_data(iter_vars,
+                                                 from_comp_data=True,
+                                                 allow_empty=True):
+
+            ax = self._get_axes(sub_vars)
+
+            ori_axis = getattr(ax, f"{self.orient}axis")
+            transform, _ = _get_transform_functions(ax, self.orient)
+            positions = transform(ori_axis.convert_units(base_positions))
+            agg_data = sub_data if sub_data.empty else (
+                sub_data
+                .groupby(self.orient)
+                .apply(aggregator, agg_var, **groupby_apply_include_groups(False))
+                .reindex(pd.Index(positions, name=self.orient))
+                .reset_index()
+            )
+
+            if dodge:
+                hue_idx = self._hue_map.levels.index(sub_vars["hue"])
+                step_size = dodge / (n_hue_levels - 1)
+                offset = -dodge / 2 + step_size * hue_idx
+                agg_data[self.orient] += offset * self._native_width
+
+            self._invert_scale(ax, agg_data)
+
+            sub_kws = plot_kws.copy()
+            sub_kws.update(
+                marker=markers[sub_vars.get("hue")],
+                linestyle=linestyles[sub_vars.get("hue")],
+                color=self._hue_map(sub_vars["hue"]) if "hue" in sub_vars else color,
+            )
+
+            line, = ax.plot(agg_data["x"], agg_data["y"], **sub_kws)
+
+            sub_err_kws = err_kws.copy()
+            line_props = line.properties()
+            for prop in ["color", "linewidth", "alpha", "zorder"]:
+                sub_err_kws.setdefault(prop, line_props[prop])
+            if aggregator.error_method is not None:
+                self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)
+
+        legend_artist = partial(mpl.lines.Line2D, [], [])
+        semantic_kws = {"hue": {"marker": markers, "linestyle": linestyles}}
+        self._configure_legend(ax, legend_artist, sub_kws, semantic_kws)
+
+    def plot_bars(
+        self,
+        aggregator,
+        dodge,
+        gap,
+        width,
+        fill,
+        color,
+        capsize,
+        err_kws,
+        plot_kws,
+    ):
+
+        agg_var = {"x": "y", "y": "x"}[self.orient]
+        iter_vars = ["hue"]
+
+        ax = self.ax
+
+        if self._hue_map.levels is None:
+            dodge = False
+
+        if dodge and capsize is not None:
+            capsize = capsize / len(self._hue_map.levels)
+
+        if not fill:
+            plot_kws.setdefault("linewidth", 1.5 * mpl.rcParams["lines.linewidth"])
+
+        err_kws.setdefault("linewidth", 1.5 * mpl.rcParams["lines.linewidth"])
+
+        for sub_vars, sub_data in self.iter_data(iter_vars,
+                                                 from_comp_data=True,
+                                                 allow_empty=True):
+
+            ax = self._get_axes(sub_vars)
+
+            agg_data = sub_data if sub_data.empty else (
+                sub_data
+                .groupby(self.orient)
+                .apply(aggregator, agg_var, **groupby_apply_include_groups(False))
+                .reset_index()
+            )
+
+            agg_data["width"] = width * self._native_width
+            if dodge:
+                self._dodge(sub_vars, agg_data)
+            if gap:
+                agg_data["width"] *= 1 - gap
+
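+            # Bars are drawn from this edge (align="edge" is set below)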
+            agg_data["edge"] = agg_data[self.orient] - agg_data["width"] / 2
+            self._invert_scale(ax, agg_data)
+
+            if self.orient == "x":
+                bar_func = ax.bar
+                kws = dict(
+                    x=agg_data["edge"], height=agg_data["y"], width=agg_data["width"]
+                )
+            else:
+                bar_func = ax.barh
+                kws = dict(
+                    y=agg_data["edge"], width=agg_data["x"], height=agg_data["width"]
+                )
+
+            main_color = self._hue_map(sub_vars["hue"]) if "hue" in sub_vars else color
+
+            # Set both color and facecolor for property cycle logic
+            kws["align"] = "edge"
+            if fill:
+                kws.update(color=main_color, facecolor=main_color)
+            else:
+                kws.update(color=main_color, edgecolor=main_color, facecolor="none")
+
+            bar_func(**{**kws, **plot_kws})
+
+            if aggregator.error_method is not None:
+                self.plot_errorbars(
+                    ax, agg_data, capsize,
+                    {"color": ".26" if fill else main_color, **err_kws}
+                )
+
+        legend_artist = _get_patch_legend_artist(fill)
+        self._configure_legend(ax, legend_artist, plot_kws)
+
+    def plot_errorbars(self, ax, data, capsize, err_kws):
+
+        var = {"x": "y", "y": "x"}[self.orient]
+        for row in data.to_dict("records"):
+
+            row = dict(row)
+            pos = np.array([row[self.orient], row[self.orient]])
+            val = np.array([row[f"{var}min"], row[f"{var}max"]])
+
+            if capsize:
+
+                cw = capsize * self._native_width / 2
+                scl, inv = _get_transform_functions(ax, self.orient)
+                cap = inv(scl(pos[0]) - cw), inv(scl(pos[1]) + cw)
+
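+                # Concatenate with NaN breaks so the caps and the error bar
+                # line can be drawn with a single plot call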
+                pos = np.concatenate([
+                    [*cap, np.nan], pos, [np.nan, *cap]
+                ])
+                val = np.concatenate([
+                    [val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],
+                ])
+
+            if self.orient == "x":
+                args = pos, val
+            else:
+                args = val, pos
+            ax.plot(*args, **err_kws)


 class _CategoricalAggPlotter(_CategoricalPlotter):
-    flat_structure = {'x': '@index', 'y': '@values'}

+    flat_structure = {"x": "@index", "y": "@values"}
+
+
+_categorical_docs = dict(

-_categorical_docs = dict(categorical_narrative=dedent(
-    """    See the :ref:`tutorial <categorical_tutorial>` for more information.
+    # Shared narrative docs
+    categorical_narrative=dedent("""\
+    See the :ref:`tutorial <categorical_tutorial>` for more information.

     .. note::
         By default, this function treats one of the variables as categorical
         and draws data at ordinal positions (0, 1, ... n) on the relevant axis.
         As of version 0.13.0, this can be disabled by setting `native_scale=True`.
-    """
-    ), input_params=dedent(
-    """    x, y, hue : names of variables in `data` or vector data
-        Inputs for plotting long-form data. See examples for interpretation.    """
-    ), categorical_data=dedent(
-    """    data : DataFrame, Series, dict, array, or list of arrays
+    """),
+
+    # Shared function parameters
+    input_params=dedent("""\
+    x, y, hue : names of variables in `data` or vector data
+        Inputs for plotting long-form data. See examples for interpretation.\
+    """),
+    categorical_data=dedent("""\
+    data : DataFrame, Series, dict, array, or list of arrays
         Dataset for plotting. If `x` and `y` are absent, this is
-        interpreted as wide-form. Otherwise it is expected to be long-form.    """
-    ), order_vars=dedent(
-    """    order, hue_order : lists of strings
+        interpreted as wide-form. Otherwise it is expected to be long-form.\
+    """),
+    order_vars=dedent("""\
+    order, hue_order : lists of strings
         Order to plot the categorical levels in; otherwise the levels are
-        inferred from the data objects.    """
-    ), stat_api_params=dedent(
-    """    estimator : string or callable that maps vector -> scalar
+        inferred from the data objects.\
+    """),
+    stat_api_params=dedent("""\
+    estimator : string or callable that maps vector -> scalar
         Statistical function to estimate within each categorical bin.
     errorbar : string, (string, number) tuple, callable or None
         Name of errorbar method (either "ci", "pi", "se", or "sd"), or a tuple
@@ -175,150 +1405,253 @@ _categorical_docs = dict(categorical_narrative=dedent(
         Data values or column used to compute weighted statistics.
         Note that the use of weights may limit other statistical options.

-        .. versionadded:: v0.13.1    """
-    ), ci=dedent(
-    """    ci : float
+        .. versionadded:: v0.13.1\
+    """),
+    ci=dedent("""\
+    ci : float
         Level of the confidence interval to show, in [0, 100].

         .. deprecated:: v0.12.0
-            Use `errorbar=("ci", ...)`.    """
-    ), orient=dedent(
-    """    orient : "v" | "h" | "x" | "y"
+            Use `errorbar=("ci", ...)`.\
+    """),
+    orient=dedent("""\
+    orient : "v" | "h" | "x" | "y"
         Orientation of the plot (vertical or horizontal). This is usually
         inferred based on the type of the input variables, but it can be used
         to resolve ambiguity when both `x` and `y` are numeric or when
         plotting wide-form data.

         .. versionchanged:: v0.13.0
-            Added 'x'/'y' as options, equivalent to 'v'/'h'.    """
-    ), color=dedent(
-    """    color : matplotlib color
-        Single color for the elements in the plot.    """
-    ), palette=dedent(
-    """    palette : palette name, list, dict, or :class:`matplotlib.colors.Colormap`
+            Added 'x'/'y' as options, equivalent to 'v'/'h'.\
+    """),
+    color=dedent("""\
+    color : matplotlib color
+        Single color for the elements in the plot.\
+    """),
+    palette=dedent("""\
+    palette : palette name, list, dict, or :class:`matplotlib.colors.Colormap`
         Color palette that maps the hue variable. If the palette is a dictionary,
         keys should be names of levels and values should be matplotlib colors.
-        The type/value will sometimes force a qualitative/quantitative mapping.    """
-    ), hue_norm=dedent(
-    """    hue_norm : tuple or :class:`matplotlib.colors.Normalize` object
+        The type/value will sometimes force a qualitative/quantitative mapping.\
+    """),
+    hue_norm=dedent("""\
+    hue_norm : tuple or :class:`matplotlib.colors.Normalize` object
         Normalization in data units for colormap applied to the `hue`
         variable when it is numeric. Not relevant if `hue` is categorical.

-        .. versionadded:: v0.12.0    """
-    ), saturation=dedent(
-    """    saturation : float
+        .. versionadded:: v0.12.0\
+    """),
+    saturation=dedent("""\
+    saturation : float
         Proportion of the original saturation to draw fill colors in. Large
         patches often look better with desaturated colors, but set this to
-        `1` if you want the colors to perfectly match the input values.    """
-    ), capsize=dedent(
-    """    capsize : float
-        Width of the "caps" on error bars, relative to bar spacing.    """
-    ), errcolor=dedent(
-    """    errcolor : matplotlib color
+        `1` if you want the colors to perfectly match the input values.\
+    """),
+    capsize=dedent("""\
+    capsize : float
+        Width of the "caps" on error bars, relative to bar spacing.\
+    """),
+    errcolor=dedent("""\
+    errcolor : matplotlib color
         Color used for the error bar lines.

         .. deprecated:: 0.13.0
-            Use `err_kws={'color': ...}`.    """
-    ), errwidth=dedent(
-    """    errwidth : float
+            Use `err_kws={'color': ...}`.\
+    """),
+    errwidth=dedent("""\
+    errwidth : float
         Thickness of error bar lines (and caps), in points.

         .. deprecated:: 0.13.0
-            Use `err_kws={'linewidth': ...}`.    """
-    ), fill=dedent(
-    """    fill : bool
+            Use `err_kws={'linewidth': ...}`.\
+    """),
+    fill=dedent("""\
+    fill : bool
         If True, use a solid patch. Otherwise, draw as line art.

-        .. versionadded:: v0.13.0    """
-    ), gap=dedent(
-    """    gap : float
+        .. versionadded:: v0.13.0\
+    """),
+    gap=dedent("""\
+    gap : float
         Shrink on the orient axis by this factor to add a gap between dodged elements.

-        .. versionadded:: 0.13.0    """
-    ), width=dedent(
-    """    width : float
+        .. versionadded:: 0.13.0\
+    """),
+    width=dedent("""\
+    width : float
         Width allotted to each element on the orient axis. When `native_scale=True`,
-        it is relative to the minimum distance between two values in the native scale.    """
-    ), dodge=dedent(
-    """    dodge : "auto" or bool
+        it is relative to the minimum distance between two values in the native scale.\
+    """),
+    dodge=dedent("""\
+    dodge : "auto" or bool
         When hue mapping is used, whether elements should be narrowed and shifted along
         the orient axis to eliminate overlap. If `"auto"`, set to `True` when the
         orient variable is crossed with the categorical variable or `False` otherwise.

         .. versionchanged:: 0.13.0

-            Added `"auto"` mode as a new default.    """
-    ), linewidth=dedent(
-    """    linewidth : float
-        Width of the lines that frame the plot elements.    """
-    ), linecolor=dedent(
-    """    linecolor : color
+            Added `"auto"` mode as a new default.\
+    """),
+    linewidth=dedent("""\
+    linewidth : float
+        Width of the lines that frame the plot elements.\
+    """),
+    linecolor=dedent("""\
+    linecolor : color
         Color to use for line elements, when `fill` is True.

-        .. versionadded:: v0.13.0    """
-    ), log_scale=dedent(
-    """    log_scale : bool or number, or pair of bools or numbers
+        .. versionadded:: v0.13.0\
+    """),
+    log_scale=dedent("""\
+    log_scale : bool or number, or pair of bools or numbers
         Set axis scale(s) to log. A single value sets the data axis for any numeric
         axes in the plot. A pair of values sets each axis independently.
         Numeric values are interpreted as the desired base (default 10).
         When `None` or `False`, seaborn defers to the existing Axes scale.

-        .. versionadded:: v0.13.0    """
-    ), native_scale=dedent(
-    """    native_scale : bool
+        .. versionadded:: v0.13.0\
+    """),
+    native_scale=dedent("""\
+    native_scale : bool
         When True, numeric or datetime values on the categorical axis will maintain
         their original scaling rather than being converted to fixed indices.

-        .. versionadded:: v0.13.0    """
-    ), formatter=dedent(
-    """    formatter : callable
+        .. versionadded:: v0.13.0\
+    """),
+    formatter=dedent("""\
+    formatter : callable
         Function for converting categorical data into strings. Affects both grouping
         and tick labels.

-        .. versionadded:: v0.13.0    """
-    ), legend=dedent(
-    """    legend : "auto", "brief", "full", or False
+        .. versionadded:: v0.13.0\
+    """),
+    legend=dedent("""\
+    legend : "auto", "brief", "full", or False
         How to draw the legend. If "brief", numeric `hue` and `size`
         variables will be represented with a sample of evenly spaced values.
         If "full", every group will get an entry in the legend. If "auto",
         choose between brief or full representation based on number of levels.
         If `False`, no legend data is added and no legend is drawn.

-        .. versionadded:: v0.13.0    """
-    ), err_kws=dedent(
-    """    err_kws : dict
+        .. versionadded:: v0.13.0\
+    """),
+    err_kws=dedent("""\
+    err_kws : dict
         Parameters of :class:`matplotlib.lines.Line2D`, for the error bar artists.

-        .. versionadded:: v0.13.0    """
-    ), ax_in=dedent(
-    """    ax : matplotlib Axes
-        Axes object to draw the plot onto, otherwise uses the current Axes.    """
-    ), ax_out=dedent(
-    """    ax : matplotlib Axes
-        Returns the Axes object with the plot drawn onto it.    """
-    ), boxplot=dedent(
-    '    boxplot : A traditional box-and-whisker plot with a similar API.    '
-    ), violinplot=dedent(
-    '    violinplot : A combination of boxplot and kernel density estimation.    '
-    ), stripplot=dedent(
-    """    stripplot : A scatterplot where one variable is categorical. Can be used
-                in conjunction with other plots to show each observation.    """
-    ), swarmplot=dedent(
-    """    swarmplot : A categorical scatterplot where the points do not overlap. Can
-                be used with other plots to show each observation.    """
-    ), barplot=dedent(
-    '    barplot : Show point estimates and confidence intervals using bars.    '
-    ), countplot=dedent(
-    '    countplot : Show the counts of observations in each categorical bin.    '
-    ), pointplot=dedent(
-    '    pointplot : Show point estimates and confidence intervals using dots.    '
-    ), catplot=dedent(
-    '    catplot : Combine a categorical plot with a :class:`FacetGrid`.    '
-    ), boxenplot=dedent(
-    '    boxenplot : An enhanced boxplot for larger datasets.    '))
+        .. versionadded:: v0.13.0\
+    """),
+    ax_in=dedent("""\
+    ax : matplotlib Axes
+        Axes object to draw the plot onto, otherwise uses the current Axes.\
+    """),
+    ax_out=dedent("""\
+    ax : matplotlib Axes
+        Returns the Axes object with the plot drawn onto it.\
+    """),
+
+    # Shared see also
+    boxplot=dedent("""\
+    boxplot : A traditional box-and-whisker plot with a similar API.\
+    """),
+    violinplot=dedent("""\
+    violinplot : A combination of boxplot and kernel density estimation.\
+    """),
+    stripplot=dedent("""\
+    stripplot : A scatterplot where one variable is categorical. Can be used
+                in conjunction with other plots to show each observation.\
+    """),
+    swarmplot=dedent("""\
+    swarmplot : A categorical scatterplot where the points do not overlap. Can
+                be used with other plots to show each observation.\
+    """),
+    barplot=dedent("""\
+    barplot : Show point estimates and confidence intervals using bars.\
+    """),
+    countplot=dedent("""\
+    countplot : Show the counts of observations in each categorical bin.\
+    """),
+    pointplot=dedent("""\
+    pointplot : Show point estimates and confidence intervals using dots.\
+    """),
+    catplot=dedent("""\
+    catplot : Combine a categorical plot with a :class:`FacetGrid`.\
+    """),
+    boxenplot=dedent("""\
+    boxenplot : An enhanced boxplot for larger datasets.\
+    """),
+
+)
+
 _categorical_docs.update(_facet_docs)
-boxplot.__doc__ = dedent(
-    """    Draw a box plot to show distributions with respect to categories.
+
+
+def boxplot(
+    data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,
+    orient=None, color=None, palette=None, saturation=.75, fill=True,
+    dodge="auto", width=.8, gap=0, whis=1.5, linecolor="auto", linewidth=None,
+    fliersize=None, hue_norm=None, native_scale=False, log_scale=None, formatter=None,
+    legend="auto", ax=None, **kwargs
+):
+
+    p = _CategoricalPlotter(
+        data=data,
+        variables=dict(x=x, y=y, hue=hue),
+        order=order,
+        orient=orient,
+        color=color,
+        legend=legend,
+    )
+
+    if ax is None:
+        ax = plt.gca()
+
+    if p.plot_data.empty:
+        return ax
+
+    if dodge == "auto":
+        # Needs to be before scale_categorical changes the coordinate series dtype
+        dodge = p._dodge_needed()
+
+    if p.var_types.get(p.orient) == "categorical" or not native_scale:
+        p.scale_categorical(p.orient, order=order, formatter=formatter)
+
+    p._attach(ax, log_scale=log_scale)
+
+    # Deprecations to remove in v0.14.0.
+    hue_order = p._palette_without_hue_backcompat(palette, hue_order)
+    palette, hue_order = p._hue_backcompat(color, palette, hue_order)
+
+    saturation = saturation if fill else 1
+    p.map_hue(palette=palette, order=hue_order, norm=hue_norm, saturation=saturation)
+    color = _default_color(
+        ax.fill_between, hue, color,
+        {k: v for k, v in kwargs.items() if k in ["c", "color", "fc", "facecolor"]},
+        saturation=saturation,
+    )
+    linecolor = p._complement_color(linecolor, color, p._hue_map)
+
+    p.plot_boxes(
+        width=width,
+        dodge=dodge,
+        gap=gap,
+        fill=fill,
+        whis=whis,
+        color=color,
+        linecolor=linecolor,
+        linewidth=linewidth,
+        fliersize=fliersize,
+        plot_kws=kwargs,
+    )
+
+    p._add_axis_labels(ax)
+    p._adjust_cat_axis(ax, axis=p.orient)
+
+    return ax
+
+
+boxplot.__doc__ = dedent("""\
+    Draw a box plot to show distributions with respect to categories.

     A box plot (or box-and-whisker plot) shows the distribution of quantitative
     data in a way that facilitates comparisons between variables or across
@@ -375,10 +1708,90 @@ boxplot.__doc__ = dedent(
     --------
     .. include:: ../docstrings/boxplot.rst

-    """
-    ).format(**_categorical_docs)
-violinplot.__doc__ = dedent(
-    """    Draw a patch representing a KDE and add observations or box plot statistics.
+    """).format(**_categorical_docs)
+
+
+def violinplot(
+    data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,
+    orient=None, color=None, palette=None, saturation=.75, fill=True,
+    inner="box", split=False, width=.8, dodge="auto", gap=0,
+    linewidth=None, linecolor="auto", cut=2, gridsize=100,
+    bw_method="scott", bw_adjust=1, density_norm="area", common_norm=False,
+    hue_norm=None, formatter=None, log_scale=None, native_scale=False,
+    legend="auto", scale=deprecated, scale_hue=deprecated, bw=deprecated,
+    inner_kws=None, ax=None, **kwargs,
+):
+
+    p = _CategoricalPlotter(
+        data=data,
+        variables=dict(x=x, y=y, hue=hue),
+        order=order,
+        orient=orient,
+        color=color,
+        legend=legend,
+    )
+
+    if ax is None:
+        ax = plt.gca()
+
+    if p.plot_data.empty:
+        return ax
+
+    if dodge == "auto":
+        # Needs to be before scale_categorical changes the coordinate series dtype
+        dodge = p._dodge_needed()
+
+    if p.var_types.get(p.orient) == "categorical" or not native_scale:
+        p.scale_categorical(p.orient, order=order, formatter=formatter)
+
+    p._attach(ax, log_scale=log_scale)
+
+    # Deprecations to remove in v0.14.0.
+    hue_order = p._palette_without_hue_backcompat(palette, hue_order)
+    palette, hue_order = p._hue_backcompat(color, palette, hue_order)
+
+    saturation = saturation if fill else 1
+    p.map_hue(palette=palette, order=hue_order, norm=hue_norm, saturation=saturation)
+    color = _default_color(
+        ax.fill_between, hue, color,
+        {k: v for k, v in kwargs.items() if k in ["c", "color", "fc", "facecolor"]},
+        saturation=saturation,
+    )
+    linecolor = p._complement_color(linecolor, color, p._hue_map)
+
+    density_norm, common_norm = p._violin_scale_backcompat(
+        scale, scale_hue, density_norm, common_norm,
+    )
+
+    bw_method = p._violin_bw_backcompat(bw, bw_method)
+    kde_kws = dict(cut=cut, gridsize=gridsize, bw_method=bw_method, bw_adjust=bw_adjust)
+    inner_kws = {} if inner_kws is None else inner_kws.copy()
+
+    p.plot_violins(
+        width=width,
+        dodge=dodge,
+        gap=gap,
+        split=split,
+        color=color,
+        fill=fill,
+        linecolor=linecolor,
+        linewidth=linewidth,
+        inner=inner,
+        density_norm=density_norm,
+        common_norm=common_norm,
+        kde_kws=kde_kws,
+        inner_kws=inner_kws,
+        plot_kws=kwargs,
+    )
+
+    p._add_axis_labels(ax)
+    p._adjust_cat_axis(ax, axis=p.orient)
+
+    return ax
+
+
+violinplot.__doc__ = dedent("""\
+    Draw a patch representing a KDE and add observations or box plot statistics.

     A violin plot plays a similar role as a box-and-whisker plot. It shows the
     distribution of data points after grouping by one (or more) variables.
@@ -485,10 +1898,87 @@ violinplot.__doc__ = dedent(
     --------
     .. include:: ../docstrings/violinplot.rst

-    """
-    ).format(**_categorical_docs)
-boxenplot.__doc__ = dedent(
-    """    Draw an enhanced box plot for larger datasets.
+    """).format(**_categorical_docs)
+
+
+def boxenplot(
+    data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,
+    orient=None, color=None, palette=None, saturation=.75, fill=True,
+    dodge="auto", width=.8, gap=0, linewidth=None, linecolor=None,
+    width_method="exponential", k_depth="tukey", outlier_prop=0.007, trust_alpha=0.05,
+    showfliers=True, hue_norm=None, log_scale=None, native_scale=False, formatter=None,
+    legend="auto", scale=deprecated, box_kws=None, flier_kws=None, line_kws=None,
+    ax=None, **kwargs,
+):
+
+    p = _CategoricalPlotter(
+        data=data,
+        variables=dict(x=x, y=y, hue=hue),
+        order=order,
+        orient=orient,
+        color=color,
+        legend=legend,
+    )
+
+    if ax is None:
+        ax = plt.gca()
+
+    if p.plot_data.empty:
+        return ax
+
+    if dodge == "auto":
+        # Needs to be before scale_categorical changes the coordinate series dtype
+        dodge = p._dodge_needed()
+
+    if p.var_types.get(p.orient) == "categorical" or not native_scale:
+        p.scale_categorical(p.orient, order=order, formatter=formatter)
+
+    p._attach(ax, log_scale=log_scale)
+
+    # Deprecations to remove in v0.14.0.
+    hue_order = p._palette_without_hue_backcompat(palette, hue_order)
+    palette, hue_order = p._hue_backcompat(color, palette, hue_order)
+
+    # Longer-term deprecations
+    width_method = p._boxen_scale_backcompat(scale, width_method)
+
+    saturation = saturation if fill else 1
+    p.map_hue(palette=palette, order=hue_order, norm=hue_norm, saturation=saturation)
+    color = _default_color(
+        ax.fill_between, hue, color,
+        {},  # TODO how to get default color?
+        # {k: v for k, v in kwargs.items() if k in ["c", "color", "fc", "facecolor"]},
+        saturation=saturation,
+    )
+    linecolor = p._complement_color(linecolor, color, p._hue_map)
+
+    p.plot_boxens(
+        width=width,
+        dodge=dodge,
+        gap=gap,
+        fill=fill,
+        color=color,
+        linecolor=linecolor,
+        linewidth=linewidth,
+        width_method=width_method,
+        k_depth=k_depth,
+        outlier_prop=outlier_prop,
+        trust_alpha=trust_alpha,
+        showfliers=showfliers,
+        box_kws=box_kws,
+        flier_kws=flier_kws,
+        line_kws=line_kws,
+        plot_kws=kwargs,
+    )
+
+    p._add_axis_labels(ax)
+    p._adjust_cat_axis(ax, axis=p.orient)
+
+    return ax
+
+
+boxenplot.__doc__ = dedent("""\
+    Draw an enhanced box plot for larger datasets.

     This style of plot was originally named a "letter value" plot because it
     shows a large number of quantiles that are defined as "letter values".  It
@@ -578,10 +2068,72 @@ boxenplot.__doc__ = dedent(
     --------
     .. include:: ../docstrings/boxenplot.rst

-    """
-    ).format(**_categorical_docs)
-stripplot.__doc__ = dedent(
-    """    Draw a categorical scatterplot using jitter to reduce overplotting.
+    """).format(**_categorical_docs)
+
+
+def stripplot(
+    data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,
+    jitter=True, dodge=False, orient=None, color=None, palette=None,
+    size=5, edgecolor=default, linewidth=0,
+    hue_norm=None, log_scale=None, native_scale=False, formatter=None, legend="auto",
+    ax=None, **kwargs
+):
+
+    p = _CategoricalPlotter(
+        data=data,
+        variables=dict(x=x, y=y, hue=hue),
+        order=order,
+        orient=orient,
+        color=color,
+        legend=legend,
+    )
+
+    if ax is None:
+        ax = plt.gca()
+
+    if p.plot_data.empty:
+        return ax
+
+    if p.var_types.get(p.orient) == "categorical" or not native_scale:
+        p.scale_categorical(p.orient, order=order, formatter=formatter)
+
+    p._attach(ax, log_scale=log_scale)
+
+    # Deprecations to remove in v0.14.0.
+    hue_order = p._palette_without_hue_backcompat(palette, hue_order)
+    palette, hue_order = p._hue_backcompat(color, palette, hue_order)
+
+    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
+    color = _default_color(ax.scatter, hue, color, kwargs)
+    edgecolor = p._complement_color(edgecolor, color, p._hue_map)
+
+    kwargs.setdefault("zorder", 3)
+    size = kwargs.get("s", size)
+
+    kwargs.update(
+        s=size ** 2,
+        edgecolor=edgecolor,
+        linewidth=linewidth,
+    )
+
+    p.plot_strips(
+        jitter=jitter,
+        dodge=dodge,
+        color=color,
+        plot_kws=kwargs,
+    )
+
+    # XXX this happens inside a plotting method in the distribution plots
+    # but maybe it's better out here? Alternatively, we have an open issue
+    # suggesting that _attach could add default axes labels, which seems smart.
+    p._add_axis_labels(ax)
+    p._adjust_cat_axis(ax, axis=p.orient)
+
+    return ax
+
+
+stripplot.__doc__ = dedent("""\
+    Draw a categorical scatterplot using jitter to reduce overplotting.

     A strip plot can be drawn on its own, but it is also a good complement
     to a box or violin plot in cases where you want to show all observations
@@ -641,10 +2193,75 @@ stripplot.__doc__ = dedent(
     --------
     .. include:: ../docstrings/stripplot.rst

-    """
-    ).format(**_categorical_docs)
-swarmplot.__doc__ = dedent(
-    """    Draw a categorical scatterplot with points adjusted to be non-overlapping.
+    """).format(**_categorical_docs)
+
+
+def swarmplot(
+    data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,
+    dodge=False, orient=None, color=None, palette=None,
+    size=5, edgecolor=None, linewidth=0, hue_norm=None, log_scale=None,
+    native_scale=False, formatter=None, legend="auto", warn_thresh=.05,
+    ax=None, **kwargs
+):
+
+    p = _CategoricalPlotter(
+        data=data,
+        variables=dict(x=x, y=y, hue=hue),
+        order=order,
+        orient=orient,
+        color=color,
+        legend=legend,
+    )
+
+    if ax is None:
+        ax = plt.gca()
+
+    if p.plot_data.empty:
+        return ax
+
+    if p.var_types.get(p.orient) == "categorical" or not native_scale:
+        p.scale_categorical(p.orient, order=order, formatter=formatter)
+
+    p._attach(ax, log_scale=log_scale)
+
+    if not p.has_xy_data:
+        return ax
+
+    # Deprecations to remove in v0.14.0.
+    hue_order = p._palette_without_hue_backcompat(palette, hue_order)
+    palette, hue_order = p._hue_backcompat(color, palette, hue_order)
+
+    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
+    color = _default_color(ax.scatter, hue, color, kwargs)
+    edgecolor = p._complement_color(edgecolor, color, p._hue_map)
+
+    kwargs.setdefault("zorder", 3)
+    size = kwargs.get("s", size)
+
+    if linewidth is None:
+        linewidth = size / 10
+
+    kwargs.update(dict(
+        s=size ** 2,
+        edgecolor=edgecolor,
+        linewidth=linewidth,
+    ))
+
+    p.plot_swarms(
+        dodge=dodge,
+        color=color,
+        warn_thresh=warn_thresh,
+        plot_kws=kwargs,
+    )
+
+    p._add_axis_labels(ax)
+    p._adjust_cat_axis(ax, axis=p.orient)
+
+    return ax
+
+
+swarmplot.__doc__ = dedent("""\
+    Draw a categorical scatterplot with points adjusted to be non-overlapping.

     This function is similar to :func:`stripplot`, but the points are adjusted
     (only along the categorical axis) so that they don't overlap. This gives a
@@ -702,10 +2319,84 @@ swarmplot.__doc__ = dedent(
     --------
     .. include:: ../docstrings/swarmplot.rst

-    """
-    ).format(**_categorical_docs)
-barplot.__doc__ = dedent(
-    """    Show point estimates and errors as rectangular bars.
+    """).format(**_categorical_docs)
+
+
+def barplot(
+    data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,
+    estimator="mean", errorbar=("ci", 95), n_boot=1000, seed=None, units=None,
+    weights=None, orient=None, color=None, palette=None, saturation=.75,
+    fill=True, hue_norm=None, width=.8, dodge="auto", gap=0, log_scale=None,
+    native_scale=False, formatter=None, legend="auto", capsize=0, err_kws=None,
+    ci=deprecated, errcolor=deprecated, errwidth=deprecated, ax=None, **kwargs,
+):
+
+    errorbar = utils._deprecate_ci(errorbar, ci)
+
+    # Be backwards compatible with len passed directly, which
+    # does not work in Series.agg (maybe a pandas bug?)
+    if estimator is len:
+        estimator = "size"
+
+    p = _CategoricalAggPlotter(
+        data=data,
+        variables=dict(x=x, y=y, hue=hue, units=units, weight=weights),
+        order=order,
+        orient=orient,
+        color=color,
+        legend=legend,
+    )
+
+    if ax is None:
+        ax = plt.gca()
+
+    if p.plot_data.empty:
+        return ax
+
+    if dodge == "auto":
+        # Needs to be before scale_categorical changes the coordinate series dtype
+        dodge = p._dodge_needed()
+
+    if p.var_types.get(p.orient) == "categorical" or not native_scale:
+        p.scale_categorical(p.orient, order=order, formatter=formatter)
+
+    p._attach(ax, log_scale=log_scale)
+
+    # Deprecations to remove in v0.14.0.
+    hue_order = p._palette_without_hue_backcompat(palette, hue_order)
+    palette, hue_order = p._hue_backcompat(color, palette, hue_order)
+
+    saturation = saturation if fill else 1
+    p.map_hue(palette=palette, order=hue_order, norm=hue_norm, saturation=saturation)
+    color = _default_color(ax.bar, hue, color, kwargs, saturation=saturation)
+
+    agg_cls = WeightedAggregator if "weight" in p.plot_data else EstimateAggregator
+    aggregator = agg_cls(estimator, errorbar, n_boot=n_boot, seed=seed)
+    err_kws = {} if err_kws is None else normalize_kwargs(err_kws, mpl.lines.Line2D)
+
+    # Deprecations to remove in v0.15.0.
+    err_kws, capsize = p._err_kws_backcompat(err_kws, errcolor, errwidth, capsize)
+
+    p.plot_bars(
+        aggregator=aggregator,
+        dodge=dodge,
+        width=width,
+        gap=gap,
+        color=color,
+        fill=fill,
+        capsize=capsize,
+        err_kws=err_kws,
+        plot_kws=kwargs,
+    )
+
+    p._add_axis_labels(ax)
+    p._adjust_cat_axis(ax, axis=p.orient)
+
+    return ax
+
+
+barplot.__doc__ = dedent("""\
+    Show point estimates and errors as rectangular bars.

     A bar plot represents an aggregate or statistical estimate for a numeric
     variable with the height of each rectangle and indicates the uncertainty
@@ -770,10 +2461,77 @@ barplot.__doc__ = dedent(
     --------
     .. include:: ../docstrings/barplot.rst

-    """
-    ).format(**_categorical_docs)
-pointplot.__doc__ = dedent(
-    """    Show point estimates and errors using lines with markers.
+    """).format(**_categorical_docs)
+
+
+def pointplot(
+    data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,
+    estimator="mean", errorbar=("ci", 95), n_boot=1000, seed=None, units=None,
+    weights=None, color=None, palette=None, hue_norm=None, markers=default,
+    linestyles=default, dodge=False, log_scale=None, native_scale=False,
+    orient=None, capsize=0, formatter=None, legend="auto", err_kws=None,
+    ci=deprecated, errwidth=deprecated, join=deprecated, scale=deprecated,
+    ax=None, **kwargs,
+):
+
+    errorbar = utils._deprecate_ci(errorbar, ci)
+
+    p = _CategoricalAggPlotter(
+        data=data,
+        variables=dict(x=x, y=y, hue=hue, units=units, weight=weights),
+        order=order,
+        orient=orient,
+        # Handle special backwards compatibility where pointplot originally
+        # did *not* default to multi-colored unless a palette was specified.
+        color="C0" if (color is None and palette is None) else color,
+        legend=legend,
+    )
+
+    if ax is None:
+        ax = plt.gca()
+
+    if p.plot_data.empty:
+        return ax
+
+    if p.var_types.get(p.orient) == "categorical" or not native_scale:
+        p.scale_categorical(p.orient, order=order, formatter=formatter)
+
+    p._attach(ax, log_scale=log_scale)
+
+    # Deprecations to remove in v0.14.0.
+    hue_order = p._palette_without_hue_backcompat(palette, hue_order)
+    palette, hue_order = p._hue_backcompat(color, palette, hue_order)
+
+    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
+    color = _default_color(ax.plot, hue, color, kwargs)
+
+    agg_cls = WeightedAggregator if "weight" in p.plot_data else EstimateAggregator
+    aggregator = agg_cls(estimator, errorbar, n_boot=n_boot, seed=seed)
+    err_kws = {} if err_kws is None else normalize_kwargs(err_kws, mpl.lines.Line2D)
+
+    # Deprecations to remove in v0.15.0.
+    p._point_kwargs_backcompat(scale, join, kwargs)
+    err_kws, capsize = p._err_kws_backcompat(err_kws, None, errwidth, capsize)
+
+    p.plot_points(
+        aggregator=aggregator,
+        markers=markers,
+        linestyles=linestyles,
+        dodge=dodge,
+        color=color,
+        capsize=capsize,
+        err_kws=err_kws,
+        plot_kws=kwargs,
+    )
+
+    p._add_axis_labels(ax)
+    p._adjust_cat_axis(ax, axis=p.orient)
+
+    return ax
+
+
+pointplot.__doc__ = dedent("""\
+    Show point estimates and errors using lines with markers.

     A point plot represents an estimate of central tendency for a numeric
     variable by the position of the dot and provides some indication of the
@@ -851,10 +2609,89 @@ pointplot.__doc__ = dedent(
     --------
     .. include:: ../docstrings/pointplot.rst

-    """
-    ).format(**_categorical_docs)
-countplot.__doc__ = dedent(
-    """    Show the counts of observations in each categorical bin using bars.
+    """).format(**_categorical_docs)
+
+
+def countplot(
+    data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,
+    orient=None, color=None, palette=None, saturation=.75, fill=True, hue_norm=None,
+    stat="count", width=.8, dodge="auto", gap=0, log_scale=None, native_scale=False,
+    formatter=None, legend="auto", ax=None, **kwargs
+):
+
+    if x is None and y is not None:
+        orient = "y"
+        x = 1 if list(y) else None
+    elif x is not None and y is None:
+        orient = "x"
+        y = 1 if list(x) else None
+    elif x is not None and y is not None:
+        raise TypeError("Cannot pass values for both `x` and `y`.")
+
+    p = _CategoricalAggPlotter(
+        data=data,
+        variables=dict(x=x, y=y, hue=hue),
+        order=order,
+        orient=orient,
+        color=color,
+        legend=legend,
+    )
+
+    if ax is None:
+        ax = plt.gca()
+
+    if p.plot_data.empty:
+        return ax
+
+    if dodge == "auto":
+        # Needs to be before scale_categorical changes the coordinate series dtype
+        dodge = p._dodge_needed()
+
+    if p.var_types.get(p.orient) == "categorical" or not native_scale:
+        p.scale_categorical(p.orient, order=order, formatter=formatter)
+
+    p._attach(ax, log_scale=log_scale)
+
+    # Deprecations to remove in v0.14.0.
+    hue_order = p._palette_without_hue_backcompat(palette, hue_order)
+    palette, hue_order = p._hue_backcompat(color, palette, hue_order)
+
+    saturation = saturation if fill else 1
+    p.map_hue(palette=palette, order=hue_order, norm=hue_norm, saturation=saturation)
+    color = _default_color(ax.bar, hue, color, kwargs, saturation)
+
+    count_axis = {"x": "y", "y": "x"}[p.orient]
+    if p.input_format == "wide":
+        p.plot_data[count_axis] = 1
+
+    _check_argument("stat", ["count", "percent", "probability", "proportion"], stat)
+    p.variables[count_axis] = stat
+    if stat != "count":
+        denom = 100 if stat == "percent" else 1
+        p.plot_data[count_axis] /= len(p.plot_data) / denom
+
+    aggregator = EstimateAggregator("sum", errorbar=None)
+
+    p.plot_bars(
+        aggregator=aggregator,
+        dodge=dodge,
+        width=width,
+        gap=gap,
+        color=color,
+        fill=fill,
+        capsize=0,
+        err_kws={},
+        plot_kws=kwargs,
+    )
+
+    p._add_axis_labels(ax)
+    p._adjust_cat_axis(ax, axis=p.orient)
+
+    return ax
+
+
+countplot.__doc__ = dedent("""\
+    Show the counts of observations in each categorical bin using bars.

     A count plot can be thought of as a histogram across a categorical, instead
     of quantitative, variable. The basic API and options are identical to those
@@ -904,10 +2741,401 @@ countplot.__doc__ = dedent(
     --------
     .. include:: ../docstrings/countplot.rst

-    """
-    ).format(**_categorical_docs)
-catplot.__doc__ = dedent(
-    """    Figure-level interface for drawing categorical plots onto a FacetGrid.
+    """).format(**_categorical_docs)
+
+
+def catplot(
+    data=None, *, x=None, y=None, hue=None, row=None, col=None, kind="strip",
+    estimator="mean", errorbar=("ci", 95), n_boot=1000, seed=None, units=None,
+    weights=None, order=None, hue_order=None, row_order=None, col_order=None,
+    col_wrap=None, height=5, aspect=1, log_scale=None, native_scale=False,
+    formatter=None, orient=None, color=None, palette=None, hue_norm=None,
+    legend="auto", legend_out=True, sharex=True, sharey=True,
+    margin_titles=False, facet_kws=None, ci=deprecated, **kwargs
+):
+
+    # Check for attempt to plot onto specific axes and warn
+    if "ax" in kwargs:
+        msg = ("catplot is a figure-level function and does not accept "
+               f"target axes. You may wish to try {kind}plot")
+        warnings.warn(msg, UserWarning)
+        kwargs.pop("ax")
+
+    desaturated_kinds = ["bar", "count", "box", "violin", "boxen"]
+    undodged_kinds = ["strip", "swarm", "point"]
+
+    if kind in ["bar", "point", "count"]:
+        Plotter = _CategoricalAggPlotter
+    else:
+        Plotter = _CategoricalPlotter
+
+    if kind == "count":
+        if x is None and y is not None:
+            orient = "y"
+            x = 1
+        elif x is not None and y is None:
+            orient = "x"
+            y = 1
+        elif x is not None and y is not None:
+            raise ValueError("Cannot pass values for both `x` and `y`.")
+
+    p = Plotter(
+        data=data,
+        variables=dict(
+            x=x, y=y, hue=hue, row=row, col=col, units=units, weight=weights
+        ),
+        order=order,
+        orient=orient,
+        # Handle special backwards compatibility where pointplot originally
+        # did *not* default to multi-colored unless a palette was specified.
+        color="C0" if kind == "point" and palette is None and color is None else color,
+        legend=legend,
+    )
+
+    for var in ["row", "col"]:
+        # Handle faceting variables that lack name information
+        if var in p.variables and p.variables[var] is None:
+            p.variables[var] = f"_{var}_"
+
+    # Adapt the plot_data dataframe for use with FacetGrid
+    facet_data = p.plot_data.rename(columns=p.variables)
+    facet_data = facet_data.loc[:, ~facet_data.columns.duplicated()]
+
+    col_name = p.variables.get("col", None)
+    row_name = p.variables.get("row", None)
+
+    if facet_kws is None:
+        facet_kws = {}
+
+    g = FacetGrid(
+        data=facet_data, row=row_name, col=col_name, col_wrap=col_wrap,
+        row_order=row_order, col_order=col_order, sharex=sharex, sharey=sharey,
+        legend_out=legend_out, margin_titles=margin_titles,
+        height=height, aspect=aspect,
+        **facet_kws,
+    )
+
+    # Capture this here because scale_categorical is going to insert a (null)
+    # x variable even if it is empty. It's not clear whether that needs to
+    # happen or if disabling that is the cleaner solution.
+    has_xy_data = p.has_xy_data
+
+    if not native_scale or p.var_types[p.orient] == "categorical":
+        p.scale_categorical(p.orient, order=order, formatter=formatter)
+
+    p._attach(g, log_scale=log_scale)
+
+    if not has_xy_data:
+        return g
+
+    # Deprecations to remove in v0.14.0.
+    hue_order = p._palette_without_hue_backcompat(palette, hue_order)
+    palette, hue_order = p._hue_backcompat(color, palette, hue_order)
+
+    # Other deprecations
+    errorbar = utils._deprecate_ci(errorbar, ci)
+
+    saturation = kwargs.pop(
+        "saturation",
+        0.75 if kind in desaturated_kinds and kwargs.get("fill", True) else 1
+    )
+    p.map_hue(palette=palette, order=hue_order, norm=hue_norm, saturation=saturation)
+
+    # Set a default color
+    # Otherwise each artist will be plotted separately and trip the color cycle
+    if hue is None:
+        color = "C0" if color is None else color
+        if saturation < 1:
+            color = desaturate(color, saturation)
+
+    if kind in ["strip", "swarm"]:
+        kwargs = normalize_kwargs(kwargs, mpl.collections.PathCollection)
+        kwargs["edgecolor"] = p._complement_color(
+            kwargs.pop("edgecolor", default), color, p._hue_map
+        )
+
+    width = kwargs.pop("width", 0.8)
+    dodge = kwargs.pop("dodge", False if kind in undodged_kinds else "auto")
+    if dodge == "auto":
+        dodge = p._dodge_needed()
+
+    if "weight" in p.plot_data:
+        if kind not in ["bar", "point"]:
+            msg = f"The `weights` parameter has no effect with kind={kind!r}."
+            warnings.warn(msg, stacklevel=2)
+        agg_cls = WeightedAggregator
+    else:
+        agg_cls = EstimateAggregator
+
+    if kind == "strip":
+
+        jitter = kwargs.pop("jitter", True)
+        plot_kws = kwargs.copy()
+        plot_kws.setdefault("zorder", 3)
+        plot_kws.setdefault("linewidth", 0)
+        if "s" not in plot_kws:
+            plot_kws["s"] = plot_kws.pop("size", 5) ** 2
+
+        p.plot_strips(
+            jitter=jitter,
+            dodge=dodge,
+            color=color,
+            plot_kws=plot_kws,
+        )
+
+    elif kind == "swarm":
+
+        warn_thresh = kwargs.pop("warn_thresh", .05)
+        plot_kws = kwargs.copy()
+        plot_kws.setdefault("zorder", 3)
+        if "s" not in plot_kws:
+            plot_kws["s"] = plot_kws.pop("size", 5) ** 2
+
+        if plot_kws.setdefault("linewidth", 0) is None:
+            plot_kws["linewidth"] = np.sqrt(plot_kws["s"]) / 10
+
+        p.plot_swarms(
+            dodge=dodge,
+            color=color,
+            warn_thresh=warn_thresh,
+            plot_kws=plot_kws,
+        )
+
+    elif kind == "box":
+
+        plot_kws = kwargs.copy()
+        gap = plot_kws.pop("gap", 0)
+        fill = plot_kws.pop("fill", True)
+        whis = plot_kws.pop("whis", 1.5)
+        linewidth = plot_kws.pop("linewidth", None)
+        fliersize = plot_kws.pop("fliersize", 5)
+        linecolor = p._complement_color(
+            plot_kws.pop("linecolor", "auto"), color, p._hue_map
+        )
+
+        p.plot_boxes(
+            width=width,
+            dodge=dodge,
+            gap=gap,
+            fill=fill,
+            whis=whis,
+            color=color,
+            linecolor=linecolor,
+            linewidth=linewidth,
+            fliersize=fliersize,
+            plot_kws=plot_kws,
+        )
+
+    elif kind == "violin":
+
+        plot_kws = kwargs.copy()
+        gap = plot_kws.pop("gap", 0)
+        fill = plot_kws.pop("fill", True)
+        split = plot_kws.pop("split", False)
+        inner = plot_kws.pop("inner", "box")
+        density_norm = plot_kws.pop("density_norm", "area")
+        common_norm = plot_kws.pop("common_norm", False)
+
+        scale = plot_kws.pop("scale", deprecated)
+        scale_hue = plot_kws.pop("scale_hue", deprecated)
+        density_norm, common_norm = p._violin_scale_backcompat(
+            scale, scale_hue, density_norm, common_norm,
+        )
+
+        bw_method = p._violin_bw_backcompat(
+            plot_kws.pop("bw", deprecated), plot_kws.pop("bw_method", "scott")
+        )
+        kde_kws = dict(
+            cut=plot_kws.pop("cut", 2),
+            gridsize=plot_kws.pop("gridsize", 100),
+            bw_adjust=plot_kws.pop("bw_adjust", 1),
+            bw_method=bw_method,
+        )
+
+        inner_kws = plot_kws.pop("inner_kws", {}).copy()
+        linewidth = plot_kws.pop("linewidth", None)
+        linecolor = plot_kws.pop("linecolor", "auto")
+        linecolor = p._complement_color(linecolor, color, p._hue_map)
+
+        p.plot_violins(
+            width=width,
+            dodge=dodge,
+            gap=gap,
+            split=split,
+            color=color,
+            fill=fill,
+            linecolor=linecolor,
+            linewidth=linewidth,
+            inner=inner,
+            density_norm=density_norm,
+            common_norm=common_norm,
+            kde_kws=kde_kws,
+            inner_kws=inner_kws,
+            plot_kws=plot_kws,
+        )
+
+    elif kind == "boxen":
+
+        plot_kws = kwargs.copy()
+        gap = plot_kws.pop("gap", 0)
+        fill = plot_kws.pop("fill", True)
+        linecolor = plot_kws.pop("linecolor", "auto")
+        linewidth = plot_kws.pop("linewidth", None)
+        k_depth = plot_kws.pop("k_depth", "tukey")
+        width_method = plot_kws.pop("width_method", "exponential")
+        outlier_prop = plot_kws.pop("outlier_prop", 0.007)
+        trust_alpha = plot_kws.pop("trust_alpha", 0.05)
+        showfliers = plot_kws.pop("showfliers", True)
+        box_kws = plot_kws.pop("box_kws", {})
+        flier_kws = plot_kws.pop("flier_kws", {})
+        line_kws = plot_kws.pop("line_kws", {})
+        if "scale" in plot_kws:
+            width_method = p._boxen_scale_backcompat(
+                plot_kws["scale"], width_method
+            )
+        linecolor = p._complement_color(linecolor, color, p._hue_map)
+
+        p.plot_boxens(
+            width=width,
+            dodge=dodge,
+            gap=gap,
+            fill=fill,
+            color=color,
+            linecolor=linecolor,
+            linewidth=linewidth,
+            width_method=width_method,
+            k_depth=k_depth,
+            outlier_prop=outlier_prop,
+            trust_alpha=trust_alpha,
+            showfliers=showfliers,
+            box_kws=box_kws,
+            flier_kws=flier_kws,
+            line_kws=line_kws,
+            plot_kws=plot_kws,
+        )
+
+    elif kind == "point":
+
+        aggregator = agg_cls(estimator, errorbar, n_boot=n_boot, seed=seed)
+
+        markers = kwargs.pop("markers", default)
+        linestyles = kwargs.pop("linestyles", default)
+
+        # Deprecations to remove in v0.15.0.
+        # TODO Uncomment when removing deprecation backcompat
+        # capsize = kwargs.pop("capsize", 0)
+        # err_kws = normalize_kwargs(kwargs.pop("err_kws", {}), mpl.lines.Line2D)
+        p._point_kwargs_backcompat(
+            kwargs.pop("scale", deprecated),
+            kwargs.pop("join", deprecated),
+            kwargs
+        )
+        err_kws, capsize = p._err_kws_backcompat(
+            normalize_kwargs(kwargs.pop("err_kws", {}), mpl.lines.Line2D),
+            None,
+            errwidth=kwargs.pop("errwidth", deprecated),
+            capsize=kwargs.pop("capsize", 0),
+        )
+
+        p.plot_points(
+            aggregator=aggregator,
+            markers=markers,
+            linestyles=linestyles,
+            dodge=dodge,
+            color=color,
+            capsize=capsize,
+            err_kws=err_kws,
+            plot_kws=kwargs,
+        )
+
+    elif kind == "bar":
+
+        aggregator = agg_cls(estimator, errorbar, n_boot=n_boot, seed=seed)
+
+        err_kws, capsize = p._err_kws_backcompat(
+            normalize_kwargs(kwargs.pop("err_kws", {}), mpl.lines.Line2D),
+            errcolor=kwargs.pop("errcolor", deprecated),
+            errwidth=kwargs.pop("errwidth", deprecated),
+            capsize=kwargs.pop("capsize", 0),
+        )
+        gap = kwargs.pop("gap", 0)
+        fill = kwargs.pop("fill", True)
+
+        p.plot_bars(
+            aggregator=aggregator,
+            dodge=dodge,
+            width=width,
+            gap=gap,
+            color=color,
+            fill=fill,
+            capsize=capsize,
+            err_kws=err_kws,
+            plot_kws=kwargs,
+        )
+
+    elif kind == "count":
+
+        aggregator = EstimateAggregator("sum", errorbar=None)
+
+        count_axis = {"x": "y", "y": "x"}[p.orient]
+        p.plot_data[count_axis] = 1
+
+        stat_options = ["count", "percent", "probability", "proportion"]
+        stat = _check_argument("stat", stat_options, kwargs.pop("stat", "count"))
+        p.variables[count_axis] = stat
+        if stat != "count":
+            denom = 100 if stat == "percent" else 1
+            p.plot_data[count_axis] /= len(p.plot_data) / denom
+
+        gap = kwargs.pop("gap", 0)
+        fill = kwargs.pop("fill", True)
+
+        p.plot_bars(
+            aggregator=aggregator,
+            dodge=dodge,
+            width=width,
+            gap=gap,
+            color=color,
+            fill=fill,
+            capsize=0,
+            err_kws={},
+            plot_kws=kwargs,
+        )
+
+    else:
+        msg = (
+            f"Invalid `kind`: {kind!r}. Options are 'strip', 'swarm', "
+            "'box', 'boxen', 'violin', 'bar', 'count', and 'point'."
+        )
+        raise ValueError(msg)
+
+    for ax in g.axes.flat:
+        p._adjust_cat_axis(ax, axis=p.orient)
+
+    g.set_axis_labels(p.variables.get("x"), p.variables.get("y"))
+    g.set_titles()
+    g.tight_layout()
+
+    for ax in g.axes.flat:
+        g._update_legend_data(ax)
+        ax.legend_ = None
+
+    if legend == "auto":
+        show_legend = not p._redundant_hue and p.input_format != "wide"
+    else:
+        show_legend = bool(legend)
+    if show_legend:
+        g.add_legend(title=p.variables.get("hue"), label_order=hue_order)
+
+    if data is not None:
+        # Replace the dataframe on the FacetGrid for any subsequent maps
+        g.data = data
+
+    return g
+
+
+catplot.__doc__ = dedent("""\
+    Figure-level interface for drawing categorical plots onto a FacetGrid.

     This function provides access to several axes-level functions that
     show the relationship between a numerical and one or more categorical
@@ -983,102 +3211,246 @@ catplot.__doc__ = dedent(
     --------
     .. include:: ../docstrings/catplot.rst

-    """
-    ).format(**_categorical_docs)
+    """).format(**_categorical_docs)
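A minimal sketch of the figure-level interface defined above, which dispatches on `kind` and facets through the FacetGrid it builds internally (illustrative; the "tips" dataset and columns are assumptions, not part of the patch):

    import seaborn as sns

    tips = sns.load_dataset("tips")  # assumed example dataset

    # kind="violin" routes through the same plot_violins path as violinplot,
    # while col-faceting is handled by the internally constructed FacetGrid.
    g = sns.catplot(
        data=tips, x="day", y="total_bill", hue="sex",
        col="time", kind="violin", split=True, height=4, aspect=.8,
    )
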


 class Beeswarm:
     """Modifies a scatterplot artist to show a beeswarm plot."""
+    def __init__(self, orient="x", width=0.8, warn_thresh=.05):

-    def __init__(self, orient='x', width=0.8, warn_thresh=0.05):
         self.orient = orient
         self.width = width
         self.warn_thresh = warn_thresh

     def __call__(self, points, center):
         """Swarm `points`, a PathCollection, around the `center` position."""
+        # Convert from point size (area) to diameter
+
         ax = points.axes
         dpi = ax.figure.dpi
+
+        # Get the original positions of the points
         orig_xy_data = points.get_offsets()
-        cat_idx = 1 if self.orient == 'y' else 0
+
+        # Reset the categorical positions to the center line
+        cat_idx = 1 if self.orient == "y" else 0
         orig_xy_data[:, cat_idx] = center
+
+        # Transform the data coordinates to point coordinates.
+        # We'll figure out the swarm positions in the latter
+        # and then convert back to data coordinates and replot
         orig_x_data, orig_y_data = orig_xy_data.T
         orig_xy = ax.transData.transform(orig_xy_data)
-        if self.orient == 'y':
+
+        # Order the variables so that x is the categorical axis
+        if self.orient == "y":
             orig_xy = orig_xy[:, [1, 0]]
+
+        # Add a column with each point's radius
         sizes = points.get_sizes()
         if sizes.size == 1:
             sizes = np.repeat(sizes, orig_xy.shape[0])
         edge = points.get_linewidth().item()
         radii = (np.sqrt(sizes) + edge) / 2 * (dpi / 72)
         orig_xy = np.c_[orig_xy, radii]
+
+        # Sort along the value axis to facilitate the beeswarm
         sorter = np.argsort(orig_xy[:, 1])
         orig_xyr = orig_xy[sorter]
+
+        # Adjust points along the categorical axis to prevent overlaps
         new_xyr = np.empty_like(orig_xyr)
         new_xyr[sorter] = self.beeswarm(orig_xyr)
-        if self.orient == 'y':
+
+        # Transform the point coordinates back to data coordinates
+        if self.orient == "y":
             new_xy = new_xyr[:, [1, 0]]
         else:
             new_xy = new_xyr[:, :2]
         new_x_data, new_y_data = ax.transData.inverted().transform(new_xy).T
+
+        # Add gutters
         t_fwd, t_inv = _get_transform_functions(ax, self.orient)
-        if self.orient == 'y':
+        if self.orient == "y":
             self.add_gutters(new_y_data, center, t_fwd, t_inv)
         else:
             self.add_gutters(new_x_data, center, t_fwd, t_inv)
-        if self.orient == 'y':
+
+        # Reposition the points so they do not overlap
+        if self.orient == "y":
             points.set_offsets(np.c_[orig_x_data, new_y_data])
         else:
             points.set_offsets(np.c_[new_x_data, orig_y_data])

     def beeswarm(self, orig_xyr):
         """Adjust x position of points to avoid overlaps."""
-        pass
+        # In this method, `x` is always the categorical axis
+        # Center of the swarm, in point coordinates
+        midline = orig_xyr[0, 0]
+
+        # Start the swarm with the first point
+        swarm = np.atleast_2d(orig_xyr[0])
+
+        # Loop over the remaining points
+        for xyr_i in orig_xyr[1:]:
+
+            # Find the points in the swarm that could possibly
+            # overlap with the point we are currently placing
+            neighbors = self.could_overlap(xyr_i, swarm)
+
+            # Find positions that would be valid individually
+            # with respect to each of the swarm neighbors
+            candidates = self.position_candidates(xyr_i, neighbors)
+
+            # Sort candidates by their centrality
+            offsets = np.abs(candidates[:, 0] - midline)
+            candidates = candidates[np.argsort(offsets)]
+
+            # Find the first candidate that does not overlap any neighbors
+            new_xyr_i = self.first_non_overlapping_candidate(candidates, neighbors)
+
+            # Place it into the swarm
+            swarm = np.vstack([swarm, new_xyr_i])
+
+        return swarm

     def could_overlap(self, xyr_i, swarm):
         """Return a list of all swarm points that could overlap with target."""
-        pass
+        # Because we work backwards through the swarm and can short-circuit,
+        # the for-loop is faster than vectorization
+        _, y_i, r_i = xyr_i
+        neighbors = []
+        for xyr_j in reversed(swarm):
+            _, y_j, r_j = xyr_j
+            if (y_i - y_j) < (r_i + r_j):
+                neighbors.append(xyr_j)
+            else:
+                break
+        return np.array(neighbors)[::-1]

     def position_candidates(self, xyr_i, neighbors):
         """Return a list of coordinates that might be valid by adjusting x."""
-        pass
+        candidates = [xyr_i]
+        x_i, y_i, r_i = xyr_i
+        left_first = True
+        for x_j, y_j, r_j in neighbors:
+            dy = y_i - y_j
+            dx = np.sqrt(max((r_i + r_j) ** 2 - dy ** 2, 0)) * 1.05
+            cl, cr = (x_j - dx, y_i, r_i), (x_j + dx, y_i, r_i)
+            if left_first:
+                new_candidates = [cl, cr]
+            else:
+                new_candidates = [cr, cl]
+            candidates.extend(new_candidates)
+            left_first = not left_first
+        return np.array(candidates)

     def first_non_overlapping_candidate(self, candidates, neighbors):
         """Find the first candidate that does not overlap with the swarm."""
-        pass
+
+        # If we have no neighbors, all candidates are good.
+        if len(neighbors) == 0:
+            return candidates[0]
+
+        neighbors_x = neighbors[:, 0]
+        neighbors_y = neighbors[:, 1]
+        neighbors_r = neighbors[:, 2]
+
+        for xyr_i in candidates:
+
+            x_i, y_i, r_i = xyr_i
+
+            dx = neighbors_x - x_i
+            dy = neighbors_y - y_i
+            sq_distances = np.square(dx) + np.square(dy)
+
+            sep_needed = np.square(neighbors_r + r_i)
+
+            # Good candidate does not overlap any of neighbors which means that
+            # squared distance between candidate and any of the neighbors has
+            # to be at least square of the summed radii
+            good_candidate = np.all(sq_distances >= sep_needed)
+
+            if good_candidate:
+                return xyr_i
+
+        raise RuntimeError(
+            "No non-overlapping candidates found. This should not happen."
+        )

     def add_gutters(self, points, center, trans_fwd, trans_inv):
         """Stop points from extending beyond their territory."""
-        pass
+        half_width = self.width / 2
+        low_gutter = trans_inv(trans_fwd(center) - half_width)
+        off_low = points < low_gutter
+        if off_low.any():
+            points[off_low] = low_gutter
+        high_gutter = trans_inv(trans_fwd(center) + half_width)
+        off_high = points > high_gutter
+        if off_high.any():
+            points[off_high] = high_gutter
+
+        gutter_prop = (off_high + off_low).sum() / len(points)
+        if gutter_prop > self.warn_thresh:
+            msg = (
+                "{:.1%} of the points cannot be placed; you may want "
+                "to decrease the size of the markers or use stripplot."
+            ).format(gutter_prop)
+            warnings.warn(msg, UserWarning)
+
+        return points
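The Beeswarm methods restored above place each point at the most central categorical offset that clears every already-placed neighbor; `position_candidates` derives the candidate offsets from circle tangency, dx = sqrt((r_i + r_j)^2 - dy^2), padded by 5%. A standalone sketch of that computation (illustrative only; `tangent_offsets` is not a seaborn function):

    import numpy as np

    def tangent_offsets(y_i, r_i, x_j, y_j, r_j, pad=1.05):
        # Categorical-axis positions at which a marker of radius r_i, fixed at
        # height y_i, just touches a neighbor at (x_j, y_j) with radius r_j.
        dy = y_i - y_j
        dx = np.sqrt(max((r_i + r_j) ** 2 - dy ** 2, 0)) * pad
        return x_j - dx, x_j + dx  # candidates left and right of the neighbor

    # Two unit-radius markers offset by 0.5 along the value axis need
    # sqrt(4 - 0.25) ~= 1.94 units of categorical separation before padding.
    print(tangent_offsets(y_i=0.5, r_i=1.0, x_j=0.0, y_j=0.0, r_j=1.0))
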


-BoxPlotArtists = namedtuple('BoxPlotArtists',
-    'box median whiskers caps fliers mean')
+BoxPlotArtists = namedtuple("BoxPlotArtists", "box median whiskers caps fliers mean")


 class BoxPlotContainer:

     def __init__(self, artist_dict):
-        self.boxes = artist_dict['boxes']
-        self.medians = artist_dict['medians']
-        self.whiskers = artist_dict['whiskers']
-        self.caps = artist_dict['caps']
-        self.fliers = artist_dict['fliers']
-        self.means = artist_dict['means']
+
+        self.boxes = artist_dict["boxes"]
+        self.medians = artist_dict["medians"]
+        self.whiskers = artist_dict["whiskers"]
+        self.caps = artist_dict["caps"]
+        self.fliers = artist_dict["fliers"]
+        self.means = artist_dict["means"]
+
         self._label = None
-        self._children = [*self.boxes, *self.medians, *self.whiskers, *self
-            .caps, *self.fliers, *self.means]
+        self._children = [
+            *self.boxes,
+            *self.medians,
+            *self.whiskers,
+            *self.caps,
+            *self.fliers,
+            *self.means,
+        ]

     def __repr__(self):
-        return f'<BoxPlotContainer object with {len(self.boxes)} boxes>'
+        return f"<BoxPlotContainer object with {len(self.boxes)} boxes>"

     def __getitem__(self, idx):
         pair_slice = slice(2 * idx, 2 * idx + 2)
-        return BoxPlotArtists(self.boxes[idx] if self.boxes else [], self.
-            medians[idx] if self.medians else [], self.whiskers[pair_slice] if
-            self.whiskers else [], self.caps[pair_slice] if self.caps else
-            [], self.fliers[idx] if self.fliers else [], self.means[idx] if
-            self.means else [])
+        return BoxPlotArtists(
+            self.boxes[idx] if self.boxes else [],
+            self.medians[idx] if self.medians else [],
+            self.whiskers[pair_slice] if self.whiskers else [],
+            self.caps[pair_slice] if self.caps else [],
+            self.fliers[idx] if self.fliers else [],
+            self.means[idx] if self.means else [],
+        )

     def __iter__(self):
         yield from (self[i] for i in range(len(self.boxes)))
+
+    def get_label(self):
+        return self._label
+
+    def set_label(self, value):
+        self._label = value
+
+    def get_children(self):
+        return self._children
+
+    def remove(self):
+        for child in self._children:
+            child.remove()
diff --git a/seaborn/cm.py b/seaborn/cm.py
index af7a3d4b..df7ce619 100644
--- a/seaborn/cm.py
+++ b/seaborn/cm.py
@@ -1,908 +1,1586 @@
 from matplotlib import colors
 from seaborn._compat import register_colormap
-_rocket_lut = [[0.01060815, 0.01808215, 0.10018654], [0.01428972, 
-    0.02048237, 0.10374486], [0.01831941, 0.0229766, 0.10738511], [
-    0.02275049, 0.02554464, 0.11108639], [0.02759119, 0.02818316, 
-    0.11483751], [0.03285175, 0.03088792, 0.11863035], [0.03853466, 
-    0.03365771, 0.12245873], [0.04447016, 0.03648425, 0.12631831], [
-    0.05032105, 0.03936808, 0.13020508], [0.05611171, 0.04224835, 
-    0.13411624], [0.0618531, 0.04504866, 0.13804929], [0.06755457, 
-    0.04778179, 0.14200206], [0.0732236, 0.05045047, 0.14597263], [
-    0.0788708, 0.05305461, 0.14995981], [0.08450105, 0.05559631, 0.15396203
-    ], [0.09011319, 0.05808059, 0.15797687], [0.09572396, 0.06050127, 
-    0.16200507], [0.10132312, 0.06286782, 0.16604287], [0.10692823, 
-    0.06517224, 0.17009175], [0.1125315, 0.06742194, 0.17414848], [
-    0.11813947, 0.06961499, 0.17821272], [0.12375803, 0.07174938, 
-    0.18228425], [0.12938228, 0.07383015, 0.18636053], [0.13501631, 
-    0.07585609, 0.19044109], [0.14066867, 0.0778224, 0.19452676], [
-    0.14633406, 0.07973393, 0.1986151], [0.15201338, 0.08159108, 0.20270523
-    ], [0.15770877, 0.08339312, 0.20679668], [0.16342174, 0.0851396, 
-    0.21088893], [0.16915387, 0.08682996, 0.21498104], [0.17489524, 
-    0.08848235, 0.2190294], [0.18065495, 0.09009031, 0.22303512], [
-    0.18643324, 0.09165431, 0.22699705], [0.19223028, 0.09317479, 
-    0.23091409], [0.19804623, 0.09465217, 0.23478512], [0.20388117, 
-    0.09608689, 0.23860907], [0.20973515, 0.09747934, 0.24238489], [
-    0.21560818, 0.09882993, 0.24611154], [0.22150014, 0.10013944, 0.2497868
-    ], [0.22741085, 0.10140876, 0.25340813], [0.23334047, 0.10263737, 
-    0.25697736], [0.23928891, 0.10382562, 0.2604936], [0.24525608, 
-    0.10497384, 0.26395596], [0.25124182, 0.10608236, 0.26736359], [
-    0.25724602, 0.10715148, 0.27071569], [0.26326851, 0.1081815, 0.27401148
-    ], [0.26930915, 0.1091727, 0.2772502], [0.27536766, 0.11012568, 
-    0.28043021], [0.28144375, 0.11104133, 0.2835489], [0.2875374, 
-    0.11191896, 0.28660853], [0.29364846, 0.11275876, 0.2896085], [
-    0.29977678, 0.11356089, 0.29254823], [0.30592213, 0.11432553, 
-    0.29542718], [0.31208435, 0.11505284, 0.29824485], [0.31826327, 
-    0.1157429, 0.30100076], [0.32445869, 0.11639585, 0.30369448], [
-    0.33067031, 0.11701189, 0.30632563], [0.33689808, 0.11759095, 0.3088938
-    ], [0.34314168, 0.11813362, 0.31139721], [0.34940101, 0.11863987, 
-    0.3138355], [0.355676, 0.11910909, 0.31620996], [0.36196644, 0.1195413,
-    0.31852037], [0.36827206, 0.11993653, 0.32076656], [0.37459292, 
-    0.12029443, 0.32294825], [0.38092887, 0.12061482, 0.32506528], [
-    0.38727975, 0.12089756, 0.3271175], [0.39364518, 0.12114272, 0.32910494
-    ], [0.40002537, 0.12134964, 0.33102734], [0.40642019, 0.12151801, 
-    0.33288464], [0.41282936, 0.12164769, 0.33467689], [0.41925278, 
-    0.12173833, 0.33640407], [0.42569057, 0.12178916, 0.33806605], [
-    0.43214263, 0.12179973, 0.33966284], [0.43860848, 0.12177004, 
-    0.34119475], [0.44508855, 0.12169883, 0.34266151], [0.45158266, 
-    0.12158557, 0.34406324], [0.45809049, 0.12142996, 0.34540024], [
-    0.46461238, 0.12123063, 0.34667231], [0.47114798, 0.12098721, 
-    0.34787978], [0.47769736, 0.12069864, 0.34902273], [0.48426077, 
-    0.12036349, 0.35010104], [0.49083761, 0.11998161, 0.35111537], [
-    0.49742847, 0.11955087, 0.35206533], [0.50403286, 0.11907081, 
-    0.35295152], [0.51065109, 0.11853959, 0.35377385], [0.51728314, 
-    0.1179558, 0.35453252], [0.52392883, 0.11731817, 0.35522789], [
-    0.53058853, 0.11662445, 0.35585982], [0.53726173, 0.11587369, 
-    0.35642903], [0.54394898, 0.11506307, 0.35693521], [0.5506426, 
-    0.11420757, 0.35737863], [0.55734473, 0.11330456, 0.35775059], [
-    0.56405586, 0.11235265, 0.35804813], [0.57077365, 0.11135597, 
-    0.35827146], [0.5774991, 0.11031233, 0.35841679], [0.58422945, 
-    0.10922707, 0.35848469], [0.59096382, 0.10810205, 0.35847347], [
-    0.59770215, 0.10693774, 0.35838029], [0.60444226, 0.10573912, 
-    0.35820487], [0.61118304, 0.10450943, 0.35794557], [0.61792306, 
-    0.10325288, 0.35760108], [0.62466162, 0.10197244, 0.35716891], [
-    0.63139686, 0.10067417, 0.35664819], [0.63812122, 0.09938212, 
-    0.35603757], [0.64483795, 0.0980891, 0.35533555], [0.65154562, 
-    0.09680192, 0.35454107], [0.65824241, 0.09552918, 0.3536529], [
-    0.66492652, 0.09428017, 0.3526697], [0.67159578, 0.09306598, 0.35159077
-    ], [0.67824099, 0.09192342, 0.3504148], [0.684863, 0.09085633, 
-    0.34914061], [0.69146268, 0.0898675, 0.34776864], [0.69803757, 
-    0.08897226, 0.3462986], [0.70457834, 0.0882129, 0.34473046], [
-    0.71108138, 0.08761223, 0.3430635], [0.7175507, 0.08716212, 0.34129974],
-    [0.72398193, 0.08688725, 0.33943958], [0.73035829, 0.0868623, 
-    0.33748452], [0.73669146, 0.08704683, 0.33543669], [0.74297501, 
-    0.08747196, 0.33329799], [0.74919318, 0.08820542, 0.33107204], [
-    0.75535825, 0.08919792, 0.32876184], [0.76145589, 0.09050716, 
-    0.32637117], [0.76748424, 0.09213602, 0.32390525], [0.77344838, 
-    0.09405684, 0.32136808], [0.77932641, 0.09634794, 0.31876642], [
-    0.78513609, 0.09892473, 0.31610488], [0.79085854, 0.10184672, 0.313391],
-    [0.7965014, 0.10506637, 0.31063031], [0.80205987, 0.10858333, 0.30783],
-    [0.80752799, 0.11239964, 0.30499738], [0.81291606, 0.11645784, 
-    0.30213802], [0.81820481, 0.12080606, 0.29926105], [0.82341472, 
-    0.12535343, 0.2963705], [0.82852822, 0.13014118, 0.29347474], [
-    0.83355779, 0.13511035, 0.29057852], [0.83850183, 0.14025098, 0.2876878
-    ], [0.84335441, 0.14556683, 0.28480819], [0.84813096, 0.15099892, 
-    0.281943], [0.85281737, 0.15657772, 0.27909826], [0.85742602, 0.1622583,
-    0.27627462], [0.86196552, 0.16801239, 0.27346473], [0.86641628, 
-    0.17387796, 0.27070818], [0.87079129, 0.17982114, 0.26797378], [
-    0.87507281, 0.18587368, 0.26529697], [0.87925878, 0.19203259, 
-    0.26268136], [0.8833417, 0.19830556, 0.26014181], [0.88731387, 
-    0.20469941, 0.25769539], [0.89116859, 0.21121788, 0.2553592], [
-    0.89490337, 0.21785614, 0.25314362], [0.8985026, 0.22463251, 0.25108745
-    ], [0.90197527, 0.23152063, 0.24918223], [0.90530097, 0.23854541, 
-    0.24748098], [0.90848638, 0.24568473, 0.24598324], [0.911533, 
-    0.25292623, 0.24470258], [0.9144225, 0.26028902, 0.24369359], [
-    0.91717106, 0.26773821, 0.24294137], [0.91978131, 0.27526191, 
-    0.24245973], [0.92223947, 0.28287251, 0.24229568], [0.92456587, 
-    0.29053388, 0.24242622], [0.92676657, 0.29823282, 0.24285536], [
-    0.92882964, 0.30598085, 0.24362274], [0.93078135, 0.31373977, 
-    0.24468803], [0.93262051, 0.3215093, 0.24606461], [0.93435067, 
-    0.32928362, 0.24775328], [0.93599076, 0.33703942, 0.24972157], [
-    0.93752831, 0.34479177, 0.25199928], [0.93899289, 0.35250734, 
-    0.25452808], [0.94036561, 0.36020899, 0.25734661], [0.94167588, 
-    0.36786594, 0.2603949], [0.94291042, 0.37549479, 0.26369821], [
-    0.94408513, 0.3830811, 0.26722004], [0.94520419, 0.39062329, 0.27094924
-    ], [0.94625977, 0.39813168, 0.27489742], [0.94727016, 0.4055909, 
-    0.27902322], [0.94823505, 0.41300424, 0.28332283], [0.94914549, 
-    0.42038251, 0.28780969], [0.95001704, 0.42771398, 0.29244728], [
-    0.95085121, 0.43500005, 0.29722817], [0.95165009, 0.44224144, 
-    0.30214494], [0.9524044, 0.44944853, 0.3072105], [0.95312556, 
-    0.45661389, 0.31239776], [0.95381595, 0.46373781, 0.31769923], [
-    0.95447591, 0.47082238, 0.32310953], [0.95510255, 0.47787236, 
-    0.32862553], [0.95569679, 0.48489115, 0.33421404], [0.95626788, 
-    0.49187351, 0.33985601], [0.95681685, 0.49882008, 0.34555431], [
-    0.9573439, 0.50573243, 0.35130912], [0.95784842, 0.51261283, 0.35711942
-    ], [0.95833051, 0.51946267, 0.36298589], [0.95879054, 0.52628305, 
-    0.36890904], [0.95922872, 0.53307513, 0.3748895], [0.95964538, 
-    0.53983991, 0.38092784], [0.96004345, 0.54657593, 0.3870292], [
-    0.96042097, 0.55328624, 0.39319057], [0.96077819, 0.55997184, 
-    0.39941173], [0.9611152, 0.5666337, 0.40569343], [0.96143273, 
-    0.57327231, 0.41203603], [0.96173392, 0.57988594, 0.41844491], [
-    0.96201757, 0.58647675, 0.42491751], [0.96228344, 0.59304598, 
-    0.43145271], [0.96253168, 0.5995944, 0.43805131], [0.96276513, 
-    0.60612062, 0.44471698], [0.96298491, 0.6126247, 0.45145074], [
-    0.96318967, 0.61910879, 0.45824902], [0.96337949, 0.6255736, 0.46511271
-    ], [0.96355923, 0.63201624, 0.47204746], [0.96372785, 0.63843852, 
-    0.47905028], [0.96388426, 0.64484214, 0.4861196], [0.96403203, 
-    0.65122535, 0.4932578], [0.96417332, 0.65758729, 0.50046894], [
-    0.9643063, 0.66393045, 0.5077467], [0.96443322, 0.67025402, 0.51509334],
-    [0.96455845, 0.67655564, 0.52251447], [0.96467922, 0.68283846, 
-    0.53000231], [0.96479861, 0.68910113, 0.53756026], [0.96492035, 
-    0.69534192, 0.5451917], [0.96504223, 0.7015636, 0.5528892], [0.96516917,
-    0.70776351, 0.5606593], [0.96530224, 0.71394212, 0.56849894], [
-    0.96544032, 0.72010124, 0.57640375], [0.96559206, 0.72623592, 
-    0.58438387], [0.96575293, 0.73235058, 0.59242739], [0.96592829, 
-    0.73844258, 0.60053991], [0.96612013, 0.74451182, 0.60871954], [
-    0.96632832, 0.75055966, 0.61696136], [0.96656022, 0.75658231, 
-    0.62527295], [0.96681185, 0.76258381, 0.63364277], [0.96709183, 
-    0.76855969, 0.64207921], [0.96739773, 0.77451297, 0.65057302], [
-    0.96773482, 0.78044149, 0.65912731], [0.96810471, 0.78634563, 
-    0.66773889], [0.96850919, 0.79222565, 0.6764046], [0.96893132, 
-    0.79809112, 0.68512266], [0.96935926, 0.80395415, 0.69383201], [
-    0.9698028, 0.80981139, 0.70252255], [0.97025511, 0.81566605, 0.71120296
-    ], [0.97071849, 0.82151775, 0.71987163], [0.97120159, 0.82736371, 
-    0.72851999], [0.97169389, 0.83320847, 0.73716071], [0.97220061, 
-    0.83905052, 0.74578903], [0.97272597, 0.84488881, 0.75440141], [
-    0.97327085, 0.85072354, 0.76299805], [0.97383206, 0.85655639, 
-    0.77158353], [0.97441222, 0.86238689, 0.78015619], [0.97501782, 
-    0.86821321, 0.78871034], [0.97564391, 0.87403763, 0.79725261], [
-    0.97628674, 0.87986189, 0.8057883], [0.97696114, 0.88568129, 0.81430324
-    ], [0.97765722, 0.89149971, 0.82280948], [0.97837585, 0.89731727, 
-    0.83130786], [0.97912374, 0.90313207, 0.83979337], [0.979891, 
-    0.90894778, 0.84827858], [0.98067764, 0.91476465, 0.85676611], [
-    0.98137749, 0.92061729, 0.86536915]]
-_mako_lut = [[0.04503935, 0.01482344, 0.02092227], [0.04933018, 0.01709292,
-    0.02535719], [0.05356262, 0.01950702, 0.03018802], [0.05774337, 
-    0.02205989, 0.03545515], [0.06188095, 0.02474764, 0.04115287], [
-    0.06598247, 0.0275665, 0.04691409], [0.07005374, 0.03051278, 0.05264306
-    ], [0.07409947, 0.03358324, 0.05834631], [0.07812339, 0.03677446, 
-    0.06403249], [0.08212852, 0.0400833, 0.06970862], [0.08611731, 
-    0.04339148, 0.07538208], [0.09009161, 0.04664706, 0.08105568], [
-    0.09405308, 0.04985685, 0.08673591], [0.09800301, 0.05302279, 
-    0.09242646], [0.10194255, 0.05614641, 0.09813162], [0.10587261, 
-    0.05922941, 0.103854], [0.1097942, 0.06227277, 0.10959847], [0.11370826,
-    0.06527747, 0.11536893], [0.11761516, 0.06824548, 0.12116393], [
-    0.12151575, 0.07117741, 0.12698763], [0.12541095, 0.07407363, 0.1328442
-    ], [0.12930083, 0.07693611, 0.13873064], [0.13317849, 0.07976988, 
-    0.14465095], [0.13701138, 0.08259683, 0.15060265], [0.14079223, 
-    0.08542126, 0.15659379], [0.14452486, 0.08824175, 0.16262484], [
-    0.14820351, 0.09106304, 0.16869476], [0.15183185, 0.09388372, 
-    0.17480366], [0.15540398, 0.09670855, 0.18094993], [0.15892417, 
-    0.09953561, 0.18713384], [0.16238588, 0.10236998, 0.19335329], [
-    0.16579435, 0.10520905, 0.19960847], [0.16914226, 0.10805832, 
-    0.20589698], [0.17243586, 0.11091443, 0.21221911], [0.17566717, 
-    0.11378321, 0.21857219], [0.17884322, 0.11666074, 0.2249565], [
-    0.18195582, 0.11955283, 0.23136943], [0.18501213, 0.12245547, 
-    0.23781116], [0.18800459, 0.12537395, 0.24427914], [0.19093944, 
-    0.1283047, 0.25077369], [0.19381092, 0.13125179, 0.25729255], [
-    0.19662307, 0.13421303, 0.26383543], [0.19937337, 0.13719028, 
-    0.27040111], [0.20206187, 0.14018372, 0.27698891], [0.20469116, 
-    0.14319196, 0.28359861], [0.20725547, 0.14621882, 0.29022775], [
-    0.20976258, 0.14925954, 0.29687795], [0.21220409, 0.15231929, 
-    0.30354703], [0.21458611, 0.15539445, 0.31023563], [0.21690827, 
-    0.15848519, 0.31694355], [0.21916481, 0.16159489, 0.32366939], [
-    0.2213631, 0.16471913, 0.33041431], [0.22349947, 0.1678599, 0.33717781],
-    [0.2255714, 0.1710185, 0.34395925], [0.22758415, 0.17419169, 0.35075983
-    ], [0.22953569, 0.17738041, 0.35757941], [0.23142077, 0.18058733, 
-    0.3644173], [0.2332454, 0.18380872, 0.37127514], [0.2350092, 0.18704459,
-    0.3781528], [0.23670785, 0.190297, 0.38504973], [0.23834119, 0.19356547,
-    0.39196711], [0.23991189, 0.19684817, 0.39890581], [0.24141903, 
-    0.20014508, 0.4058667], [0.24286214, 0.20345642, 0.4128484], [
-    0.24423453, 0.20678459, 0.41985299], [0.24554109, 0.21012669, 
-    0.42688124], [0.2467815, 0.21348266, 0.43393244], [0.24795393, 
-    0.21685249, 0.4410088], [0.24905614, 0.22023618, 0.448113], [0.25007383,
-    0.22365053, 0.45519562], [0.25098926, 0.22710664, 0.46223892], [
-    0.25179696, 0.23060342, 0.46925447], [0.25249346, 0.23414353, 
-    0.47623196], [0.25307401, 0.23772973, 0.48316271], [0.25353152, 
-    0.24136961, 0.49001976], [0.25386167, 0.24506548, 0.49679407], [
-    0.25406082, 0.2488164, 0.50348932], [0.25412435, 0.25262843, 0.51007843
-    ], [0.25404842, 0.25650743, 0.51653282], [0.25383134, 0.26044852, 
-    0.52286845], [0.2534705, 0.26446165, 0.52903422], [0.25296722, 
-    0.2685428, 0.53503572], [0.2523226, 0.27269346, 0.54085315], [
-    0.25153974, 0.27691629, 0.54645752], [0.25062402, 0.28120467, 
-    0.55185939], [0.24958205, 0.28556371, 0.55701246], [0.24842386, 
-    0.28998148, 0.56194601], [0.24715928, 0.29446327, 0.56660884], [
-    0.24580099, 0.29899398, 0.57104399], [0.24436202, 0.30357852, 
-    0.57519929], [0.24285591, 0.30819938, 0.57913247], [0.24129828, 
-    0.31286235, 0.58278615], [0.23970131, 0.3175495, 0.5862272], [
-    0.23807973, 0.32226344, 0.58941872], [0.23644557, 0.32699241, 
-    0.59240198], [0.2348113, 0.33173196, 0.59518282], [0.23318874, 
-    0.33648036, 0.59775543], [0.2315855, 0.34122763, 0.60016456], [
-    0.23001121, 0.34597357, 0.60240251], [0.2284748, 0.35071512, 0.6044784],
-    [0.22698081, 0.35544612, 0.60642528], [0.22553305, 0.36016515, 
-    0.60825252], [0.22413977, 0.36487341, 0.60994938], [0.22280246, 
-    0.36956728, 0.61154118], [0.22152555, 0.37424409, 0.61304472], [
-    0.22030752, 0.37890437, 0.61446646], [0.2191538, 0.38354668, 0.61581561
-    ], [0.21806257, 0.38817169, 0.61709794], [0.21703799, 0.39277882, 
-    0.61831922], [0.21607792, 0.39736958, 0.61948028], [0.21518463, 
-    0.40194196, 0.62059763], [0.21435467, 0.40649717, 0.62167507], [
-    0.21358663, 0.41103579, 0.62271724], [0.21288172, 0.41555771, 
-    0.62373011], [0.21223835, 0.42006355, 0.62471794], [0.21165312, 
-    0.42455441, 0.62568371], [0.21112526, 0.42903064, 0.6266318], [
-    0.21065161, 0.43349321, 0.62756504], [0.21023306, 0.43794288, 
-    0.62848279], [0.20985996, 0.44238227, 0.62938329], [0.20951045, 
-    0.44680966, 0.63030696], [0.20916709, 0.45122981, 0.63124483], [
-    0.20882976, 0.45564335, 0.63219599], [0.20849798, 0.46005094, 
-    0.63315928], [0.20817199, 0.46445309, 0.63413391], [0.20785149, 
-    0.46885041, 0.63511876], [0.20753716, 0.47324327, 0.63611321], [
-    0.20722876, 0.47763224, 0.63711608], [0.20692679, 0.48201774, 
-    0.63812656], [0.20663156, 0.48640018, 0.63914367], [0.20634336, 
-    0.49078002, 0.64016638], [0.20606303, 0.49515755, 0.6411939], [
-    0.20578999, 0.49953341, 0.64222457], [0.20552612, 0.50390766, 
-    0.64325811], [0.20527189, 0.50828072, 0.64429331], [0.20502868, 
-    0.51265277, 0.64532947], [0.20479718, 0.51702417, 0.64636539], [
-    0.20457804, 0.52139527, 0.64739979], [0.20437304, 0.52576622, 
-    0.64843198], [0.20418396, 0.53013715, 0.64946117], [0.20401238, 
-    0.53450825, 0.65048638], [0.20385896, 0.53887991, 0.65150606], [
-    0.20372653, 0.54325208, 0.65251978], [0.20361709, 0.5476249, 0.6535266],
-    [0.20353258, 0.55199854, 0.65452542], [0.20347472, 0.55637318, 0.655515
-    ], [0.20344718, 0.56074869, 0.65649508], [0.20345161, 0.56512531, 
-    0.65746419], [0.20349089, 0.56950304, 0.65842151], [0.20356842, 
-    0.57388184, 0.65936642], [0.20368663, 0.57826181, 0.66029768], [
-    0.20384884, 0.58264293, 0.6612145], [0.20405904, 0.58702506, 0.66211645
-    ], [0.20431921, 0.59140842, 0.66300179], [0.20463464, 0.59579264, 
-    0.66387079], [0.20500731, 0.60017798, 0.66472159], [0.20544449, 
-    0.60456387, 0.66555409], [0.20596097, 0.60894927, 0.66636568], [
-    0.20654832, 0.61333521, 0.66715744], [0.20721003, 0.61772167, 
-    0.66792838], [0.20795035, 0.62210845, 0.66867802], [0.20877302, 
-    0.62649546, 0.66940555], [0.20968223, 0.63088252, 0.6701105], [
-    0.21068163, 0.63526951, 0.67079211], [0.21177544, 0.63965621, 
-    0.67145005], [0.21298582, 0.64404072, 0.67208182], [0.21430361, 
-    0.64842404, 0.67268861], [0.21572716, 0.65280655, 0.67326978], [
-    0.21726052, 0.65718791, 0.6738255], [0.21890636, 0.66156803, 0.67435491
-    ], [0.220668, 0.66594665, 0.67485792], [0.22255447, 0.67032297, 
-    0.67533374], [0.22458372, 0.67469531, 0.67578061], [0.22673713, 
-    0.67906542, 0.67620044], [0.22901625, 0.6834332, 0.67659251], [
-    0.23142316, 0.68779836, 0.67695703], [0.23395924, 0.69216072, 
-    0.67729378], [0.23663857, 0.69651881, 0.67760151], [0.23946645, 
-    0.70087194, 0.67788018], [0.24242624, 0.70522162, 0.67813088], [
-    0.24549008, 0.70957083, 0.67835215], [0.24863372, 0.71392166, 
-    0.67854868], [0.25187832, 0.71827158, 0.67872193], [0.25524083, 
-    0.72261873, 0.67887024], [0.25870947, 0.72696469, 0.67898912], [
-    0.26229238, 0.73130855, 0.67907645], [0.26604085, 0.73564353, 
-    0.67914062], [0.26993099, 0.73997282, 0.67917264], [0.27397488, 
-    0.74429484, 0.67917096], [0.27822463, 0.74860229, 0.67914468], [
-    0.28264201, 0.75290034, 0.67907959], [0.2873016, 0.75717817, 0.67899164
-    ], [0.29215894, 0.76144162, 0.67886578], [0.29729823, 0.76567816, 
-    0.67871894], [0.30268199, 0.76989232, 0.67853896], [0.30835665, 
-    0.77407636, 0.67833512], [0.31435139, 0.77822478, 0.67811118], [
-    0.3206671, 0.78233575, 0.67786729], [0.32733158, 0.78640315, 0.67761027
-    ], [0.33437168, 0.79042043, 0.67734882], [0.34182112, 0.79437948, 
-    0.67709394], [0.34968889, 0.79827511, 0.67685638], [0.35799244, 
-    0.80210037, 0.67664969], [0.36675371, 0.80584651, 0.67649539], [
-    0.3759816, 0.80950627, 0.67641393], [0.38566792, 0.81307432, 0.67642947
-    ], [0.39579804, 0.81654592, 0.67656899], [0.40634556, 0.81991799, 
-    0.67686215], [0.41730243, 0.82318339, 0.67735255], [0.4285828, 
-    0.82635051, 0.6780564], [0.44012728, 0.82942353, 0.67900049], [
-    0.45189421, 0.83240398, 0.68021733], [0.46378379, 0.83530763, 0.6817062
-    ], [0.47573199, 0.83814472, 0.68347352], [0.48769865, 0.84092197, 
-    0.68552698], [0.49962354, 0.84365379, 0.68783929], [0.5114027, 
-    0.8463718, 0.69029789], [0.52301693, 0.84908401, 0.69288545], [
-    0.53447549, 0.85179048, 0.69561066], [0.54578602, 0.8544913, 0.69848331
-    ], [0.55695565, 0.85718723, 0.70150427], [0.56798832, 0.85987893, 
-    0.70468261], [0.57888639, 0.86256715, 0.70802931], [0.5896541, 
-    0.8652532, 0.71154204], [0.60028928, 0.86793835, 0.71523675], [
-    0.61079441, 0.87062438, 0.71910895], [0.62116633, 0.87331311, 
-    0.72317003], [0.63140509, 0.87600675, 0.72741689], [0.64150735, 
-    0.87870746, 0.73185717], [0.65147219, 0.8814179, 0.73648495], [
-    0.66129632, 0.8841403, 0.74130658], [0.67097934, 0.88687758, 0.74631123
-    ], [0.68051833, 0.88963189, 0.75150483], [0.68991419, 0.89240612, 
-    0.75687187], [0.69916533, 0.89520211, 0.76241714], [0.70827373, 
-    0.89802257, 0.76812286], [0.71723995, 0.90086891, 0.77399039], [
-    0.72606665, 0.90374337, 0.7800041], [0.73475675, 0.90664718, 0.78615802
-    ], [0.74331358, 0.90958151, 0.79244474], [0.75174143, 0.91254787, 
-    0.79884925], [0.76004473, 0.91554656, 0.80536823], [0.76827704, 
-    0.91856549, 0.81196513], [0.77647029, 0.921603, 0.81855729], [
-    0.78462009, 0.92466151, 0.82514119], [0.79273542, 0.92773848, 
-    0.83172131], [0.8008109, 0.93083672, 0.83829355], [0.80885107, 
-    0.93395528, 0.84485982], [0.81685878, 0.9370938, 0.85142101], [
-    0.82483206, 0.94025378, 0.8579751], [0.83277661, 0.94343371, 0.86452477
-    ], [0.84069127, 0.94663473, 0.87106853], [0.84857662, 0.9498573, 
-    0.8776059], [0.8564431, 0.95309792, 0.88414253], [0.86429066, 
-    0.95635719, 0.89067759], [0.87218969, 0.95960708, 0.89725384]]
-_vlag_lut = [[0.13850039, 0.41331206, 0.74052025], [0.15077609, 0.41762684,
-    0.73970427], [0.16235219, 0.4219191, 0.7389667], [0.1733322, 0.42619024,
-    0.73832537], [0.18382538, 0.43044226, 0.73776764], [0.19394034, 
-    0.4346772, 0.73725867], [0.20367115, 0.43889576, 0.73685314], [
-    0.21313625, 0.44310003, 0.73648045], [0.22231173, 0.44729079, 
-    0.73619681], [0.23125148, 0.45146945, 0.73597803], [0.23998101, 
-    0.45563715, 0.7358223], [0.24853358, 0.45979489, 0.73571524], [
-    0.25691416, 0.4639437, 0.73566943], [0.26513894, 0.46808455, 0.73568319
-    ], [0.27322194, 0.47221835, 0.73575497], [0.28117543, 0.47634598, 
-    0.73588332], [0.28901021, 0.48046826, 0.73606686], [0.2967358, 
-    0.48458597, 0.73630433], [0.30436071, 0.48869986, 0.73659451], [
-    0.3118955, 0.49281055, 0.73693255], [0.31935389, 0.49691847, 0.73730851
-    ], [0.32672701, 0.5010247, 0.73774013], [0.33402607, 0.50512971, 
-    0.73821941], [0.34125337, 0.50923419, 0.73874905], [0.34840921, 
-    0.51333892, 0.73933402], [0.35551826, 0.51744353, 0.73994642], [
-    0.3625676, 0.52154929, 0.74060763], [0.36956356, 0.52565656, 0.74131327
-    ], [0.37649902, 0.52976642, 0.74207698], [0.38340273, 0.53387791, 
-    0.74286286], [0.39025859, 0.53799253, 0.7436962], [0.39706821, 
-    0.54211081, 0.744578], [0.40384046, 0.54623277, 0.74549872], [
-    0.41058241, 0.55035849, 0.74645094], [0.41728385, 0.55448919, 
-    0.74745174], [0.42395178, 0.55862494, 0.74849357], [0.4305964, 
-    0.56276546, 0.74956387], [0.4372044, 0.56691228, 0.75068412], [
-    0.4437909, 0.57106468, 0.75183427], [0.45035117, 0.5752235, 0.75302312],
-    [0.45687824, 0.57938983, 0.75426297], [0.46339713, 0.58356191, 
-    0.75551816], [0.46988778, 0.58774195, 0.75682037], [0.47635605, 
-    0.59192986, 0.75816245], [0.48281101, 0.5961252, 0.75953212], [
-    0.4892374, 0.60032986, 0.76095418], [0.49566225, 0.60454154, 0.76238852
-    ], [0.50206137, 0.60876307, 0.76387371], [0.50845128, 0.61299312, 
-    0.76538551], [0.5148258, 0.61723272, 0.76693475], [0.52118385, 
-    0.62148236, 0.76852436], [0.52753571, 0.62574126, 0.77013939], [
-    0.53386831, 0.63001125, 0.77180152], [0.54020159, 0.63429038, 0.7734803
-    ], [0.54651272, 0.63858165, 0.77521306], [0.55282975, 0.64288207, 
-    0.77695608], [0.55912585, 0.64719519, 0.77875327], [0.56542599, 
-    0.65151828, 0.78056551], [0.57170924, 0.65585426, 0.78242747], [
-    0.57799572, 0.6602009, 0.78430751], [0.58426817, 0.66456073, 0.78623458
-    ], [0.590544, 0.66893178, 0.78818117], [0.59680758, 0.67331643, 
-    0.79017369], [0.60307553, 0.67771273, 0.79218572], [0.60934065, 
-    0.68212194, 0.79422987], [0.61559495, 0.68654548, 0.7963202], [
-    0.62185554, 0.69098125, 0.79842918], [0.62810662, 0.69543176, 
-    0.80058381], [0.63436425, 0.69989499, 0.80275812], [0.64061445, 
-    0.70437326, 0.80497621], [0.6468706, 0.70886488, 0.80721641], [
-    0.65312213, 0.7133717, 0.80949719], [0.65937818, 0.71789261, 0.81180392
-    ], [0.66563334, 0.72242871, 0.81414642], [0.67189155, 0.72697967, 
-    0.81651872], [0.67815314, 0.73154569, 0.81892097], [0.68441395, 
-    0.73612771, 0.82136094], [0.69068321, 0.74072452, 0.82382353], [
-    0.69694776, 0.7453385, 0.82633199], [0.70322431, 0.74996721, 0.8288583],
-    [0.70949595, 0.75461368, 0.83143221], [0.7157774, 0.75927574, 
-    0.83402904], [0.72206299, 0.76395461, 0.83665922], [0.72835227, 
-    0.76865061, 0.8393242], [0.73465238, 0.7733628, 0.84201224], [
-    0.74094862, 0.77809393, 0.84474951], [0.74725683, 0.78284158, 
-    0.84750915], [0.75357103, 0.78760701, 0.85030217], [0.75988961, 
-    0.79239077, 0.85313207], [0.76621987, 0.79719185, 0.85598668], [
-    0.77255045, 0.8020125, 0.85888658], [0.77889241, 0.80685102, 0.86181298
-    ], [0.78524572, 0.81170768, 0.86476656], [0.79159841, 0.81658489, 
-    0.86776906], [0.79796459, 0.82148036, 0.8707962], [0.80434168, 
-    0.82639479, 0.87385315], [0.8107221, 0.83132983, 0.87695392], [
-    0.81711301, 0.8362844, 0.88008641], [0.82351479, 0.84125863, 0.88325045
-    ], [0.82992772, 0.84625263, 0.88644594], [0.83634359, 0.85126806, 
-    0.8896878], [0.84277295, 0.85630293, 0.89295721], [0.84921192, 
-    0.86135782, 0.89626076], [0.85566206, 0.866432, 0.89959467], [
-    0.86211514, 0.87152627, 0.90297183], [0.86857483, 0.87663856, 
-    0.90638248], [0.87504231, 0.88176648, 0.90981938], [0.88151194, 
-    0.88690782, 0.91328493], [0.88797938, 0.89205857, 0.91677544], [
-    0.89443865, 0.89721298, 0.9202854], [0.90088204, 0.90236294, 0.92380601
-    ], [0.90729768, 0.90749778, 0.92732797], [0.91367037, 0.91260329, 
-    0.93083814], [0.91998105, 0.91766106, 0.93431861], [0.92620596, 
-    0.92264789, 0.93774647], [0.93231683, 0.9275351, 0.94109192], [
-    0.93827772, 0.9322888, 0.94432312], [0.94404755, 0.93686925, 0.94740137
-    ], [0.94958284, 0.94123072, 0.95027696], [0.95482682, 0.9453245, 
-    0.95291103], [0.9597248, 0.94909728, 0.95525103], [0.96422552, 
-    0.95249273, 0.95723271], [0.96826161, 0.95545812, 0.95882188], [
-    0.97178458, 0.95793984, 0.95995705], [0.97474105, 0.95989142, 
-    0.96059997], [0.97708604, 0.96127366, 0.96071853], [0.97877855, 
-    0.96205832, 0.96030095], [0.97978484, 0.96222949, 0.95935496], [
-    0.9805997, 0.96155216, 0.95813083], [0.98152619, 0.95993719, 0.95639322
-    ], [0.9819726, 0.95766608, 0.95399269], [0.98191855, 0.9547873, 
-    0.95098107], [0.98138514, 0.95134771, 0.94740644], [0.98040845, 
-    0.94739906, 0.94332125], [0.97902107, 0.94300131, 0.93878672], [
-    0.97729348, 0.93820409, 0.93385135], [0.9752533, 0.933073, 0.92858252],
-    [0.97297834, 0.92765261, 0.92302309], [0.97049104, 0.92200317, 
-    0.91723505], [0.96784372, 0.91616744, 0.91126063], [0.96507281, 
-    0.91018664, 0.90514124], [0.96222034, 0.90409203, 0.89890756], [
-    0.9593079, 0.89791478, 0.89259122], [0.95635626, 0.89167908, 0.88621654
-    ], [0.95338303, 0.88540373, 0.87980238], [0.95040174, 0.87910333, 
-    0.87336339], [0.94742246, 0.87278899, 0.86691076], [0.94445249, 
-    0.86646893, 0.86045277], [0.94150476, 0.86014606, 0.85399191], [
-    0.93857394, 0.85382798, 0.84753642], [0.93566206, 0.84751766, 
-    0.84108935], [0.93277194, 0.8412164, 0.83465197], [0.92990106, 
-    0.83492672, 0.82822708], [0.92704736, 0.82865028, 0.82181656], [
-    0.92422703, 0.82238092, 0.81541333], [0.92142581, 0.81612448, 
-    0.80902415], [0.91864501, 0.80988032, 0.80264838], [0.91587578, 
-    0.80365187, 0.79629001], [0.9131367, 0.79743115, 0.78994], [0.91041602,
-    0.79122265, 0.78360361], [0.90771071, 0.78502727, 0.77728196], [
-    0.90501581, 0.77884674, 0.7709771], [0.90235365, 0.77267117, 0.76467793
-    ], [0.8997019, 0.76650962, 0.75839484], [0.89705346, 0.76036481, 
-    0.752131], [0.89444021, 0.75422253, 0.74587047], [0.89183355, 
-    0.74809474, 0.73962689], [0.88923216, 0.74198168, 0.73340061], [
-    0.88665892, 0.73587283, 0.72717995], [0.88408839, 0.72977904, 
-    0.72097718], [0.88153537, 0.72369332, 0.71478461], [0.87899389, 
-    0.7176179, 0.70860487], [0.87645157, 0.71155805, 0.7024439], [0.8739399,
-    0.70549893, 0.6962854], [0.87142626, 0.6994551, 0.69014561], [0.8689268,
-    0.69341868, 0.68401597], [0.86643562, 0.687392, 0.67789917], [
-    0.86394434, 0.68137863, 0.67179927], [0.86147586, 0.67536728, 0.665704],
-    [0.85899928, 0.66937226, 0.6596292], [0.85654668, 0.66337773, 0.6535577
-    ], [0.85408818, 0.65739772, 0.64750494], [0.85164413, 0.65142189, 
-    0.64145983], [0.84920091, 0.6454565, 0.63542932], [0.84676427, 
-    0.63949827, 0.62941], [0.84433231, 0.63354773, 0.62340261], [0.84190106,
-    0.62760645, 0.61740899], [0.83947935, 0.62166951, 0.61142404], [
-    0.8370538, 0.61574332, 0.60545478], [0.83463975, 0.60981951, 0.59949247
-    ], [0.83221877, 0.60390724, 0.593547], [0.82980985, 0.59799607, 
-    0.58760751], [0.82740268, 0.59209095, 0.58167944], [0.82498638, 
-    0.5861973, 0.57576866], [0.82258181, 0.5803034, 0.56986307], [
-    0.82016611, 0.57442123, 0.56397539], [0.81776305, 0.56853725, 
-    0.55809173], [0.81534551, 0.56266602, 0.55222741], [0.81294293, 
-    0.55679056, 0.5463651], [0.81052113, 0.55092973, 0.54052443], [
-    0.80811509, 0.54506305, 0.53468464], [0.80568952, 0.53921036, 
-    0.52886622], [0.80327506, 0.53335335, 0.52305077], [0.80084727, 
-    0.52750583, 0.51725256], [0.79842217, 0.5216578, 0.51146173], [
-    0.79599382, 0.51581223, 0.50568155], [0.79355781, 0.50997127, 
-    0.49991444], [0.79112596, 0.50412707, 0.49415289], [0.78867442, 
-    0.49829386, 0.48841129], [0.7862306, 0.49245398, 0.48267247], [
-    0.7837687, 0.48662309, 0.47695216], [0.78130809, 0.4807883, 0.47123805],
-    [0.77884467, 0.47495151, 0.46553236], [0.77636283, 0.46912235, 
-    0.45984473], [0.77388383, 0.46328617, 0.45416141], [0.77138912, 
-    0.45745466, 0.44849398], [0.76888874, 0.45162042, 0.44283573], [
-    0.76638802, 0.44577901, 0.43718292], [0.76386116, 0.43994762, 
-    0.43155211], [0.76133542, 0.43410655, 0.42592523], [0.75880631, 
-    0.42825801, 0.42030488], [0.75624913, 0.42241905, 0.41470727], [
-    0.7536919, 0.41656866, 0.40911347], [0.75112748, 0.41071104, 0.40352792
-    ], [0.74854331, 0.40485474, 0.3979589], [0.74594723, 0.39899309, 
-    0.39240088], [0.74334332, 0.39312199, 0.38685075], [0.74073277, 
-    0.38723941, 0.3813074], [0.73809409, 0.38136133, 0.37578553], [
-    0.73544692, 0.37547129, 0.37027123], [0.73278943, 0.36956954, 
-    0.36476549], [0.73011829, 0.36365761, 0.35927038], [0.72743485, 
-    0.35773314, 0.35378465], [0.72472722, 0.35180504, 0.34831662], [
-    0.72200473, 0.34586421, 0.34285937], [0.71927052, 0.33990649, 
-    0.33741033], [0.71652049, 0.33393396, 0.33197219], [0.71375362, 
-    0.32794602, 0.32654545], [0.71096951, 0.32194148, 0.32113016], [
-    0.70816772, 0.31591904, 0.31572637], [0.70534784, 0.30987734, 
-    0.31033414], [0.70250944, 0.30381489, 0.30495353], [0.69965211, 
-    0.2977301, 0.2995846], [0.6967754, 0.29162126, 0.29422741], [0.69388446,
-    0.28548074, 0.28887769], [0.69097561, 0.2793096, 0.28353795], [
-    0.68803513, 0.27311993, 0.27821876], [0.6850794, 0.26689144, 0.27290694
-    ], [0.682108, 0.26062114, 0.26760246], [0.67911013, 0.2543177, 
-    0.26231367], [0.67609393, 0.24796818, 0.25703372], [0.67305921, 
-    0.24156846, 0.25176238], [0.67000176, 0.23511902, 0.24650278], [
-    0.66693423, 0.22859879, 0.24124404], [0.6638441, 0.22201742, 0.2359961],
-    [0.66080672, 0.21526712, 0.23069468]]
-_icefire_lut = [[0.73936227, 0.90443867, 0.85757238], [0.72888063, 
-    0.89639109, 0.85488394], [0.71834255, 0.88842162, 0.8521605], [
-    0.70773866, 0.88052939, 0.849422], [0.69706215, 0.87271313, 0.84668315],
-    [0.68629021, 0.86497329, 0.84398721], [0.67543654, 0.85730617, 
-    0.84130969], [0.66448539, 0.84971123, 0.83868005], [0.65342679, 
-    0.84218728, 0.83611512], [0.64231804, 0.83471867, 0.83358584], [
-    0.63117745, 0.827294, 0.83113431], [0.62000484, 0.81991069, 0.82876741],
-    [0.60879435, 0.81256797, 0.82648905], [0.59754118, 0.80526458, 
-    0.82430414], [0.58624247, 0.79799884, 0.82221573], [0.57489525, 
-    0.7907688, 0.82022901], [0.56349779, 0.78357215, 0.81834861], [
-    0.55204294, 0.77640827, 0.81657563], [0.54052516, 0.76927562, 
-    0.81491462], [0.52894085, 0.76217215, 0.81336913], [0.51728854, 
-    0.75509528, 0.81194156], [0.50555676, 0.74804469, 0.81063503], [
-    0.49373871, 0.7410187, 0.80945242], [0.48183174, 0.73401449, 0.80839675
-    ], [0.46982587, 0.72703075, 0.80747097], [0.45770893, 0.72006648, 
-    0.80667756], [0.44547249, 0.71311941, 0.80601991], [0.43318643, 
-    0.70617126, 0.80549278], [0.42110294, 0.69916972, 0.80506683], [
-    0.40925101, 0.69211059, 0.80473246], [0.3976693, 0.68498786, 0.80448272
-    ], [0.38632002, 0.67781125, 0.80431024], [0.37523981, 0.67057537, 
-    0.80420832], [0.36442578, 0.66328229, 0.80417474], [0.35385939, 
-    0.65593699, 0.80420591], [0.34358916, 0.64853177, 0.8043], [0.33355526,
-    0.64107876, 0.80445484], [0.32383062, 0.63356578, 0.80467091], [
-    0.31434372, 0.62600624, 0.8049475], [0.30516161, 0.618389, 0.80528692],
-    [0.29623491, 0.61072284, 0.80569021], [0.28759072, 0.60300319, 
-    0.80616055], [0.27923924, 0.59522877, 0.80669803], [0.27114651, 
-    0.5874047, 0.80730545], [0.26337153, 0.57952055, 0.80799113], [
-    0.25588696, 0.57157984, 0.80875922], [0.248686, 0.56358255, 0.80961366],
-    [0.24180668, 0.55552289, 0.81055123], [0.23526251, 0.54739477, 
-    0.8115939], [0.22921445, 0.53918506, 0.81267292], [0.22397687, 
-    0.53086094, 0.8137141], [0.21977058, 0.52241482, 0.81457651], [
-    0.21658989, 0.51384321, 0.81528511], [0.21452772, 0.50514155, 
-    0.81577278], [0.21372783, 0.49630865, 0.81589566], [0.21409503, 
-    0.48734861, 0.81566163], [0.2157176, 0.47827123, 0.81487615], [
-    0.21842857, 0.46909168, 0.81351614], [0.22211705, 0.45983212, 
-    0.81146983], [0.22665681, 0.45052233, 0.80860217], [0.23176013, 
-    0.44119137, 0.80494325], [0.23727775, 0.43187704, 0.80038017], [
-    0.24298285, 0.42261123, 0.79493267], [0.24865068, 0.41341842, 
-    0.78869164], [0.25423116, 0.40433127, 0.78155831], [0.25950239, 
-    0.39535521, 0.77376848], [0.2644736, 0.38651212, 0.76524809], [
-    0.26901584, 0.37779582, 0.75621942], [0.27318141, 0.36922056, 0.746605],
-    [0.27690355, 0.3607736, 0.73659374], [0.28023585, 0.35244234, 
-    0.72622103], [0.28306009, 0.34438449, 0.71500731], [0.28535896, 
-    0.33660243, 0.70303975], [0.28708711, 0.32912157, 0.69034504], [
-    0.28816354, 0.32200604, 0.67684067], [0.28862749, 0.31519824, 
-    0.66278813], [0.28847904, 0.30869064, 0.6482815], [0.28770912, 
-    0.30250126, 0.63331265], [0.28640325, 0.29655509, 0.61811374], [
-    0.28458943, 0.29082155, 0.60280913], [0.28233561, 0.28527482, 
-    0.58742866], [0.27967038, 0.2798938, 0.57204225], [0.27665361, 
-    0.27465357, 0.55667809], [0.27332564, 0.2695165, 0.54145387], [
-    0.26973851, 0.26447054, 0.52634916], [0.2659204, 0.25949691, 0.511417],
-    [0.26190145, 0.25458123, 0.49668768], [0.2577151, 0.24971691, 
-    0.48214874], [0.25337618, 0.24490494, 0.46778758], [0.24890842, 
-    0.24013332, 0.45363816], [0.24433654, 0.23539226, 0.4397245], [
-    0.23967922, 0.23067729, 0.4260591], [0.23495608, 0.22598894, 0.41262952
-    ], [0.23018113, 0.22132414, 0.39945577], [0.22534609, 0.21670847, 
-    0.38645794], [0.22048761, 0.21211723, 0.37372555], [0.2156198, 
-    0.20755389, 0.36125301], [0.21074637, 0.20302717, 0.34903192], [
-    0.20586893, 0.19855368, 0.33701661], [0.20101757, 0.19411573, 
-    0.32529173], [0.19619947, 0.18972425, 0.31383846], [0.19140726, 
-    0.18540157, 0.30260777], [0.1866769, 0.1811332, 0.29166583], [
-    0.18201285, 0.17694992, 0.28088776], [0.17745228, 0.17282141, 
-    0.27044211], [0.17300684, 0.16876921, 0.26024893], [0.16868273, 
-    0.16479861, 0.25034479], [0.16448691, 0.16091728, 0.24075373], [
-    0.16043195, 0.15714351, 0.23141745], [0.15652427, 0.15348248, 
-    0.22238175], [0.15277065, 0.14994111, 0.21368395], [0.14918274, 
-    0.14653431, 0.20529486], [0.14577095, 0.14327403, 0.19720829], [
-    0.14254381, 0.14016944, 0.18944326], [0.13951035, 0.13723063, 
-    0.18201072], [0.13667798, 0.13446606, 0.17493774], [0.13405762, 
-    0.13188822, 0.16820842], [0.13165767, 0.12950667, 0.16183275], [
-    0.12948748, 0.12733187, 0.15580631], [0.12755435, 0.1253723, 0.15014098
-    ], [0.12586516, 0.12363617, 0.1448459], [0.12442647, 0.12213143, 
-    0.13992571], [0.12324241, 0.12086419, 0.13539995], [0.12232067, 
-    0.11984278, 0.13124644], [0.12166209, 0.11907077, 0.12749671], [
-    0.12126982, 0.11855309, 0.12415079], [0.12114244, 0.11829179, 0.1212385
-    ], [0.12127766, 0.11828837, 0.11878534], [0.12284806, 0.1179729, 
-    0.11772022], [0.12619498, 0.11721796, 0.11770203], [0.129968, 
-    0.11663788, 0.11792377], [0.13410011, 0.11625146, 0.11839138], [
-    0.13855459, 0.11606618, 0.11910584], [0.14333775, 0.11607038, 0.1200606
-    ], [0.148417, 0.11626929, 0.12125453], [0.15377389, 0.11666192, 
-    0.12268364], [0.15941427, 0.11723486, 0.12433911], [0.16533376, 
-    0.11797856, 0.12621303], [0.17152547, 0.11888403, 0.12829735], [
-    0.17797765, 0.11994436, 0.13058435], [0.18468769, 0.12114722, 
-    0.13306426], [0.19165663, 0.12247737, 0.13572616], [0.19884415, 
-    0.12394381, 0.1385669], [0.20627181, 0.12551883, 0.14157124], [
-    0.21394877, 0.12718055, 0.14472604], [0.22184572, 0.12893119, 
-    0.14802579], [0.22994394, 0.13076731, 0.15146314], [0.23823937, 
-    0.13267611, 0.15502793], [0.24676041, 0.13462172, 0.15870321], [
-    0.25546457, 0.13661751, 0.16248722], [0.26433628, 0.13865956, 
-    0.16637301], [0.27341345, 0.14070412, 0.17034221], [0.28264773, 
-    0.14277192, 0.1743957], [0.29202272, 0.14486161, 0.17852793], [
-    0.30159648, 0.14691224, 0.1827169], [0.31129002, 0.14897583, 0.18695213
-    ], [0.32111555, 0.15103351, 0.19119629], [0.33107961, 0.1530674, 
-    0.19543758], [0.34119892, 0.15504762, 0.1996803], [0.35142388, 
-    0.15701131, 0.20389086], [0.36178937, 0.1589124, 0.20807639], [
-    0.37229381, 0.16073993, 0.21223189], [0.38288348, 0.16254006, 0.2163249
-    ], [0.39359592, 0.16426336, 0.22036577], [0.40444332, 0.16588767, 
-    0.22434027], [0.41537995, 0.16745325, 0.2282297], [0.42640867, 
-    0.16894939, 0.23202755], [0.43754706, 0.17034847, 0.23572899], [
-    0.44878564, 0.1716535, 0.23932344], [0.4601126, 0.17287365, 0.24278607],
-    [0.47151732, 0.17401641, 0.24610337], [0.48300689, 0.17506676, 
-    0.2492737], [0.49458302, 0.17601892, 0.25227688], [0.50623876, 
-    0.17687777, 0.255096], [0.5179623, 0.17765528, 0.2577162], [0.52975234,
-    0.17835232, 0.2601134], [0.54159776, 0.17898292, 0.26226847], [
-    0.55348804, 0.17956232, 0.26416003], [0.56541729, 0.18010175, 
-    0.26575971], [0.57736669, 0.180631, 0.26704888], [0.58932081, 
-    0.18117827, 0.26800409], [0.60127582, 0.18175888, 0.26858488], [
-    0.61319563, 0.1824336, 0.2687872], [0.62506376, 0.18324015, 0.26858301],
-    [0.63681202, 0.18430173, 0.26795276], [0.64842603, 0.18565472, 
-    0.26689463], [0.65988195, 0.18734638, 0.26543435], [0.67111966, 
-    0.18948885, 0.26357955], [0.68209194, 0.19216636, 0.26137175], [
-    0.69281185, 0.19535326, 0.25887063], [0.70335022, 0.19891271, 
-    0.25617971], [0.71375229, 0.20276438, 0.25331365], [0.72401436, 
-    0.20691287, 0.25027366], [0.73407638, 0.21145051, 0.24710661], [
-    0.74396983, 0.21631913, 0.24380715], [0.75361506, 0.22163653, 
-    0.24043996], [0.7630579, 0.22731637, 0.23700095], [0.77222228, 
-    0.23346231, 0.23356628], [0.78115441, 0.23998404, 0.23013825], [
-    0.78979746, 0.24694858, 0.22678822], [0.79819286, 0.25427223, 
-    0.22352658], [0.80630444, 0.26198807, 0.22040877], [0.81417437, 
-    0.27001406, 0.21744645], [0.82177364, 0.27837336, 0.21468316], [
-    0.82915955, 0.28696963, 0.21210766], [0.83628628, 0.2958499, 0.20977813
-    ], [0.84322168, 0.30491136, 0.20766435], [0.84995458, 0.31415945, 
-    0.2057863], [0.85648867, 0.32358058, 0.20415327], [0.86286243, 
-    0.33312058, 0.20274969], [0.86908321, 0.34276705, 0.20157271], [
-    0.87512876, 0.3525416, 0.20064949], [0.88100349, 0.36243385, 0.19999078
-    ], [0.8866469, 0.37249496, 0.1997976], [0.89203964, 0.38273475, 
-    0.20013431], [0.89713496, 0.39318156, 0.20121514], [0.90195099, 
-    0.40380687, 0.20301555], [0.90648379, 0.41460191, 0.20558847], [
-    0.9106967, 0.42557857, 0.20918529], [0.91463791, 0.43668557, 0.21367954
-    ], [0.91830723, 0.44790913, 0.21916352], [0.92171507, 0.45922856, 
-    0.22568002], [0.92491786, 0.4705936, 0.23308207], [0.92790792, 
-    0.48200153, 0.24145932], [0.93073701, 0.49341219, 0.25065486], [
-    0.93343918, 0.5048017, 0.26056148], [0.93602064, 0.51616486, 0.27118485
-    ], [0.93850535, 0.52748892, 0.28242464], [0.94092933, 0.53875462, 
-    0.29416042], [0.94330011, 0.5499628, 0.30634189], [0.94563159, 
-    0.56110987, 0.31891624], [0.94792955, 0.57219822, 0.33184256], [
-    0.95020929, 0.5832232, 0.34508419], [0.95247324, 0.59419035, 0.35859866
-    ], [0.95471709, 0.60510869, 0.37236035], [0.95698411, 0.61595766, 
-    0.38629631], [0.95923863, 0.62676473, 0.40043317], [0.9615041, 
-    0.6375203, 0.41474106], [0.96371553, 0.64826619, 0.42928335], [
-    0.96591497, 0.65899621, 0.44380444], [0.96809871, 0.66971662, 
-    0.45830232], [0.9702495, 0.6804394, 0.47280492], [0.9723881, 0.69115622,
-    0.48729272], [0.97450723, 0.70187358, 0.50178034], [0.9766108, 0.712592,
-    0.51626837], [0.97871716, 0.72330511, 0.53074053], [0.98082222, 
-    0.73401769, 0.54520694], [0.9829001, 0.74474445, 0.5597019], [
-    0.98497466, 0.75547635, 0.57420239], [0.98705581, 0.76621129, 
-    0.58870185], [0.98913325, 0.77695637, 0.60321626], [0.99119918, 
-    0.78771716, 0.61775821], [0.9932672, 0.79848979, 0.63231691], [
-    0.99535958, 0.80926704, 0.64687278], [0.99740544, 0.82008078, 
-    0.66150571], [0.9992197, 0.83100723, 0.6764127]]
-_flare_lut = [[0.92907237, 0.68878959, 0.50411509], [0.92891402, 0.68494686,
-    0.50173994], [0.92864754, 0.68116207, 0.4993754], [0.92836112, 
-    0.67738527, 0.49701572], [0.9280599, 0.67361354, 0.49466044], [
-    0.92775569, 0.66983999, 0.49230866], [0.9274375, 0.66607098, 0.48996097
-    ], [0.927111, 0.66230315, 0.48761688], [0.92677996, 0.6585342, 0.485276
-    ], [0.92644317, 0.65476476, 0.48293832], [0.92609759, 0.65099658, 
-    0.48060392], [0.925747, 0.64722729, 0.47827244], [0.92539502, 
-    0.64345456, 0.47594352], [0.92503106, 0.6396848, 0.47361782], [
-    0.92466877, 0.6359095, 0.47129427], [0.92429828, 0.63213463, 0.46897349
-    ], [0.92392172, 0.62835879, 0.46665526], [0.92354597, 0.62457749, 
-    0.46433898], [0.9231622, 0.6207962, 0.46202524], [0.92277222, 
-    0.61701365, 0.45971384], [0.92237978, 0.61322733, 0.45740444], [
-    0.92198615, 0.60943622, 0.45509686], [0.92158735, 0.60564276, 
-    0.45279137], [0.92118373, 0.60184659, 0.45048789], [0.92077582, 
-    0.59804722, 0.44818634], [0.92036413, 0.59424414, 0.44588663], [
-    0.91994924, 0.5904368, 0.44358868], [0.91952943, 0.58662619, 0.4412926],
-    [0.91910675, 0.58281075, 0.43899817], [0.91868096, 0.57899046, 
-    0.4367054], [0.91825103, 0.57516584, 0.43441436], [0.91781857, 
-    0.57133556, 0.43212486], [0.9173814, 0.56750099, 0.4298371], [
-    0.91694139, 0.56366058, 0.42755089], [0.91649756, 0.55981483, 
-    0.42526631], [0.91604942, 0.55596387, 0.42298339], [0.9155979, 
-    0.55210684, 0.42070204], [0.9151409, 0.54824485, 0.4184247], [
-    0.91466138, 0.54438817, 0.41617858], [0.91416896, 0.54052962, 
-    0.41396347], [0.91366559, 0.53666778, 0.41177769], [0.91315173, 
-    0.53280208, 0.40962196], [0.91262605, 0.52893336, 0.40749715], [
-    0.91208866, 0.52506133, 0.40540404], [0.91153952, 0.52118582, 
-    0.40334346], [0.91097732, 0.51730767, 0.4013163], [0.910403, 0.51342591,
-    0.39932342], [0.90981494, 0.50954168, 0.39736571], [0.90921368, 
-    0.5056543, 0.39544411], [0.90859797, 0.50176463, 0.39355952], [
-    0.90796841, 0.49787195, 0.39171297], [0.90732341, 0.4939774, 0.38990532
-    ], [0.90666382, 0.49008006, 0.38813773], [0.90598815, 0.486181, 
-    0.38641107], [0.90529624, 0.48228017, 0.38472641], [0.90458808, 
-    0.47837738, 0.38308489], [0.90386248, 0.47447348, 0.38148746], [
-    0.90311921, 0.4705685, 0.37993524], [0.90235809, 0.46666239, 0.37842943
-    ], [0.90157824, 0.46275577, 0.37697105], [0.90077904, 0.45884905, 
-    0.37556121], [0.89995995, 0.45494253, 0.37420106], [0.89912041, 
-    0.4510366, 0.37289175], [0.8982602, 0.44713126, 0.37163458], [
-    0.89737819, 0.44322747, 0.37043052], [0.89647387, 0.43932557, 
-    0.36928078], [0.89554477, 0.43542759, 0.36818855], [0.89458871, 
-    0.4315354, 0.36715654], [0.89360794, 0.42764714, 0.36618273], [
-    0.89260152, 0.42376366, 0.36526813], [0.8915687, 0.41988565, 0.36441384
-    ], [0.89050882, 0.41601371, 0.36362102], [0.8894159, 0.41215334, 
-    0.36289639], [0.888292, 0.40830288, 0.36223756], [0.88713784, 
-    0.40446193, 0.36164328], [0.88595253, 0.40063149, 0.36111438], [
-    0.88473115, 0.39681635, 0.3606566], [0.88347246, 0.39301805, 0.36027074
-    ], [0.88217931, 0.38923439, 0.35995244], [0.880851, 0.38546632, 
-    0.35970244], [0.87947728, 0.38172422, 0.35953127], [0.87806542, 
-    0.37800172, 0.35942941], [0.87661509, 0.37429964, 0.35939659], [
-    0.87511668, 0.37062819, 0.35944178], [0.87357554, 0.36698279, 
-    0.35955811], [0.87199254, 0.3633634, 0.35974223], [0.87035691, 
-    0.35978174, 0.36000516], [0.86867647, 0.35623087, 0.36033559], [
-    0.86694949, 0.35271349, 0.36073358], [0.86516775, 0.34923921, 
-    0.36120624], [0.86333996, 0.34580008, 0.36174113], [0.86145909, 
-    0.3424046, 0.36234402], [0.85952586, 0.33905327, 0.36301129], [
-    0.85754536, 0.33574168, 0.36373567], [0.855514, 0.33247568, 0.36451271],
-    [0.85344392, 0.32924217, 0.36533344], [0.8513284, 0.32604977, 
-    0.36620106], [0.84916723, 0.32289973, 0.36711424], [0.84696243, 
-    0.31979068, 0.36806976], [0.84470627, 0.31673295, 0.36907066], [
-    0.84240761, 0.31371695, 0.37010969], [0.84005337, 0.31075974, 
-    0.37119284], [0.83765537, 0.30784814, 0.3723105], [0.83520234, 
-    0.30499724, 0.37346726], [0.83270291, 0.30219766, 0.37465552], [
-    0.83014895, 0.29946081, 0.37587769], [0.82754694, 0.29677989, 
-    0.37712733], [0.82489111, 0.29416352, 0.37840532], [0.82218644, 
-    0.29160665, 0.37970606], [0.81942908, 0.28911553, 0.38102921], [
-    0.81662276, 0.28668665, 0.38236999], [0.81376555, 0.28432371, 0.383727],
-    [0.81085964, 0.28202508, 0.38509649], [0.8079055, 0.27979128, 
-    0.38647583], [0.80490309, 0.27762348, 0.3878626], [0.80185613, 
-    0.2755178, 0.38925253], [0.79876118, 0.27347974, 0.39064559], [
-    0.79562644, 0.27149928, 0.39203532], [0.79244362, 0.2695883, 0.39342447
-    ], [0.78922456, 0.26773176, 0.3948046], [0.78596161, 0.26594053, 
-    0.39617873], [0.7826624, 0.26420493, 0.39754146], [0.77932717, 
-    0.26252522, 0.39889102], [0.77595363, 0.2609049, 0.4002279], [
-    0.77254999, 0.25933319, 0.40154704], [0.76911107, 0.25781758, 
-    0.40284959], [0.76564158, 0.25635173, 0.40413341], [0.76214598, 
-    0.25492998, 0.40539471], [0.75861834, 0.25356035, 0.40663694], [
-    0.75506533, 0.25223402, 0.40785559], [0.75148963, 0.2509473, 0.40904966
-    ], [0.74788835, 0.24970413, 0.41022028], [0.74426345, 0.24850191, 
-    0.41136599], [0.74061927, 0.24733457, 0.41248516], [0.73695678, 
-    0.24620072, 0.41357737], [0.73327278, 0.24510469, 0.41464364], [
-    0.72957096, 0.24404127, 0.4156828], [0.72585394, 0.24300672, 0.41669383
-    ], [0.7221226, 0.24199971, 0.41767651], [0.71837612, 0.24102046, 
-    0.41863486], [0.71463236, 0.24004289, 0.41956983], [0.7108932, 
-    0.23906316, 0.42048681], [0.70715842, 0.23808142, 0.42138647], [
-    0.70342811, 0.2370976, 0.42226844], [0.69970218, 0.23611179, 0.42313282
-    ], [0.69598055, 0.2351247, 0.42397678], [0.69226314, 0.23413578, 
-    0.42480327], [0.68854988, 0.23314511, 0.42561234], [0.68484064, 
-    0.23215279, 0.42640419], [0.68113541, 0.23115942, 0.42717615], [
-    0.67743412, 0.23016472, 0.42792989], [0.67373662, 0.22916861, 
-    0.42866642], [0.67004287, 0.22817117, 0.42938576], [0.66635279, 
-    0.22717328, 0.43008427], [0.66266621, 0.22617435, 0.43076552], [
-    0.65898313, 0.22517434, 0.43142956], [0.65530349, 0.22417381, 
-    0.43207427], [0.65162696, 0.22317307, 0.4327001], [0.64795375, 
-    0.22217149, 0.43330852], [0.64428351, 0.22116972, 0.43389854], [
-    0.64061624, 0.22016818, 0.43446845], [0.63695183, 0.21916625, 
-    0.43502123], [0.63329016, 0.21816454, 0.43555493], [0.62963102, 
-    0.2171635, 0.43606881], [0.62597451, 0.21616235, 0.43656529], [
-    0.62232019, 0.21516239, 0.43704153], [0.61866821, 0.21416307, 
-    0.43749868], [0.61501835, 0.21316435, 0.43793808], [0.61137029, 
-    0.21216761, 0.4383556], [0.60772426, 0.2111715, 0.43875552], [
-    0.60407977, 0.21017746, 0.43913439], [0.60043678, 0.20918503, 
-    0.43949412], [0.59679524, 0.20819447, 0.43983393], [0.59315487, 
-    0.20720639, 0.44015254], [0.58951566, 0.20622027, 0.44045213], [
-    0.58587715, 0.20523751, 0.44072926], [0.5822395, 0.20425693, 0.44098758
-    ], [0.57860222, 0.20328034, 0.44122241], [0.57496549, 0.20230637, 
-    0.44143805], [0.57132875, 0.20133689, 0.4416298], [0.56769215, 
-    0.20037071, 0.44180142], [0.5640552, 0.19940936, 0.44194923], [
-    0.56041794, 0.19845221, 0.44207535], [0.55678004, 0.1975, 0.44217824],
-    [0.55314129, 0.19655316, 0.44225723], [0.54950166, 0.19561118, 
-    0.44231412], [0.54585987, 0.19467771, 0.44234111], [0.54221157, 
-    0.19375869, 0.44233698], [0.5385549, 0.19285696, 0.44229959], [
-    0.5348913, 0.19197036, 0.44222958], [0.53122177, 0.1910974, 0.44212735],
-    [0.52754464, 0.19024042, 0.44199159], [0.52386353, 0.18939409, 
-    0.44182449], [0.52017476, 0.18856368, 0.44162345], [0.51648277, 
-    0.18774266, 0.44139128], [0.51278481, 0.18693492, 0.44112605], [
-    0.50908361, 0.18613639, 0.4408295], [0.50537784, 0.18534893, 0.44050064
-    ], [0.50166912, 0.18457008, 0.44014054], [0.49795686, 0.18380056, 
-    0.43974881], [0.49424218, 0.18303865, 0.43932623], [0.49052472, 
-    0.18228477, 0.43887255], [0.48680565, 0.1815371, 0.43838867], [
-    0.48308419, 0.18079663, 0.43787408], [0.47936222, 0.18006056, 
-    0.43733022], [0.47563799, 0.17933127, 0.43675585], [0.47191466, 
-    0.17860416, 0.43615337], [0.46818879, 0.17788392, 0.43552047], [
-    0.46446454, 0.17716458, 0.43486036], [0.46073893, 0.17645017, 
-    0.43417097], [0.45701462, 0.17573691, 0.43345429], [0.45329097, 
-    0.17502549, 0.43271025], [0.44956744, 0.17431649, 0.4319386], [
-    0.44584668, 0.17360625, 0.43114133], [0.44212538, 0.17289906, 
-    0.43031642], [0.43840678, 0.17219041, 0.42946642], [0.43469046, 
-    0.17148074, 0.42859124], [0.4309749, 0.17077192, 0.42769008], [
-    0.42726297, 0.17006003, 0.42676519], [0.42355299, 0.16934709, 
-    0.42581586], [0.41984535, 0.16863258, 0.42484219], [0.41614149, 
-    0.16791429, 0.42384614], [0.41244029, 0.16719372, 0.42282661], [
-    0.40874177, 0.16647061, 0.42178429], [0.40504765, 0.16574261, 
-    0.42072062], [0.401357, 0.16501079, 0.41963528], [0.397669, 0.16427607,
-    0.418528], [0.39398585, 0.16353554, 0.41740053], [0.39030735, 
-    0.16278924, 0.41625344], [0.3866314, 0.16203977, 0.41508517], [
-    0.38295904, 0.16128519, 0.41389849], [0.37928736, 0.16052483, 
-    0.41270599], [0.37562649, 0.15974704, 0.41151182], [0.37197803, 
-    0.15895049, 0.41031532], [0.36833779, 0.15813871, 0.40911916], [
-    0.36470944, 0.15730861, 0.40792149], [0.36109117, 0.15646169, 
-    0.40672362], [0.35748213, 0.15559861, 0.40552633], [0.353885, 
-    0.15471714, 0.40432831], [0.35029682, 0.15381967, 0.4031316], [
-    0.34671861, 0.1529053, 0.40193587], [0.34315191, 0.15197275, 0.40074049
-    ], [0.33959331, 0.15102466, 0.3995478], [0.33604378, 0.15006017, 
-    0.39835754], [0.33250529, 0.14907766, 0.39716879], [0.32897621, 
-    0.14807831, 0.39598285], [0.3254559, 0.14706248, 0.39480044], [
-    0.32194567, 0.14602909, 0.39362106], [0.31844477, 0.14497857, 
-    0.39244549], [0.31494974, 0.14391333, 0.39127626], [0.31146605, 
-    0.14282918, 0.39011024], [0.30798857, 0.1417297, 0.38895105], [
-    0.30451661, 0.14061515, 0.38779953], [0.30105136, 0.13948445, 
-    0.38665531], [0.2975886, 0.1383403, 0.38552159], [0.29408557, 
-    0.13721193, 0.38442775]]
-_crest_lut = [[0.6468274, 0.80289262, 0.56592265], [0.64233318, 0.80081141,
-    0.56639461], [0.63791969, 0.7987162, 0.56674976], [0.6335316, 
-    0.79661833, 0.56706128], [0.62915226, 0.7945212, 0.56735066], [
-    0.62477862, 0.79242543, 0.56762143], [0.62042003, 0.79032918, 
-    0.56786129], [0.61606327, 0.78823508, 0.56808666], [0.61171322, 
-    0.78614216, 0.56829092], [0.60736933, 0.78405055, 0.56847436], [
-    0.60302658, 0.78196121, 0.56864272], [0.59868708, 0.77987374, 
-    0.56879289], [0.59435366, 0.77778758, 0.56892099], [0.59001953, 
-    0.77570403, 0.56903477], [0.58568753, 0.77362254, 0.56913028], [
-    0.58135593, 0.77154342, 0.56920908], [0.57702623, 0.76946638, 
-    0.56926895], [0.57269165, 0.76739266, 0.5693172], [0.56835934, 
-    0.76532092, 0.56934507], [0.56402533, 0.76325185, 0.56935664], [
-    0.55968429, 0.76118643, 0.56935732], [0.55534159, 0.75912361, 
-    0.56934052], [0.55099572, 0.75706366, 0.56930743], [0.54664626, 
-    0.75500662, 0.56925799], [0.54228969, 0.75295306, 0.56919546], [
-    0.53792417, 0.75090328, 0.56912118], [0.53355172, 0.74885687, 0.5690324
-    ], [0.52917169, 0.74681387, 0.56892926], [0.52478243, 0.74477453, 
-    0.56881287], [0.52038338, 0.74273888, 0.56868323], [0.5159739, 
-    0.74070697, 0.56854039], [0.51155269, 0.73867895, 0.56838507], [
-    0.50711872, 0.73665492, 0.56821764], [0.50267118, 0.73463494, 
-    0.56803826], [0.49822926, 0.73261388, 0.56785146], [0.49381422, 
-    0.73058524, 0.56767484], [0.48942421, 0.72854938, 0.56751036], [
-    0.48505993, 0.72650623, 0.56735752], [0.48072207, 0.72445575, 
-    0.56721583], [0.4764113, 0.72239788, 0.56708475], [0.47212827, 
-    0.72033258, 0.56696376], [0.46787361, 0.71825983, 0.56685231], [
-    0.46364792, 0.71617961, 0.56674986], [0.45945271, 0.71409167, 
-    0.56665625], [0.45528878, 0.71199595, 0.56657103], [0.45115557, 
-    0.70989276, 0.5664931], [0.44705356, 0.70778212, 0.56642189], [
-    0.44298321, 0.70566406, 0.56635683], [0.43894492, 0.70353863, 
-    0.56629734], [0.43493911, 0.70140588, 0.56624286], [0.43096612, 
-    0.69926587, 0.5661928], [0.42702625, 0.69711868, 0.56614659], [
-    0.42311977, 0.69496438, 0.56610368], [0.41924689, 0.69280308, 
-    0.56606355], [0.41540778, 0.69063486, 0.56602564], [0.41160259, 
-    0.68845984, 0.56598944], [0.40783143, 0.68627814, 0.56595436], [
-    0.40409434, 0.68408988, 0.56591994], [0.40039134, 0.68189518, 
-    0.56588564], [0.39672238, 0.6796942, 0.56585103], [0.39308781, 
-    0.67748696, 0.56581581], [0.38949137, 0.67527276, 0.56578084], [
-    0.38592889, 0.67305266, 0.56574422], [0.38240013, 0.67082685, 
-    0.56570561], [0.37890483, 0.66859548, 0.56566462], [0.37544276, 
-    0.66635871, 0.56562081], [0.37201365, 0.66411673, 0.56557372], [
-    0.36861709, 0.6618697, 0.5655231], [0.36525264, 0.65961782, 0.56546873],
-    [0.36191986, 0.65736125, 0.56541032], [0.35861935, 0.65509998, 
-    0.56534768], [0.35535621, 0.65283302, 0.56528211], [0.35212361, 
-    0.65056188, 0.56521171], [0.34892097, 0.64828676, 0.56513633], [
-    0.34574785, 0.64600783, 0.56505539], [0.34260357, 0.64372528, 0.5649689
-    ], [0.33948744, 0.64143931, 0.56487679], [0.33639887, 0.6391501, 
-    0.56477869], [0.33334501, 0.63685626, 0.56467661], [0.33031952, 
-    0.63455911, 0.564569], [0.3273199, 0.63225924, 0.56445488], [0.32434526,
-    0.62995682, 0.56433457], [0.32139487, 0.62765201, 0.56420795], [
-    0.31846807, 0.62534504, 0.56407446], [0.3155731, 0.62303426, 0.56393695
-    ], [0.31270304, 0.62072111, 0.56379321], [0.30985436, 0.61840624, 
-    0.56364307], [0.30702635, 0.61608984, 0.56348606], [0.30421803, 
-    0.61377205, 0.56332267], [0.30143611, 0.61145167, 0.56315419], [
-    0.29867863, 0.60912907, 0.56298054], [0.29593872, 0.60680554, 
-    0.56280022], [0.29321538, 0.60448121, 0.56261376], [0.2905079, 
-    0.60215628, 0.56242036], [0.28782827, 0.5998285, 0.56222366], [
-    0.28516521, 0.59749996, 0.56202093], [0.28251558, 0.59517119, 
-    0.56181204], [0.27987847, 0.59284232, 0.56159709], [0.27726216, 
-    0.59051189, 0.56137785], [0.27466434, 0.58818027, 0.56115433], [
-    0.2720767, 0.58584893, 0.56092486], [0.26949829, 0.58351797, 0.56068983
-    ], [0.26693801, 0.58118582, 0.56045121], [0.26439366, 0.57885288, 
-    0.56020858], [0.26185616, 0.57652063, 0.55996077], [0.25932459, 
-    0.57418919, 0.55970795], [0.25681303, 0.57185614, 0.55945297], [
-    0.25431024, 0.56952337, 0.55919385], [0.25180492, 0.56719255, 0.5589305
-    ], [0.24929311, 0.56486397, 0.5586654], [0.24678356, 0.56253666, 
-    0.55839491], [0.24426587, 0.56021153, 0.55812473], [0.24174022, 
-    0.55788852, 0.55785448], [0.23921167, 0.55556705, 0.55758211], [
-    0.23668315, 0.55324675, 0.55730676], [0.23414742, 0.55092825, 
-    0.55703167], [0.23160473, 0.54861143, 0.5567573], [0.22905996, 
-    0.54629572, 0.55648168], [0.22651648, 0.54398082, 0.5562029], [
-    0.22396709, 0.54166721, 0.55592542], [0.22141221, 0.53935481, 
-    0.55564885], [0.21885269, 0.53704347, 0.55537294], [0.21629986, 
-    0.53473208, 0.55509319], [0.21374297, 0.53242154, 0.5548144], [
-    0.21118255, 0.53011166, 0.55453708], [0.2086192, 0.52780237, 0.55426067
-    ], [0.20605624, 0.52549322, 0.55398479], [0.20350004, 0.5231837, 
-    0.55370601], [0.20094292, 0.52087429, 0.55342884], [0.19838567, 
-    0.51856489, 0.55315283], [0.19582911, 0.51625531, 0.55287818], [
-    0.19327413, 0.51394542, 0.55260469], [0.19072933, 0.51163448, 0.5523289
-    ], [0.18819045, 0.50932268, 0.55205372], [0.18565609, 0.50701014, 
-    0.55177937], [0.18312739, 0.50469666, 0.55150597], [0.18060561, 
-    0.50238204, 0.55123374], [0.178092, 0.50006616, 0.55096224], [
-    0.17558808, 0.49774882, 0.55069118], [0.17310341, 0.49542924, 0.5504176
-    ], [0.17063111, 0.49310789, 0.55014445], [0.1681728, 0.49078458, 
-    0.54987159], [0.1657302, 0.48845913, 0.54959882], [0.16330517, 
-    0.48613135, 0.54932605], [0.16089963, 0.48380104, 0.54905306], [
-    0.15851561, 0.48146803, 0.54877953], [0.15615526, 0.47913212, 
-    0.54850526], [0.15382083, 0.47679313, 0.54822991], [0.15151471, 
-    0.47445087, 0.54795318], [0.14924112, 0.47210502, 0.54767411], [
-    0.1470032, 0.46975537, 0.54739226], [0.14480101, 0.46740187, 0.54710832
-    ], [0.14263736, 0.46504434, 0.54682188], [0.14051521, 0.46268258, 
-    0.54653253], [0.13843761, 0.46031639, 0.54623985], [0.13640774, 
-    0.45794558, 0.5459434], [0.13442887, 0.45556994, 0.54564272], [
-    0.1325044, 0.45318928, 0.54533736], [0.13063777, 0.4508034, 0.54502674],
-    [0.12883252, 0.44841211, 0.5447104], [0.12709242, 0.44601517, 
-    0.54438795], [0.1254209, 0.44361244, 0.54405855], [0.12382162, 
-    0.44120373, 0.54372156], [0.12229818, 0.43878887, 0.54337634], [
-    0.12085453, 0.4363676, 0.54302253], [0.11949938, 0.43393955, 0.54265715
-    ], [0.11823166, 0.43150478, 0.54228104], [0.11705496, 0.42906306, 
-    0.54189388], [0.115972, 0.42661431, 0.54149449], [0.11498598, 
-    0.42415835, 0.54108222], [0.11409965, 0.42169502, 0.54065622], [
-    0.11331533, 0.41922424, 0.5402155], [0.11263542, 0.41674582, 0.53975931
-    ], [0.1120615, 0.4142597, 0.53928656], [0.11159738, 0.41176567, 
-    0.53879549], [0.11125248, 0.40926325, 0.53828203], [0.11101698, 
-    0.40675289, 0.53774864], [0.11089152, 0.40423445, 0.53719455], [
-    0.11085121, 0.4017095, 0.53662425], [0.11087217, 0.39917938, 0.53604354
-    ], [0.11095515, 0.39664394, 0.53545166], [0.11110676, 0.39410282, 
-    0.53484509], [0.11131735, 0.39155635, 0.53422678], [0.11158595, 
-    0.38900446, 0.53359634], [0.11191139, 0.38644711, 0.5329534], [
-    0.11229224, 0.38388426, 0.53229748], [0.11273683, 0.38131546, 
-    0.53162393], [0.11323438, 0.37874109, 0.53093619], [0.11378271, 
-    0.37616112, 0.53023413], [0.11437992, 0.37357557, 0.52951727], [
-    0.11502681, 0.37098429, 0.52878396], [0.11572661, 0.36838709, 
-    0.52803124], [0.11646936, 0.36578429, 0.52726234], [0.11725299, 
-    0.3631759, 0.52647685], [0.1180755, 0.36056193, 0.52567436], [0.1189438,
-    0.35794203, 0.5248497], [0.11984752, 0.35531657, 0.52400649], [
-    0.1207833, 0.35268564, 0.52314492], [0.12174895, 0.35004927, 0.52226461
-    ], [0.12274959, 0.34740723, 0.52136104], [0.12377809, 0.34475975, 
-    0.52043639], [0.12482961, 0.34210702, 0.51949179], [0.125902, 
-    0.33944908, 0.51852688], [0.12699998, 0.33678574, 0.51753708], [
-    0.12811691, 0.33411727, 0.51652464], [0.12924811, 0.33144384, 
-    0.51549084], [0.13039157, 0.32876552, 0.51443538], [0.13155228, 
-    0.32608217, 0.51335321], [0.13272282, 0.32339407, 0.51224759], [
-    0.13389954, 0.32070138, 0.51111946], [0.13508064, 0.31800419, 
-    0.50996862], [0.13627149, 0.31530238, 0.50878942], [0.13746376, 
-    0.31259627, 0.50758645], [0.13865499, 0.30988598, 0.50636017], [
-    0.13984364, 0.30717161, 0.50511042], [0.14103515, 0.30445309, 
-    0.50383119], [0.14222093, 0.30173071, 0.50252813], [0.14339946, 
-    0.2990046, 0.50120127], [0.14456941, 0.29627483, 0.49985054], [
-    0.14573579, 0.29354139, 0.49847009], [0.14689091, 0.29080452, 
-    0.49706566], [0.1480336, 0.28806432, 0.49563732], [0.1491628, 
-    0.28532086, 0.49418508], [0.15028228, 0.28257418, 0.49270402], [
-    0.15138673, 0.27982444, 0.49119848], [0.15247457, 0.27707172, 
-    0.48966925], [0.15354487, 0.2743161, 0.48811641], [0.15459955, 
-    0.27155765, 0.4865371], [0.15563716, 0.26879642, 0.4849321], [0.1566572,
-    0.26603191, 0.48330429], [0.15765823, 0.26326032, 0.48167456], [
-    0.15862147, 0.26048295, 0.48005785], [0.15954301, 0.25770084, 
-    0.47845341], [0.16043267, 0.25491144, 0.4768626], [0.16129262, 
-    0.25211406, 0.4752857], [0.1621119, 0.24931169, 0.47372076], [
-    0.16290577, 0.24649998, 0.47217025], [0.16366819, 0.24368054, 
-    0.47063302], [0.1644021, 0.24085237, 0.46910949], [0.16510882, 
-    0.2380149, 0.46759982], [0.16579015, 0.23516739, 0.46610429], [
-    0.1664433, 0.2323105, 0.46462219], [0.16707586, 0.22944155, 0.46315508],
-    [0.16768475, 0.22656122, 0.46170223], [0.16826815, 0.22366984, 
-    0.46026308], [0.16883174, 0.22076514, 0.45883891], [0.16937589, 
-    0.21784655, 0.45742976], [0.16990129, 0.21491339, 0.45603578], [
-    0.1704074, 0.21196535, 0.45465677], [0.17089473, 0.20900176, 0.4532928],
-    [0.17136819, 0.20602012, 0.45194524], [0.17182683, 0.20302012, 
-    0.45061386], [0.17227059, 0.20000106, 0.44929865], [0.17270583, 
-    0.19695949, 0.44800165], [0.17313804, 0.19389201, 0.44672488], [
-    0.17363177, 0.19076859, 0.44549087]]
-_lut_dict = dict(rocket=_rocket_lut, mako=_mako_lut, icefire=_icefire_lut,
-    vlag=_vlag_lut, flare=_flare_lut, crest=_crest_lut)
+
+
+_rocket_lut = [
+    [ 0.01060815, 0.01808215, 0.10018654],
+    [ 0.01428972, 0.02048237, 0.10374486],
+    [ 0.01831941, 0.0229766 , 0.10738511],
+    [ 0.02275049, 0.02554464, 0.11108639],
+    [ 0.02759119, 0.02818316, 0.11483751],
+    [ 0.03285175, 0.03088792, 0.11863035],
+    [ 0.03853466, 0.03365771, 0.12245873],
+    [ 0.04447016, 0.03648425, 0.12631831],
+    [ 0.05032105, 0.03936808, 0.13020508],
+    [ 0.05611171, 0.04224835, 0.13411624],
+    [ 0.0618531 , 0.04504866, 0.13804929],
+    [ 0.06755457, 0.04778179, 0.14200206],
+    [ 0.0732236 , 0.05045047, 0.14597263],
+    [ 0.0788708 , 0.05305461, 0.14995981],
+    [ 0.08450105, 0.05559631, 0.15396203],
+    [ 0.09011319, 0.05808059, 0.15797687],
+    [ 0.09572396, 0.06050127, 0.16200507],
+    [ 0.10132312, 0.06286782, 0.16604287],
+    [ 0.10692823, 0.06517224, 0.17009175],
+    [ 0.1125315 , 0.06742194, 0.17414848],
+    [ 0.11813947, 0.06961499, 0.17821272],
+    [ 0.12375803, 0.07174938, 0.18228425],
+    [ 0.12938228, 0.07383015, 0.18636053],
+    [ 0.13501631, 0.07585609, 0.19044109],
+    [ 0.14066867, 0.0778224 , 0.19452676],
+    [ 0.14633406, 0.07973393, 0.1986151 ],
+    [ 0.15201338, 0.08159108, 0.20270523],
+    [ 0.15770877, 0.08339312, 0.20679668],
+    [ 0.16342174, 0.0851396 , 0.21088893],
+    [ 0.16915387, 0.08682996, 0.21498104],
+    [ 0.17489524, 0.08848235, 0.2190294 ],
+    [ 0.18065495, 0.09009031, 0.22303512],
+    [ 0.18643324, 0.09165431, 0.22699705],
+    [ 0.19223028, 0.09317479, 0.23091409],
+    [ 0.19804623, 0.09465217, 0.23478512],
+    [ 0.20388117, 0.09608689, 0.23860907],
+    [ 0.20973515, 0.09747934, 0.24238489],
+    [ 0.21560818, 0.09882993, 0.24611154],
+    [ 0.22150014, 0.10013944, 0.2497868 ],
+    [ 0.22741085, 0.10140876, 0.25340813],
+    [ 0.23334047, 0.10263737, 0.25697736],
+    [ 0.23928891, 0.10382562, 0.2604936 ],
+    [ 0.24525608, 0.10497384, 0.26395596],
+    [ 0.25124182, 0.10608236, 0.26736359],
+    [ 0.25724602, 0.10715148, 0.27071569],
+    [ 0.26326851, 0.1081815 , 0.27401148],
+    [ 0.26930915, 0.1091727 , 0.2772502 ],
+    [ 0.27536766, 0.11012568, 0.28043021],
+    [ 0.28144375, 0.11104133, 0.2835489 ],
+    [ 0.2875374 , 0.11191896, 0.28660853],
+    [ 0.29364846, 0.11275876, 0.2896085 ],
+    [ 0.29977678, 0.11356089, 0.29254823],
+    [ 0.30592213, 0.11432553, 0.29542718],
+    [ 0.31208435, 0.11505284, 0.29824485],
+    [ 0.31826327, 0.1157429 , 0.30100076],
+    [ 0.32445869, 0.11639585, 0.30369448],
+    [ 0.33067031, 0.11701189, 0.30632563],
+    [ 0.33689808, 0.11759095, 0.3088938 ],
+    [ 0.34314168, 0.11813362, 0.31139721],
+    [ 0.34940101, 0.11863987, 0.3138355 ],
+    [ 0.355676  , 0.11910909, 0.31620996],
+    [ 0.36196644, 0.1195413 , 0.31852037],
+    [ 0.36827206, 0.11993653, 0.32076656],
+    [ 0.37459292, 0.12029443, 0.32294825],
+    [ 0.38092887, 0.12061482, 0.32506528],
+    [ 0.38727975, 0.12089756, 0.3271175 ],
+    [ 0.39364518, 0.12114272, 0.32910494],
+    [ 0.40002537, 0.12134964, 0.33102734],
+    [ 0.40642019, 0.12151801, 0.33288464],
+    [ 0.41282936, 0.12164769, 0.33467689],
+    [ 0.41925278, 0.12173833, 0.33640407],
+    [ 0.42569057, 0.12178916, 0.33806605],
+    [ 0.43214263, 0.12179973, 0.33966284],
+    [ 0.43860848, 0.12177004, 0.34119475],
+    [ 0.44508855, 0.12169883, 0.34266151],
+    [ 0.45158266, 0.12158557, 0.34406324],
+    [ 0.45809049, 0.12142996, 0.34540024],
+    [ 0.46461238, 0.12123063, 0.34667231],
+    [ 0.47114798, 0.12098721, 0.34787978],
+    [ 0.47769736, 0.12069864, 0.34902273],
+    [ 0.48426077, 0.12036349, 0.35010104],
+    [ 0.49083761, 0.11998161, 0.35111537],
+    [ 0.49742847, 0.11955087, 0.35206533],
+    [ 0.50403286, 0.11907081, 0.35295152],
+    [ 0.51065109, 0.11853959, 0.35377385],
+    [ 0.51728314, 0.1179558 , 0.35453252],
+    [ 0.52392883, 0.11731817, 0.35522789],
+    [ 0.53058853, 0.11662445, 0.35585982],
+    [ 0.53726173, 0.11587369, 0.35642903],
+    [ 0.54394898, 0.11506307, 0.35693521],
+    [ 0.5506426 , 0.11420757, 0.35737863],
+    [ 0.55734473, 0.11330456, 0.35775059],
+    [ 0.56405586, 0.11235265, 0.35804813],
+    [ 0.57077365, 0.11135597, 0.35827146],
+    [ 0.5774991 , 0.11031233, 0.35841679],
+    [ 0.58422945, 0.10922707, 0.35848469],
+    [ 0.59096382, 0.10810205, 0.35847347],
+    [ 0.59770215, 0.10693774, 0.35838029],
+    [ 0.60444226, 0.10573912, 0.35820487],
+    [ 0.61118304, 0.10450943, 0.35794557],
+    [ 0.61792306, 0.10325288, 0.35760108],
+    [ 0.62466162, 0.10197244, 0.35716891],
+    [ 0.63139686, 0.10067417, 0.35664819],
+    [ 0.63812122, 0.09938212, 0.35603757],
+    [ 0.64483795, 0.0980891 , 0.35533555],
+    [ 0.65154562, 0.09680192, 0.35454107],
+    [ 0.65824241, 0.09552918, 0.3536529 ],
+    [ 0.66492652, 0.09428017, 0.3526697 ],
+    [ 0.67159578, 0.09306598, 0.35159077],
+    [ 0.67824099, 0.09192342, 0.3504148 ],
+    [ 0.684863  , 0.09085633, 0.34914061],
+    [ 0.69146268, 0.0898675 , 0.34776864],
+    [ 0.69803757, 0.08897226, 0.3462986 ],
+    [ 0.70457834, 0.0882129 , 0.34473046],
+    [ 0.71108138, 0.08761223, 0.3430635 ],
+    [ 0.7175507 , 0.08716212, 0.34129974],
+    [ 0.72398193, 0.08688725, 0.33943958],
+    [ 0.73035829, 0.0868623 , 0.33748452],
+    [ 0.73669146, 0.08704683, 0.33543669],
+    [ 0.74297501, 0.08747196, 0.33329799],
+    [ 0.74919318, 0.08820542, 0.33107204],
+    [ 0.75535825, 0.08919792, 0.32876184],
+    [ 0.76145589, 0.09050716, 0.32637117],
+    [ 0.76748424, 0.09213602, 0.32390525],
+    [ 0.77344838, 0.09405684, 0.32136808],
+    [ 0.77932641, 0.09634794, 0.31876642],
+    [ 0.78513609, 0.09892473, 0.31610488],
+    [ 0.79085854, 0.10184672, 0.313391  ],
+    [ 0.7965014 , 0.10506637, 0.31063031],
+    [ 0.80205987, 0.10858333, 0.30783   ],
+    [ 0.80752799, 0.11239964, 0.30499738],
+    [ 0.81291606, 0.11645784, 0.30213802],
+    [ 0.81820481, 0.12080606, 0.29926105],
+    [ 0.82341472, 0.12535343, 0.2963705 ],
+    [ 0.82852822, 0.13014118, 0.29347474],
+    [ 0.83355779, 0.13511035, 0.29057852],
+    [ 0.83850183, 0.14025098, 0.2876878 ],
+    [ 0.84335441, 0.14556683, 0.28480819],
+    [ 0.84813096, 0.15099892, 0.281943  ],
+    [ 0.85281737, 0.15657772, 0.27909826],
+    [ 0.85742602, 0.1622583 , 0.27627462],
+    [ 0.86196552, 0.16801239, 0.27346473],
+    [ 0.86641628, 0.17387796, 0.27070818],
+    [ 0.87079129, 0.17982114, 0.26797378],
+    [ 0.87507281, 0.18587368, 0.26529697],
+    [ 0.87925878, 0.19203259, 0.26268136],
+    [ 0.8833417 , 0.19830556, 0.26014181],
+    [ 0.88731387, 0.20469941, 0.25769539],
+    [ 0.89116859, 0.21121788, 0.2553592 ],
+    [ 0.89490337, 0.21785614, 0.25314362],
+    [ 0.8985026 , 0.22463251, 0.25108745],
+    [ 0.90197527, 0.23152063, 0.24918223],
+    [ 0.90530097, 0.23854541, 0.24748098],
+    [ 0.90848638, 0.24568473, 0.24598324],
+    [ 0.911533  , 0.25292623, 0.24470258],
+    [ 0.9144225 , 0.26028902, 0.24369359],
+    [ 0.91717106, 0.26773821, 0.24294137],
+    [ 0.91978131, 0.27526191, 0.24245973],
+    [ 0.92223947, 0.28287251, 0.24229568],
+    [ 0.92456587, 0.29053388, 0.24242622],
+    [ 0.92676657, 0.29823282, 0.24285536],
+    [ 0.92882964, 0.30598085, 0.24362274],
+    [ 0.93078135, 0.31373977, 0.24468803],
+    [ 0.93262051, 0.3215093 , 0.24606461],
+    [ 0.93435067, 0.32928362, 0.24775328],
+    [ 0.93599076, 0.33703942, 0.24972157],
+    [ 0.93752831, 0.34479177, 0.25199928],
+    [ 0.93899289, 0.35250734, 0.25452808],
+    [ 0.94036561, 0.36020899, 0.25734661],
+    [ 0.94167588, 0.36786594, 0.2603949 ],
+    [ 0.94291042, 0.37549479, 0.26369821],
+    [ 0.94408513, 0.3830811 , 0.26722004],
+    [ 0.94520419, 0.39062329, 0.27094924],
+    [ 0.94625977, 0.39813168, 0.27489742],
+    [ 0.94727016, 0.4055909 , 0.27902322],
+    [ 0.94823505, 0.41300424, 0.28332283],
+    [ 0.94914549, 0.42038251, 0.28780969],
+    [ 0.95001704, 0.42771398, 0.29244728],
+    [ 0.95085121, 0.43500005, 0.29722817],
+    [ 0.95165009, 0.44224144, 0.30214494],
+    [ 0.9524044 , 0.44944853, 0.3072105 ],
+    [ 0.95312556, 0.45661389, 0.31239776],
+    [ 0.95381595, 0.46373781, 0.31769923],
+    [ 0.95447591, 0.47082238, 0.32310953],
+    [ 0.95510255, 0.47787236, 0.32862553],
+    [ 0.95569679, 0.48489115, 0.33421404],
+    [ 0.95626788, 0.49187351, 0.33985601],
+    [ 0.95681685, 0.49882008, 0.34555431],
+    [ 0.9573439 , 0.50573243, 0.35130912],
+    [ 0.95784842, 0.51261283, 0.35711942],
+    [ 0.95833051, 0.51946267, 0.36298589],
+    [ 0.95879054, 0.52628305, 0.36890904],
+    [ 0.95922872, 0.53307513, 0.3748895 ],
+    [ 0.95964538, 0.53983991, 0.38092784],
+    [ 0.96004345, 0.54657593, 0.3870292 ],
+    [ 0.96042097, 0.55328624, 0.39319057],
+    [ 0.96077819, 0.55997184, 0.39941173],
+    [ 0.9611152 , 0.5666337 , 0.40569343],
+    [ 0.96143273, 0.57327231, 0.41203603],
+    [ 0.96173392, 0.57988594, 0.41844491],
+    [ 0.96201757, 0.58647675, 0.42491751],
+    [ 0.96228344, 0.59304598, 0.43145271],
+    [ 0.96253168, 0.5995944 , 0.43805131],
+    [ 0.96276513, 0.60612062, 0.44471698],
+    [ 0.96298491, 0.6126247 , 0.45145074],
+    [ 0.96318967, 0.61910879, 0.45824902],
+    [ 0.96337949, 0.6255736 , 0.46511271],
+    [ 0.96355923, 0.63201624, 0.47204746],
+    [ 0.96372785, 0.63843852, 0.47905028],
+    [ 0.96388426, 0.64484214, 0.4861196 ],
+    [ 0.96403203, 0.65122535, 0.4932578 ],
+    [ 0.96417332, 0.65758729, 0.50046894],
+    [ 0.9643063 , 0.66393045, 0.5077467 ],
+    [ 0.96443322, 0.67025402, 0.51509334],
+    [ 0.96455845, 0.67655564, 0.52251447],
+    [ 0.96467922, 0.68283846, 0.53000231],
+    [ 0.96479861, 0.68910113, 0.53756026],
+    [ 0.96492035, 0.69534192, 0.5451917 ],
+    [ 0.96504223, 0.7015636 , 0.5528892 ],
+    [ 0.96516917, 0.70776351, 0.5606593 ],
+    [ 0.96530224, 0.71394212, 0.56849894],
+    [ 0.96544032, 0.72010124, 0.57640375],
+    [ 0.96559206, 0.72623592, 0.58438387],
+    [ 0.96575293, 0.73235058, 0.59242739],
+    [ 0.96592829, 0.73844258, 0.60053991],
+    [ 0.96612013, 0.74451182, 0.60871954],
+    [ 0.96632832, 0.75055966, 0.61696136],
+    [ 0.96656022, 0.75658231, 0.62527295],
+    [ 0.96681185, 0.76258381, 0.63364277],
+    [ 0.96709183, 0.76855969, 0.64207921],
+    [ 0.96739773, 0.77451297, 0.65057302],
+    [ 0.96773482, 0.78044149, 0.65912731],
+    [ 0.96810471, 0.78634563, 0.66773889],
+    [ 0.96850919, 0.79222565, 0.6764046 ],
+    [ 0.96893132, 0.79809112, 0.68512266],
+    [ 0.96935926, 0.80395415, 0.69383201],
+    [ 0.9698028 , 0.80981139, 0.70252255],
+    [ 0.97025511, 0.81566605, 0.71120296],
+    [ 0.97071849, 0.82151775, 0.71987163],
+    [ 0.97120159, 0.82736371, 0.72851999],
+    [ 0.97169389, 0.83320847, 0.73716071],
+    [ 0.97220061, 0.83905052, 0.74578903],
+    [ 0.97272597, 0.84488881, 0.75440141],
+    [ 0.97327085, 0.85072354, 0.76299805],
+    [ 0.97383206, 0.85655639, 0.77158353],
+    [ 0.97441222, 0.86238689, 0.78015619],
+    [ 0.97501782, 0.86821321, 0.78871034],
+    [ 0.97564391, 0.87403763, 0.79725261],
+    [ 0.97628674, 0.87986189, 0.8057883 ],
+    [ 0.97696114, 0.88568129, 0.81430324],
+    [ 0.97765722, 0.89149971, 0.82280948],
+    [ 0.97837585, 0.89731727, 0.83130786],
+    [ 0.97912374, 0.90313207, 0.83979337],
+    [ 0.979891  , 0.90894778, 0.84827858],
+    [ 0.98067764, 0.91476465, 0.85676611],
+    [ 0.98137749, 0.92061729, 0.86536915]
+]
+
+
+_mako_lut = [
+    [ 0.04503935, 0.01482344, 0.02092227],
+    [ 0.04933018, 0.01709292, 0.02535719],
+    [ 0.05356262, 0.01950702, 0.03018802],
+    [ 0.05774337, 0.02205989, 0.03545515],
+    [ 0.06188095, 0.02474764, 0.04115287],
+    [ 0.06598247, 0.0275665 , 0.04691409],
+    [ 0.07005374, 0.03051278, 0.05264306],
+    [ 0.07409947, 0.03358324, 0.05834631],
+    [ 0.07812339, 0.03677446, 0.06403249],
+    [ 0.08212852, 0.0400833 , 0.06970862],
+    [ 0.08611731, 0.04339148, 0.07538208],
+    [ 0.09009161, 0.04664706, 0.08105568],
+    [ 0.09405308, 0.04985685, 0.08673591],
+    [ 0.09800301, 0.05302279, 0.09242646],
+    [ 0.10194255, 0.05614641, 0.09813162],
+    [ 0.10587261, 0.05922941, 0.103854  ],
+    [ 0.1097942 , 0.06227277, 0.10959847],
+    [ 0.11370826, 0.06527747, 0.11536893],
+    [ 0.11761516, 0.06824548, 0.12116393],
+    [ 0.12151575, 0.07117741, 0.12698763],
+    [ 0.12541095, 0.07407363, 0.1328442 ],
+    [ 0.12930083, 0.07693611, 0.13873064],
+    [ 0.13317849, 0.07976988, 0.14465095],
+    [ 0.13701138, 0.08259683, 0.15060265],
+    [ 0.14079223, 0.08542126, 0.15659379],
+    [ 0.14452486, 0.08824175, 0.16262484],
+    [ 0.14820351, 0.09106304, 0.16869476],
+    [ 0.15183185, 0.09388372, 0.17480366],
+    [ 0.15540398, 0.09670855, 0.18094993],
+    [ 0.15892417, 0.09953561, 0.18713384],
+    [ 0.16238588, 0.10236998, 0.19335329],
+    [ 0.16579435, 0.10520905, 0.19960847],
+    [ 0.16914226, 0.10805832, 0.20589698],
+    [ 0.17243586, 0.11091443, 0.21221911],
+    [ 0.17566717, 0.11378321, 0.21857219],
+    [ 0.17884322, 0.11666074, 0.2249565 ],
+    [ 0.18195582, 0.11955283, 0.23136943],
+    [ 0.18501213, 0.12245547, 0.23781116],
+    [ 0.18800459, 0.12537395, 0.24427914],
+    [ 0.19093944, 0.1283047 , 0.25077369],
+    [ 0.19381092, 0.13125179, 0.25729255],
+    [ 0.19662307, 0.13421303, 0.26383543],
+    [ 0.19937337, 0.13719028, 0.27040111],
+    [ 0.20206187, 0.14018372, 0.27698891],
+    [ 0.20469116, 0.14319196, 0.28359861],
+    [ 0.20725547, 0.14621882, 0.29022775],
+    [ 0.20976258, 0.14925954, 0.29687795],
+    [ 0.21220409, 0.15231929, 0.30354703],
+    [ 0.21458611, 0.15539445, 0.31023563],
+    [ 0.21690827, 0.15848519, 0.31694355],
+    [ 0.21916481, 0.16159489, 0.32366939],
+    [ 0.2213631 , 0.16471913, 0.33041431],
+    [ 0.22349947, 0.1678599 , 0.33717781],
+    [ 0.2255714 , 0.1710185 , 0.34395925],
+    [ 0.22758415, 0.17419169, 0.35075983],
+    [ 0.22953569, 0.17738041, 0.35757941],
+    [ 0.23142077, 0.18058733, 0.3644173 ],
+    [ 0.2332454 , 0.18380872, 0.37127514],
+    [ 0.2350092 , 0.18704459, 0.3781528 ],
+    [ 0.23670785, 0.190297  , 0.38504973],
+    [ 0.23834119, 0.19356547, 0.39196711],
+    [ 0.23991189, 0.19684817, 0.39890581],
+    [ 0.24141903, 0.20014508, 0.4058667 ],
+    [ 0.24286214, 0.20345642, 0.4128484 ],
+    [ 0.24423453, 0.20678459, 0.41985299],
+    [ 0.24554109, 0.21012669, 0.42688124],
+    [ 0.2467815 , 0.21348266, 0.43393244],
+    [ 0.24795393, 0.21685249, 0.4410088 ],
+    [ 0.24905614, 0.22023618, 0.448113  ],
+    [ 0.25007383, 0.22365053, 0.45519562],
+    [ 0.25098926, 0.22710664, 0.46223892],
+    [ 0.25179696, 0.23060342, 0.46925447],
+    [ 0.25249346, 0.23414353, 0.47623196],
+    [ 0.25307401, 0.23772973, 0.48316271],
+    [ 0.25353152, 0.24136961, 0.49001976],
+    [ 0.25386167, 0.24506548, 0.49679407],
+    [ 0.25406082, 0.2488164 , 0.50348932],
+    [ 0.25412435, 0.25262843, 0.51007843],
+    [ 0.25404842, 0.25650743, 0.51653282],
+    [ 0.25383134, 0.26044852, 0.52286845],
+    [ 0.2534705 , 0.26446165, 0.52903422],
+    [ 0.25296722, 0.2685428 , 0.53503572],
+    [ 0.2523226 , 0.27269346, 0.54085315],
+    [ 0.25153974, 0.27691629, 0.54645752],
+    [ 0.25062402, 0.28120467, 0.55185939],
+    [ 0.24958205, 0.28556371, 0.55701246],
+    [ 0.24842386, 0.28998148, 0.56194601],
+    [ 0.24715928, 0.29446327, 0.56660884],
+    [ 0.24580099, 0.29899398, 0.57104399],
+    [ 0.24436202, 0.30357852, 0.57519929],
+    [ 0.24285591, 0.30819938, 0.57913247],
+    [ 0.24129828, 0.31286235, 0.58278615],
+    [ 0.23970131, 0.3175495 , 0.5862272 ],
+    [ 0.23807973, 0.32226344, 0.58941872],
+    [ 0.23644557, 0.32699241, 0.59240198],
+    [ 0.2348113 , 0.33173196, 0.59518282],
+    [ 0.23318874, 0.33648036, 0.59775543],
+    [ 0.2315855 , 0.34122763, 0.60016456],
+    [ 0.23001121, 0.34597357, 0.60240251],
+    [ 0.2284748 , 0.35071512, 0.6044784 ],
+    [ 0.22698081, 0.35544612, 0.60642528],
+    [ 0.22553305, 0.36016515, 0.60825252],
+    [ 0.22413977, 0.36487341, 0.60994938],
+    [ 0.22280246, 0.36956728, 0.61154118],
+    [ 0.22152555, 0.37424409, 0.61304472],
+    [ 0.22030752, 0.37890437, 0.61446646],
+    [ 0.2191538 , 0.38354668, 0.61581561],
+    [ 0.21806257, 0.38817169, 0.61709794],
+    [ 0.21703799, 0.39277882, 0.61831922],
+    [ 0.21607792, 0.39736958, 0.61948028],
+    [ 0.21518463, 0.40194196, 0.62059763],
+    [ 0.21435467, 0.40649717, 0.62167507],
+    [ 0.21358663, 0.41103579, 0.62271724],
+    [ 0.21288172, 0.41555771, 0.62373011],
+    [ 0.21223835, 0.42006355, 0.62471794],
+    [ 0.21165312, 0.42455441, 0.62568371],
+    [ 0.21112526, 0.42903064, 0.6266318 ],
+    [ 0.21065161, 0.43349321, 0.62756504],
+    [ 0.21023306, 0.43794288, 0.62848279],
+    [ 0.20985996, 0.44238227, 0.62938329],
+    [ 0.20951045, 0.44680966, 0.63030696],
+    [ 0.20916709, 0.45122981, 0.63124483],
+    [ 0.20882976, 0.45564335, 0.63219599],
+    [ 0.20849798, 0.46005094, 0.63315928],
+    [ 0.20817199, 0.46445309, 0.63413391],
+    [ 0.20785149, 0.46885041, 0.63511876],
+    [ 0.20753716, 0.47324327, 0.63611321],
+    [ 0.20722876, 0.47763224, 0.63711608],
+    [ 0.20692679, 0.48201774, 0.63812656],
+    [ 0.20663156, 0.48640018, 0.63914367],
+    [ 0.20634336, 0.49078002, 0.64016638],
+    [ 0.20606303, 0.49515755, 0.6411939 ],
+    [ 0.20578999, 0.49953341, 0.64222457],
+    [ 0.20552612, 0.50390766, 0.64325811],
+    [ 0.20527189, 0.50828072, 0.64429331],
+    [ 0.20502868, 0.51265277, 0.64532947],
+    [ 0.20479718, 0.51702417, 0.64636539],
+    [ 0.20457804, 0.52139527, 0.64739979],
+    [ 0.20437304, 0.52576622, 0.64843198],
+    [ 0.20418396, 0.53013715, 0.64946117],
+    [ 0.20401238, 0.53450825, 0.65048638],
+    [ 0.20385896, 0.53887991, 0.65150606],
+    [ 0.20372653, 0.54325208, 0.65251978],
+    [ 0.20361709, 0.5476249 , 0.6535266 ],
+    [ 0.20353258, 0.55199854, 0.65452542],
+    [ 0.20347472, 0.55637318, 0.655515  ],
+    [ 0.20344718, 0.56074869, 0.65649508],
+    [ 0.20345161, 0.56512531, 0.65746419],
+    [ 0.20349089, 0.56950304, 0.65842151],
+    [ 0.20356842, 0.57388184, 0.65936642],
+    [ 0.20368663, 0.57826181, 0.66029768],
+    [ 0.20384884, 0.58264293, 0.6612145 ],
+    [ 0.20405904, 0.58702506, 0.66211645],
+    [ 0.20431921, 0.59140842, 0.66300179],
+    [ 0.20463464, 0.59579264, 0.66387079],
+    [ 0.20500731, 0.60017798, 0.66472159],
+    [ 0.20544449, 0.60456387, 0.66555409],
+    [ 0.20596097, 0.60894927, 0.66636568],
+    [ 0.20654832, 0.61333521, 0.66715744],
+    [ 0.20721003, 0.61772167, 0.66792838],
+    [ 0.20795035, 0.62210845, 0.66867802],
+    [ 0.20877302, 0.62649546, 0.66940555],
+    [ 0.20968223, 0.63088252, 0.6701105 ],
+    [ 0.21068163, 0.63526951, 0.67079211],
+    [ 0.21177544, 0.63965621, 0.67145005],
+    [ 0.21298582, 0.64404072, 0.67208182],
+    [ 0.21430361, 0.64842404, 0.67268861],
+    [ 0.21572716, 0.65280655, 0.67326978],
+    [ 0.21726052, 0.65718791, 0.6738255 ],
+    [ 0.21890636, 0.66156803, 0.67435491],
+    [ 0.220668  , 0.66594665, 0.67485792],
+    [ 0.22255447, 0.67032297, 0.67533374],
+    [ 0.22458372, 0.67469531, 0.67578061],
+    [ 0.22673713, 0.67906542, 0.67620044],
+    [ 0.22901625, 0.6834332 , 0.67659251],
+    [ 0.23142316, 0.68779836, 0.67695703],
+    [ 0.23395924, 0.69216072, 0.67729378],
+    [ 0.23663857, 0.69651881, 0.67760151],
+    [ 0.23946645, 0.70087194, 0.67788018],
+    [ 0.24242624, 0.70522162, 0.67813088],
+    [ 0.24549008, 0.70957083, 0.67835215],
+    [ 0.24863372, 0.71392166, 0.67854868],
+    [ 0.25187832, 0.71827158, 0.67872193],
+    [ 0.25524083, 0.72261873, 0.67887024],
+    [ 0.25870947, 0.72696469, 0.67898912],
+    [ 0.26229238, 0.73130855, 0.67907645],
+    [ 0.26604085, 0.73564353, 0.67914062],
+    [ 0.26993099, 0.73997282, 0.67917264],
+    [ 0.27397488, 0.74429484, 0.67917096],
+    [ 0.27822463, 0.74860229, 0.67914468],
+    [ 0.28264201, 0.75290034, 0.67907959],
+    [ 0.2873016 , 0.75717817, 0.67899164],
+    [ 0.29215894, 0.76144162, 0.67886578],
+    [ 0.29729823, 0.76567816, 0.67871894],
+    [ 0.30268199, 0.76989232, 0.67853896],
+    [ 0.30835665, 0.77407636, 0.67833512],
+    [ 0.31435139, 0.77822478, 0.67811118],
+    [ 0.3206671 , 0.78233575, 0.67786729],
+    [ 0.32733158, 0.78640315, 0.67761027],
+    [ 0.33437168, 0.79042043, 0.67734882],
+    [ 0.34182112, 0.79437948, 0.67709394],
+    [ 0.34968889, 0.79827511, 0.67685638],
+    [ 0.35799244, 0.80210037, 0.67664969],
+    [ 0.36675371, 0.80584651, 0.67649539],
+    [ 0.3759816 , 0.80950627, 0.67641393],
+    [ 0.38566792, 0.81307432, 0.67642947],
+    [ 0.39579804, 0.81654592, 0.67656899],
+    [ 0.40634556, 0.81991799, 0.67686215],
+    [ 0.41730243, 0.82318339, 0.67735255],
+    [ 0.4285828 , 0.82635051, 0.6780564 ],
+    [ 0.44012728, 0.82942353, 0.67900049],
+    [ 0.45189421, 0.83240398, 0.68021733],
+    [ 0.46378379, 0.83530763, 0.6817062 ],
+    [ 0.47573199, 0.83814472, 0.68347352],
+    [ 0.48769865, 0.84092197, 0.68552698],
+    [ 0.49962354, 0.84365379, 0.68783929],
+    [ 0.5114027 , 0.8463718 , 0.69029789],
+    [ 0.52301693, 0.84908401, 0.69288545],
+    [ 0.53447549, 0.85179048, 0.69561066],
+    [ 0.54578602, 0.8544913 , 0.69848331],
+    [ 0.55695565, 0.85718723, 0.70150427],
+    [ 0.56798832, 0.85987893, 0.70468261],
+    [ 0.57888639, 0.86256715, 0.70802931],
+    [ 0.5896541 , 0.8652532 , 0.71154204],
+    [ 0.60028928, 0.86793835, 0.71523675],
+    [ 0.61079441, 0.87062438, 0.71910895],
+    [ 0.62116633, 0.87331311, 0.72317003],
+    [ 0.63140509, 0.87600675, 0.72741689],
+    [ 0.64150735, 0.87870746, 0.73185717],
+    [ 0.65147219, 0.8814179 , 0.73648495],
+    [ 0.66129632, 0.8841403 , 0.74130658],
+    [ 0.67097934, 0.88687758, 0.74631123],
+    [ 0.68051833, 0.88963189, 0.75150483],
+    [ 0.68991419, 0.89240612, 0.75687187],
+    [ 0.69916533, 0.89520211, 0.76241714],
+    [ 0.70827373, 0.89802257, 0.76812286],
+    [ 0.71723995, 0.90086891, 0.77399039],
+    [ 0.72606665, 0.90374337, 0.7800041 ],
+    [ 0.73475675, 0.90664718, 0.78615802],
+    [ 0.74331358, 0.90958151, 0.79244474],
+    [ 0.75174143, 0.91254787, 0.79884925],
+    [ 0.76004473, 0.91554656, 0.80536823],
+    [ 0.76827704, 0.91856549, 0.81196513],
+    [ 0.77647029, 0.921603  , 0.81855729],
+    [ 0.78462009, 0.92466151, 0.82514119],
+    [ 0.79273542, 0.92773848, 0.83172131],
+    [ 0.8008109 , 0.93083672, 0.83829355],
+    [ 0.80885107, 0.93395528, 0.84485982],
+    [ 0.81685878, 0.9370938 , 0.85142101],
+    [ 0.82483206, 0.94025378, 0.8579751 ],
+    [ 0.83277661, 0.94343371, 0.86452477],
+    [ 0.84069127, 0.94663473, 0.87106853],
+    [ 0.84857662, 0.9498573 , 0.8776059 ],
+    [ 0.8564431 , 0.95309792, 0.88414253],
+    [ 0.86429066, 0.95635719, 0.89067759],
+    [ 0.87218969, 0.95960708, 0.89725384]
+]
+
+
+_vlag_lut = [
+    [ 0.13850039, 0.41331206, 0.74052025],
+    [ 0.15077609, 0.41762684, 0.73970427],
+    [ 0.16235219, 0.4219191 , 0.7389667 ],
+    [ 0.1733322 , 0.42619024, 0.73832537],
+    [ 0.18382538, 0.43044226, 0.73776764],
+    [ 0.19394034, 0.4346772 , 0.73725867],
+    [ 0.20367115, 0.43889576, 0.73685314],
+    [ 0.21313625, 0.44310003, 0.73648045],
+    [ 0.22231173, 0.44729079, 0.73619681],
+    [ 0.23125148, 0.45146945, 0.73597803],
+    [ 0.23998101, 0.45563715, 0.7358223 ],
+    [ 0.24853358, 0.45979489, 0.73571524],
+    [ 0.25691416, 0.4639437 , 0.73566943],
+    [ 0.26513894, 0.46808455, 0.73568319],
+    [ 0.27322194, 0.47221835, 0.73575497],
+    [ 0.28117543, 0.47634598, 0.73588332],
+    [ 0.28901021, 0.48046826, 0.73606686],
+    [ 0.2967358 , 0.48458597, 0.73630433],
+    [ 0.30436071, 0.48869986, 0.73659451],
+    [ 0.3118955 , 0.49281055, 0.73693255],
+    [ 0.31935389, 0.49691847, 0.73730851],
+    [ 0.32672701, 0.5010247 , 0.73774013],
+    [ 0.33402607, 0.50512971, 0.73821941],
+    [ 0.34125337, 0.50923419, 0.73874905],
+    [ 0.34840921, 0.51333892, 0.73933402],
+    [ 0.35551826, 0.51744353, 0.73994642],
+    [ 0.3625676 , 0.52154929, 0.74060763],
+    [ 0.36956356, 0.52565656, 0.74131327],
+    [ 0.37649902, 0.52976642, 0.74207698],
+    [ 0.38340273, 0.53387791, 0.74286286],
+    [ 0.39025859, 0.53799253, 0.7436962 ],
+    [ 0.39706821, 0.54211081, 0.744578  ],
+    [ 0.40384046, 0.54623277, 0.74549872],
+    [ 0.41058241, 0.55035849, 0.74645094],
+    [ 0.41728385, 0.55448919, 0.74745174],
+    [ 0.42395178, 0.55862494, 0.74849357],
+    [ 0.4305964 , 0.56276546, 0.74956387],
+    [ 0.4372044 , 0.56691228, 0.75068412],
+    [ 0.4437909 , 0.57106468, 0.75183427],
+    [ 0.45035117, 0.5752235 , 0.75302312],
+    [ 0.45687824, 0.57938983, 0.75426297],
+    [ 0.46339713, 0.58356191, 0.75551816],
+    [ 0.46988778, 0.58774195, 0.75682037],
+    [ 0.47635605, 0.59192986, 0.75816245],
+    [ 0.48281101, 0.5961252 , 0.75953212],
+    [ 0.4892374 , 0.60032986, 0.76095418],
+    [ 0.49566225, 0.60454154, 0.76238852],
+    [ 0.50206137, 0.60876307, 0.76387371],
+    [ 0.50845128, 0.61299312, 0.76538551],
+    [ 0.5148258 , 0.61723272, 0.76693475],
+    [ 0.52118385, 0.62148236, 0.76852436],
+    [ 0.52753571, 0.62574126, 0.77013939],
+    [ 0.53386831, 0.63001125, 0.77180152],
+    [ 0.54020159, 0.63429038, 0.7734803 ],
+    [ 0.54651272, 0.63858165, 0.77521306],
+    [ 0.55282975, 0.64288207, 0.77695608],
+    [ 0.55912585, 0.64719519, 0.77875327],
+    [ 0.56542599, 0.65151828, 0.78056551],
+    [ 0.57170924, 0.65585426, 0.78242747],
+    [ 0.57799572, 0.6602009 , 0.78430751],
+    [ 0.58426817, 0.66456073, 0.78623458],
+    [ 0.590544  , 0.66893178, 0.78818117],
+    [ 0.59680758, 0.67331643, 0.79017369],
+    [ 0.60307553, 0.67771273, 0.79218572],
+    [ 0.60934065, 0.68212194, 0.79422987],
+    [ 0.61559495, 0.68654548, 0.7963202 ],
+    [ 0.62185554, 0.69098125, 0.79842918],
+    [ 0.62810662, 0.69543176, 0.80058381],
+    [ 0.63436425, 0.69989499, 0.80275812],
+    [ 0.64061445, 0.70437326, 0.80497621],
+    [ 0.6468706 , 0.70886488, 0.80721641],
+    [ 0.65312213, 0.7133717 , 0.80949719],
+    [ 0.65937818, 0.71789261, 0.81180392],
+    [ 0.66563334, 0.72242871, 0.81414642],
+    [ 0.67189155, 0.72697967, 0.81651872],
+    [ 0.67815314, 0.73154569, 0.81892097],
+    [ 0.68441395, 0.73612771, 0.82136094],
+    [ 0.69068321, 0.74072452, 0.82382353],
+    [ 0.69694776, 0.7453385 , 0.82633199],
+    [ 0.70322431, 0.74996721, 0.8288583 ],
+    [ 0.70949595, 0.75461368, 0.83143221],
+    [ 0.7157774 , 0.75927574, 0.83402904],
+    [ 0.72206299, 0.76395461, 0.83665922],
+    [ 0.72835227, 0.76865061, 0.8393242 ],
+    [ 0.73465238, 0.7733628 , 0.84201224],
+    [ 0.74094862, 0.77809393, 0.84474951],
+    [ 0.74725683, 0.78284158, 0.84750915],
+    [ 0.75357103, 0.78760701, 0.85030217],
+    [ 0.75988961, 0.79239077, 0.85313207],
+    [ 0.76621987, 0.79719185, 0.85598668],
+    [ 0.77255045, 0.8020125 , 0.85888658],
+    [ 0.77889241, 0.80685102, 0.86181298],
+    [ 0.78524572, 0.81170768, 0.86476656],
+    [ 0.79159841, 0.81658489, 0.86776906],
+    [ 0.79796459, 0.82148036, 0.8707962 ],
+    [ 0.80434168, 0.82639479, 0.87385315],
+    [ 0.8107221 , 0.83132983, 0.87695392],
+    [ 0.81711301, 0.8362844 , 0.88008641],
+    [ 0.82351479, 0.84125863, 0.88325045],
+    [ 0.82992772, 0.84625263, 0.88644594],
+    [ 0.83634359, 0.85126806, 0.8896878 ],
+    [ 0.84277295, 0.85630293, 0.89295721],
+    [ 0.84921192, 0.86135782, 0.89626076],
+    [ 0.85566206, 0.866432  , 0.89959467],
+    [ 0.86211514, 0.87152627, 0.90297183],
+    [ 0.86857483, 0.87663856, 0.90638248],
+    [ 0.87504231, 0.88176648, 0.90981938],
+    [ 0.88151194, 0.88690782, 0.91328493],
+    [ 0.88797938, 0.89205857, 0.91677544],
+    [ 0.89443865, 0.89721298, 0.9202854 ],
+    [ 0.90088204, 0.90236294, 0.92380601],
+    [ 0.90729768, 0.90749778, 0.92732797],
+    [ 0.91367037, 0.91260329, 0.93083814],
+    [ 0.91998105, 0.91766106, 0.93431861],
+    [ 0.92620596, 0.92264789, 0.93774647],
+    [ 0.93231683, 0.9275351 , 0.94109192],
+    [ 0.93827772, 0.9322888 , 0.94432312],
+    [ 0.94404755, 0.93686925, 0.94740137],
+    [ 0.94958284, 0.94123072, 0.95027696],
+    [ 0.95482682, 0.9453245 , 0.95291103],
+    [ 0.9597248 , 0.94909728, 0.95525103],
+    [ 0.96422552, 0.95249273, 0.95723271],
+    [ 0.96826161, 0.95545812, 0.95882188],
+    [ 0.97178458, 0.95793984, 0.95995705],
+    [ 0.97474105, 0.95989142, 0.96059997],
+    [ 0.97708604, 0.96127366, 0.96071853],
+    [ 0.97877855, 0.96205832, 0.96030095],
+    [ 0.97978484, 0.96222949, 0.95935496],
+    [ 0.9805997 , 0.96155216, 0.95813083],
+    [ 0.98152619, 0.95993719, 0.95639322],
+    [ 0.9819726 , 0.95766608, 0.95399269],
+    [ 0.98191855, 0.9547873 , 0.95098107],
+    [ 0.98138514, 0.95134771, 0.94740644],
+    [ 0.98040845, 0.94739906, 0.94332125],
+    [ 0.97902107, 0.94300131, 0.93878672],
+    [ 0.97729348, 0.93820409, 0.93385135],
+    [ 0.9752533 , 0.933073  , 0.92858252],
+    [ 0.97297834, 0.92765261, 0.92302309],
+    [ 0.97049104, 0.92200317, 0.91723505],
+    [ 0.96784372, 0.91616744, 0.91126063],
+    [ 0.96507281, 0.91018664, 0.90514124],
+    [ 0.96222034, 0.90409203, 0.89890756],
+    [ 0.9593079 , 0.89791478, 0.89259122],
+    [ 0.95635626, 0.89167908, 0.88621654],
+    [ 0.95338303, 0.88540373, 0.87980238],
+    [ 0.95040174, 0.87910333, 0.87336339],
+    [ 0.94742246, 0.87278899, 0.86691076],
+    [ 0.94445249, 0.86646893, 0.86045277],
+    [ 0.94150476, 0.86014606, 0.85399191],
+    [ 0.93857394, 0.85382798, 0.84753642],
+    [ 0.93566206, 0.84751766, 0.84108935],
+    [ 0.93277194, 0.8412164 , 0.83465197],
+    [ 0.92990106, 0.83492672, 0.82822708],
+    [ 0.92704736, 0.82865028, 0.82181656],
+    [ 0.92422703, 0.82238092, 0.81541333],
+    [ 0.92142581, 0.81612448, 0.80902415],
+    [ 0.91864501, 0.80988032, 0.80264838],
+    [ 0.91587578, 0.80365187, 0.79629001],
+    [ 0.9131367 , 0.79743115, 0.78994   ],
+    [ 0.91041602, 0.79122265, 0.78360361],
+    [ 0.90771071, 0.78502727, 0.77728196],
+    [ 0.90501581, 0.77884674, 0.7709771 ],
+    [ 0.90235365, 0.77267117, 0.76467793],
+    [ 0.8997019 , 0.76650962, 0.75839484],
+    [ 0.89705346, 0.76036481, 0.752131  ],
+    [ 0.89444021, 0.75422253, 0.74587047],
+    [ 0.89183355, 0.74809474, 0.73962689],
+    [ 0.88923216, 0.74198168, 0.73340061],
+    [ 0.88665892, 0.73587283, 0.72717995],
+    [ 0.88408839, 0.72977904, 0.72097718],
+    [ 0.88153537, 0.72369332, 0.71478461],
+    [ 0.87899389, 0.7176179 , 0.70860487],
+    [ 0.87645157, 0.71155805, 0.7024439 ],
+    [ 0.8739399 , 0.70549893, 0.6962854 ],
+    [ 0.87142626, 0.6994551 , 0.69014561],
+    [ 0.8689268 , 0.69341868, 0.68401597],
+    [ 0.86643562, 0.687392  , 0.67789917],
+    [ 0.86394434, 0.68137863, 0.67179927],
+    [ 0.86147586, 0.67536728, 0.665704  ],
+    [ 0.85899928, 0.66937226, 0.6596292 ],
+    [ 0.85654668, 0.66337773, 0.6535577 ],
+    [ 0.85408818, 0.65739772, 0.64750494],
+    [ 0.85164413, 0.65142189, 0.64145983],
+    [ 0.84920091, 0.6454565 , 0.63542932],
+    [ 0.84676427, 0.63949827, 0.62941   ],
+    [ 0.84433231, 0.63354773, 0.62340261],
+    [ 0.84190106, 0.62760645, 0.61740899],
+    [ 0.83947935, 0.62166951, 0.61142404],
+    [ 0.8370538 , 0.61574332, 0.60545478],
+    [ 0.83463975, 0.60981951, 0.59949247],
+    [ 0.83221877, 0.60390724, 0.593547  ],
+    [ 0.82980985, 0.59799607, 0.58760751],
+    [ 0.82740268, 0.59209095, 0.58167944],
+    [ 0.82498638, 0.5861973 , 0.57576866],
+    [ 0.82258181, 0.5803034 , 0.56986307],
+    [ 0.82016611, 0.57442123, 0.56397539],
+    [ 0.81776305, 0.56853725, 0.55809173],
+    [ 0.81534551, 0.56266602, 0.55222741],
+    [ 0.81294293, 0.55679056, 0.5463651 ],
+    [ 0.81052113, 0.55092973, 0.54052443],
+    [ 0.80811509, 0.54506305, 0.53468464],
+    [ 0.80568952, 0.53921036, 0.52886622],
+    [ 0.80327506, 0.53335335, 0.52305077],
+    [ 0.80084727, 0.52750583, 0.51725256],
+    [ 0.79842217, 0.5216578 , 0.51146173],
+    [ 0.79599382, 0.51581223, 0.50568155],
+    [ 0.79355781, 0.50997127, 0.49991444],
+    [ 0.79112596, 0.50412707, 0.49415289],
+    [ 0.78867442, 0.49829386, 0.48841129],
+    [ 0.7862306 , 0.49245398, 0.48267247],
+    [ 0.7837687 , 0.48662309, 0.47695216],
+    [ 0.78130809, 0.4807883 , 0.47123805],
+    [ 0.77884467, 0.47495151, 0.46553236],
+    [ 0.77636283, 0.46912235, 0.45984473],
+    [ 0.77388383, 0.46328617, 0.45416141],
+    [ 0.77138912, 0.45745466, 0.44849398],
+    [ 0.76888874, 0.45162042, 0.44283573],
+    [ 0.76638802, 0.44577901, 0.43718292],
+    [ 0.76386116, 0.43994762, 0.43155211],
+    [ 0.76133542, 0.43410655, 0.42592523],
+    [ 0.75880631, 0.42825801, 0.42030488],
+    [ 0.75624913, 0.42241905, 0.41470727],
+    [ 0.7536919 , 0.41656866, 0.40911347],
+    [ 0.75112748, 0.41071104, 0.40352792],
+    [ 0.74854331, 0.40485474, 0.3979589 ],
+    [ 0.74594723, 0.39899309, 0.39240088],
+    [ 0.74334332, 0.39312199, 0.38685075],
+    [ 0.74073277, 0.38723941, 0.3813074 ],
+    [ 0.73809409, 0.38136133, 0.37578553],
+    [ 0.73544692, 0.37547129, 0.37027123],
+    [ 0.73278943, 0.36956954, 0.36476549],
+    [ 0.73011829, 0.36365761, 0.35927038],
+    [ 0.72743485, 0.35773314, 0.35378465],
+    [ 0.72472722, 0.35180504, 0.34831662],
+    [ 0.72200473, 0.34586421, 0.34285937],
+    [ 0.71927052, 0.33990649, 0.33741033],
+    [ 0.71652049, 0.33393396, 0.33197219],
+    [ 0.71375362, 0.32794602, 0.32654545],
+    [ 0.71096951, 0.32194148, 0.32113016],
+    [ 0.70816772, 0.31591904, 0.31572637],
+    [ 0.70534784, 0.30987734, 0.31033414],
+    [ 0.70250944, 0.30381489, 0.30495353],
+    [ 0.69965211, 0.2977301 , 0.2995846 ],
+    [ 0.6967754 , 0.29162126, 0.29422741],
+    [ 0.69388446, 0.28548074, 0.28887769],
+    [ 0.69097561, 0.2793096 , 0.28353795],
+    [ 0.68803513, 0.27311993, 0.27821876],
+    [ 0.6850794 , 0.26689144, 0.27290694],
+    [ 0.682108  , 0.26062114, 0.26760246],
+    [ 0.67911013, 0.2543177 , 0.26231367],
+    [ 0.67609393, 0.24796818, 0.25703372],
+    [ 0.67305921, 0.24156846, 0.25176238],
+    [ 0.67000176, 0.23511902, 0.24650278],
+    [ 0.66693423, 0.22859879, 0.24124404],
+    [ 0.6638441 , 0.22201742, 0.2359961 ],
+    [ 0.66080672, 0.21526712, 0.23069468]
+]
+
+
+_icefire_lut = [
+    [ 0.73936227, 0.90443867, 0.85757238],
+    [ 0.72888063, 0.89639109, 0.85488394],
+    [ 0.71834255, 0.88842162, 0.8521605 ],
+    [ 0.70773866, 0.88052939, 0.849422  ],
+    [ 0.69706215, 0.87271313, 0.84668315],
+    [ 0.68629021, 0.86497329, 0.84398721],
+    [ 0.67543654, 0.85730617, 0.84130969],
+    [ 0.66448539, 0.84971123, 0.83868005],
+    [ 0.65342679, 0.84218728, 0.83611512],
+    [ 0.64231804, 0.83471867, 0.83358584],
+    [ 0.63117745, 0.827294  , 0.83113431],
+    [ 0.62000484, 0.81991069, 0.82876741],
+    [ 0.60879435, 0.81256797, 0.82648905],
+    [ 0.59754118, 0.80526458, 0.82430414],
+    [ 0.58624247, 0.79799884, 0.82221573],
+    [ 0.57489525, 0.7907688 , 0.82022901],
+    [ 0.56349779, 0.78357215, 0.81834861],
+    [ 0.55204294, 0.77640827, 0.81657563],
+    [ 0.54052516, 0.76927562, 0.81491462],
+    [ 0.52894085, 0.76217215, 0.81336913],
+    [ 0.51728854, 0.75509528, 0.81194156],
+    [ 0.50555676, 0.74804469, 0.81063503],
+    [ 0.49373871, 0.7410187 , 0.80945242],
+    [ 0.48183174, 0.73401449, 0.80839675],
+    [ 0.46982587, 0.72703075, 0.80747097],
+    [ 0.45770893, 0.72006648, 0.80667756],
+    [ 0.44547249, 0.71311941, 0.80601991],
+    [ 0.43318643, 0.70617126, 0.80549278],
+    [ 0.42110294, 0.69916972, 0.80506683],
+    [ 0.40925101, 0.69211059, 0.80473246],
+    [ 0.3976693 , 0.68498786, 0.80448272],
+    [ 0.38632002, 0.67781125, 0.80431024],
+    [ 0.37523981, 0.67057537, 0.80420832],
+    [ 0.36442578, 0.66328229, 0.80417474],
+    [ 0.35385939, 0.65593699, 0.80420591],
+    [ 0.34358916, 0.64853177, 0.8043    ],
+    [ 0.33355526, 0.64107876, 0.80445484],
+    [ 0.32383062, 0.63356578, 0.80467091],
+    [ 0.31434372, 0.62600624, 0.8049475 ],
+    [ 0.30516161, 0.618389  , 0.80528692],
+    [ 0.29623491, 0.61072284, 0.80569021],
+    [ 0.28759072, 0.60300319, 0.80616055],
+    [ 0.27923924, 0.59522877, 0.80669803],
+    [ 0.27114651, 0.5874047 , 0.80730545],
+    [ 0.26337153, 0.57952055, 0.80799113],
+    [ 0.25588696, 0.57157984, 0.80875922],
+    [ 0.248686  , 0.56358255, 0.80961366],
+    [ 0.24180668, 0.55552289, 0.81055123],
+    [ 0.23526251, 0.54739477, 0.8115939 ],
+    [ 0.22921445, 0.53918506, 0.81267292],
+    [ 0.22397687, 0.53086094, 0.8137141 ],
+    [ 0.21977058, 0.52241482, 0.81457651],
+    [ 0.21658989, 0.51384321, 0.81528511],
+    [ 0.21452772, 0.50514155, 0.81577278],
+    [ 0.21372783, 0.49630865, 0.81589566],
+    [ 0.21409503, 0.48734861, 0.81566163],
+    [ 0.2157176 , 0.47827123, 0.81487615],
+    [ 0.21842857, 0.46909168, 0.81351614],
+    [ 0.22211705, 0.45983212, 0.81146983],
+    [ 0.22665681, 0.45052233, 0.80860217],
+    [ 0.23176013, 0.44119137, 0.80494325],
+    [ 0.23727775, 0.43187704, 0.80038017],
+    [ 0.24298285, 0.42261123, 0.79493267],
+    [ 0.24865068, 0.41341842, 0.78869164],
+    [ 0.25423116, 0.40433127, 0.78155831],
+    [ 0.25950239, 0.39535521, 0.77376848],
+    [ 0.2644736 , 0.38651212, 0.76524809],
+    [ 0.26901584, 0.37779582, 0.75621942],
+    [ 0.27318141, 0.36922056, 0.746605  ],
+    [ 0.27690355, 0.3607736 , 0.73659374],
+    [ 0.28023585, 0.35244234, 0.72622103],
+    [ 0.28306009, 0.34438449, 0.71500731],
+    [ 0.28535896, 0.33660243, 0.70303975],
+    [ 0.28708711, 0.32912157, 0.69034504],
+    [ 0.28816354, 0.32200604, 0.67684067],
+    [ 0.28862749, 0.31519824, 0.66278813],
+    [ 0.28847904, 0.30869064, 0.6482815 ],
+    [ 0.28770912, 0.30250126, 0.63331265],
+    [ 0.28640325, 0.29655509, 0.61811374],
+    [ 0.28458943, 0.29082155, 0.60280913],
+    [ 0.28233561, 0.28527482, 0.58742866],
+    [ 0.27967038, 0.2798938 , 0.57204225],
+    [ 0.27665361, 0.27465357, 0.55667809],
+    [ 0.27332564, 0.2695165 , 0.54145387],
+    [ 0.26973851, 0.26447054, 0.52634916],
+    [ 0.2659204 , 0.25949691, 0.511417  ],
+    [ 0.26190145, 0.25458123, 0.49668768],
+    [ 0.2577151 , 0.24971691, 0.48214874],
+    [ 0.25337618, 0.24490494, 0.46778758],
+    [ 0.24890842, 0.24013332, 0.45363816],
+    [ 0.24433654, 0.23539226, 0.4397245 ],
+    [ 0.23967922, 0.23067729, 0.4260591 ],
+    [ 0.23495608, 0.22598894, 0.41262952],
+    [ 0.23018113, 0.22132414, 0.39945577],
+    [ 0.22534609, 0.21670847, 0.38645794],
+    [ 0.22048761, 0.21211723, 0.37372555],
+    [ 0.2156198 , 0.20755389, 0.36125301],
+    [ 0.21074637, 0.20302717, 0.34903192],
+    [ 0.20586893, 0.19855368, 0.33701661],
+    [ 0.20101757, 0.19411573, 0.32529173],
+    [ 0.19619947, 0.18972425, 0.31383846],
+    [ 0.19140726, 0.18540157, 0.30260777],
+    [ 0.1866769 , 0.1811332 , 0.29166583],
+    [ 0.18201285, 0.17694992, 0.28088776],
+    [ 0.17745228, 0.17282141, 0.27044211],
+    [ 0.17300684, 0.16876921, 0.26024893],
+    [ 0.16868273, 0.16479861, 0.25034479],
+    [ 0.16448691, 0.16091728, 0.24075373],
+    [ 0.16043195, 0.15714351, 0.23141745],
+    [ 0.15652427, 0.15348248, 0.22238175],
+    [ 0.15277065, 0.14994111, 0.21368395],
+    [ 0.14918274, 0.14653431, 0.20529486],
+    [ 0.14577095, 0.14327403, 0.19720829],
+    [ 0.14254381, 0.14016944, 0.18944326],
+    [ 0.13951035, 0.13723063, 0.18201072],
+    [ 0.13667798, 0.13446606, 0.17493774],
+    [ 0.13405762, 0.13188822, 0.16820842],
+    [ 0.13165767, 0.12950667, 0.16183275],
+    [ 0.12948748, 0.12733187, 0.15580631],
+    [ 0.12755435, 0.1253723 , 0.15014098],
+    [ 0.12586516, 0.12363617, 0.1448459 ],
+    [ 0.12442647, 0.12213143, 0.13992571],
+    [ 0.12324241, 0.12086419, 0.13539995],
+    [ 0.12232067, 0.11984278, 0.13124644],
+    [ 0.12166209, 0.11907077, 0.12749671],
+    [ 0.12126982, 0.11855309, 0.12415079],
+    [ 0.12114244, 0.11829179, 0.1212385 ],
+    [ 0.12127766, 0.11828837, 0.11878534],
+    [ 0.12284806, 0.1179729 , 0.11772022],
+    [ 0.12619498, 0.11721796, 0.11770203],
+    [ 0.129968  , 0.11663788, 0.11792377],
+    [ 0.13410011, 0.11625146, 0.11839138],
+    [ 0.13855459, 0.11606618, 0.11910584],
+    [ 0.14333775, 0.11607038, 0.1200606 ],
+    [ 0.148417  , 0.11626929, 0.12125453],
+    [ 0.15377389, 0.11666192, 0.12268364],
+    [ 0.15941427, 0.11723486, 0.12433911],
+    [ 0.16533376, 0.11797856, 0.12621303],
+    [ 0.17152547, 0.11888403, 0.12829735],
+    [ 0.17797765, 0.11994436, 0.13058435],
+    [ 0.18468769, 0.12114722, 0.13306426],
+    [ 0.19165663, 0.12247737, 0.13572616],
+    [ 0.19884415, 0.12394381, 0.1385669 ],
+    [ 0.20627181, 0.12551883, 0.14157124],
+    [ 0.21394877, 0.12718055, 0.14472604],
+    [ 0.22184572, 0.12893119, 0.14802579],
+    [ 0.22994394, 0.13076731, 0.15146314],
+    [ 0.23823937, 0.13267611, 0.15502793],
+    [ 0.24676041, 0.13462172, 0.15870321],
+    [ 0.25546457, 0.13661751, 0.16248722],
+    [ 0.26433628, 0.13865956, 0.16637301],
+    [ 0.27341345, 0.14070412, 0.17034221],
+    [ 0.28264773, 0.14277192, 0.1743957 ],
+    [ 0.29202272, 0.14486161, 0.17852793],
+    [ 0.30159648, 0.14691224, 0.1827169 ],
+    [ 0.31129002, 0.14897583, 0.18695213],
+    [ 0.32111555, 0.15103351, 0.19119629],
+    [ 0.33107961, 0.1530674 , 0.19543758],
+    [ 0.34119892, 0.15504762, 0.1996803 ],
+    [ 0.35142388, 0.15701131, 0.20389086],
+    [ 0.36178937, 0.1589124 , 0.20807639],
+    [ 0.37229381, 0.16073993, 0.21223189],
+    [ 0.38288348, 0.16254006, 0.2163249 ],
+    [ 0.39359592, 0.16426336, 0.22036577],
+    [ 0.40444332, 0.16588767, 0.22434027],
+    [ 0.41537995, 0.16745325, 0.2282297 ],
+    [ 0.42640867, 0.16894939, 0.23202755],
+    [ 0.43754706, 0.17034847, 0.23572899],
+    [ 0.44878564, 0.1716535 , 0.23932344],
+    [ 0.4601126 , 0.17287365, 0.24278607],
+    [ 0.47151732, 0.17401641, 0.24610337],
+    [ 0.48300689, 0.17506676, 0.2492737 ],
+    [ 0.49458302, 0.17601892, 0.25227688],
+    [ 0.50623876, 0.17687777, 0.255096  ],
+    [ 0.5179623 , 0.17765528, 0.2577162 ],
+    [ 0.52975234, 0.17835232, 0.2601134 ],
+    [ 0.54159776, 0.17898292, 0.26226847],
+    [ 0.55348804, 0.17956232, 0.26416003],
+    [ 0.56541729, 0.18010175, 0.26575971],
+    [ 0.57736669, 0.180631  , 0.26704888],
+    [ 0.58932081, 0.18117827, 0.26800409],
+    [ 0.60127582, 0.18175888, 0.26858488],
+    [ 0.61319563, 0.1824336 , 0.2687872 ],
+    [ 0.62506376, 0.18324015, 0.26858301],
+    [ 0.63681202, 0.18430173, 0.26795276],
+    [ 0.64842603, 0.18565472, 0.26689463],
+    [ 0.65988195, 0.18734638, 0.26543435],
+    [ 0.67111966, 0.18948885, 0.26357955],
+    [ 0.68209194, 0.19216636, 0.26137175],
+    [ 0.69281185, 0.19535326, 0.25887063],
+    [ 0.70335022, 0.19891271, 0.25617971],
+    [ 0.71375229, 0.20276438, 0.25331365],
+    [ 0.72401436, 0.20691287, 0.25027366],
+    [ 0.73407638, 0.21145051, 0.24710661],
+    [ 0.74396983, 0.21631913, 0.24380715],
+    [ 0.75361506, 0.22163653, 0.24043996],
+    [ 0.7630579 , 0.22731637, 0.23700095],
+    [ 0.77222228, 0.23346231, 0.23356628],
+    [ 0.78115441, 0.23998404, 0.23013825],
+    [ 0.78979746, 0.24694858, 0.22678822],
+    [ 0.79819286, 0.25427223, 0.22352658],
+    [ 0.80630444, 0.26198807, 0.22040877],
+    [ 0.81417437, 0.27001406, 0.21744645],
+    [ 0.82177364, 0.27837336, 0.21468316],
+    [ 0.82915955, 0.28696963, 0.21210766],
+    [ 0.83628628, 0.2958499 , 0.20977813],
+    [ 0.84322168, 0.30491136, 0.20766435],
+    [ 0.84995458, 0.31415945, 0.2057863 ],
+    [ 0.85648867, 0.32358058, 0.20415327],
+    [ 0.86286243, 0.33312058, 0.20274969],
+    [ 0.86908321, 0.34276705, 0.20157271],
+    [ 0.87512876, 0.3525416 , 0.20064949],
+    [ 0.88100349, 0.36243385, 0.19999078],
+    [ 0.8866469 , 0.37249496, 0.1997976 ],
+    [ 0.89203964, 0.38273475, 0.20013431],
+    [ 0.89713496, 0.39318156, 0.20121514],
+    [ 0.90195099, 0.40380687, 0.20301555],
+    [ 0.90648379, 0.41460191, 0.20558847],
+    [ 0.9106967 , 0.42557857, 0.20918529],
+    [ 0.91463791, 0.43668557, 0.21367954],
+    [ 0.91830723, 0.44790913, 0.21916352],
+    [ 0.92171507, 0.45922856, 0.22568002],
+    [ 0.92491786, 0.4705936 , 0.23308207],
+    [ 0.92790792, 0.48200153, 0.24145932],
+    [ 0.93073701, 0.49341219, 0.25065486],
+    [ 0.93343918, 0.5048017 , 0.26056148],
+    [ 0.93602064, 0.51616486, 0.27118485],
+    [ 0.93850535, 0.52748892, 0.28242464],
+    [ 0.94092933, 0.53875462, 0.29416042],
+    [ 0.94330011, 0.5499628 , 0.30634189],
+    [ 0.94563159, 0.56110987, 0.31891624],
+    [ 0.94792955, 0.57219822, 0.33184256],
+    [ 0.95020929, 0.5832232 , 0.34508419],
+    [ 0.95247324, 0.59419035, 0.35859866],
+    [ 0.95471709, 0.60510869, 0.37236035],
+    [ 0.95698411, 0.61595766, 0.38629631],
+    [ 0.95923863, 0.62676473, 0.40043317],
+    [ 0.9615041 , 0.6375203 , 0.41474106],
+    [ 0.96371553, 0.64826619, 0.42928335],
+    [ 0.96591497, 0.65899621, 0.44380444],
+    [ 0.96809871, 0.66971662, 0.45830232],
+    [ 0.9702495 , 0.6804394 , 0.47280492],
+    [ 0.9723881 , 0.69115622, 0.48729272],
+    [ 0.97450723, 0.70187358, 0.50178034],
+    [ 0.9766108 , 0.712592  , 0.51626837],
+    [ 0.97871716, 0.72330511, 0.53074053],
+    [ 0.98082222, 0.73401769, 0.54520694],
+    [ 0.9829001 , 0.74474445, 0.5597019 ],
+    [ 0.98497466, 0.75547635, 0.57420239],
+    [ 0.98705581, 0.76621129, 0.58870185],
+    [ 0.98913325, 0.77695637, 0.60321626],
+    [ 0.99119918, 0.78771716, 0.61775821],
+    [ 0.9932672 , 0.79848979, 0.63231691],
+    [ 0.99535958, 0.80926704, 0.64687278],
+    [ 0.99740544, 0.82008078, 0.66150571],
+    [ 0.9992197 , 0.83100723, 0.6764127 ]
+]
+
+
+_flare_lut = [
+    [0.92907237, 0.68878959, 0.50411509],
+    [0.92891402, 0.68494686, 0.50173994],
+    [0.92864754, 0.68116207, 0.4993754],
+    [0.92836112, 0.67738527, 0.49701572],
+    [0.9280599, 0.67361354, 0.49466044],
+    [0.92775569, 0.66983999, 0.49230866],
+    [0.9274375, 0.66607098, 0.48996097],
+    [0.927111, 0.66230315, 0.48761688],
+    [0.92677996, 0.6585342, 0.485276],
+    [0.92644317, 0.65476476, 0.48293832],
+    [0.92609759, 0.65099658, 0.48060392],
+    [0.925747, 0.64722729, 0.47827244],
+    [0.92539502, 0.64345456, 0.47594352],
+    [0.92503106, 0.6396848, 0.47361782],
+    [0.92466877, 0.6359095, 0.47129427],
+    [0.92429828, 0.63213463, 0.46897349],
+    [0.92392172, 0.62835879, 0.46665526],
+    [0.92354597, 0.62457749, 0.46433898],
+    [0.9231622, 0.6207962, 0.46202524],
+    [0.92277222, 0.61701365, 0.45971384],
+    [0.92237978, 0.61322733, 0.45740444],
+    [0.92198615, 0.60943622, 0.45509686],
+    [0.92158735, 0.60564276, 0.45279137],
+    [0.92118373, 0.60184659, 0.45048789],
+    [0.92077582, 0.59804722, 0.44818634],
+    [0.92036413, 0.59424414, 0.44588663],
+    [0.91994924, 0.5904368, 0.44358868],
+    [0.91952943, 0.58662619, 0.4412926],
+    [0.91910675, 0.58281075, 0.43899817],
+    [0.91868096, 0.57899046, 0.4367054],
+    [0.91825103, 0.57516584, 0.43441436],
+    [0.91781857, 0.57133556, 0.43212486],
+    [0.9173814, 0.56750099, 0.4298371],
+    [0.91694139, 0.56366058, 0.42755089],
+    [0.91649756, 0.55981483, 0.42526631],
+    [0.91604942, 0.55596387, 0.42298339],
+    [0.9155979, 0.55210684, 0.42070204],
+    [0.9151409, 0.54824485, 0.4184247],
+    [0.91466138, 0.54438817, 0.41617858],
+    [0.91416896, 0.54052962, 0.41396347],
+    [0.91366559, 0.53666778, 0.41177769],
+    [0.91315173, 0.53280208, 0.40962196],
+    [0.91262605, 0.52893336, 0.40749715],
+    [0.91208866, 0.52506133, 0.40540404],
+    [0.91153952, 0.52118582, 0.40334346],
+    [0.91097732, 0.51730767, 0.4013163],
+    [0.910403, 0.51342591, 0.39932342],
+    [0.90981494, 0.50954168, 0.39736571],
+    [0.90921368, 0.5056543, 0.39544411],
+    [0.90859797, 0.50176463, 0.39355952],
+    [0.90796841, 0.49787195, 0.39171297],
+    [0.90732341, 0.4939774, 0.38990532],
+    [0.90666382, 0.49008006, 0.38813773],
+    [0.90598815, 0.486181, 0.38641107],
+    [0.90529624, 0.48228017, 0.38472641],
+    [0.90458808, 0.47837738, 0.38308489],
+    [0.90386248, 0.47447348, 0.38148746],
+    [0.90311921, 0.4705685, 0.37993524],
+    [0.90235809, 0.46666239, 0.37842943],
+    [0.90157824, 0.46275577, 0.37697105],
+    [0.90077904, 0.45884905, 0.37556121],
+    [0.89995995, 0.45494253, 0.37420106],
+    [0.89912041, 0.4510366, 0.37289175],
+    [0.8982602, 0.44713126, 0.37163458],
+    [0.89737819, 0.44322747, 0.37043052],
+    [0.89647387, 0.43932557, 0.36928078],
+    [0.89554477, 0.43542759, 0.36818855],
+    [0.89458871, 0.4315354, 0.36715654],
+    [0.89360794, 0.42764714, 0.36618273],
+    [0.89260152, 0.42376366, 0.36526813],
+    [0.8915687, 0.41988565, 0.36441384],
+    [0.89050882, 0.41601371, 0.36362102],
+    [0.8894159, 0.41215334, 0.36289639],
+    [0.888292, 0.40830288, 0.36223756],
+    [0.88713784, 0.40446193, 0.36164328],
+    [0.88595253, 0.40063149, 0.36111438],
+    [0.88473115, 0.39681635, 0.3606566],
+    [0.88347246, 0.39301805, 0.36027074],
+    [0.88217931, 0.38923439, 0.35995244],
+    [0.880851, 0.38546632, 0.35970244],
+    [0.87947728, 0.38172422, 0.35953127],
+    [0.87806542, 0.37800172, 0.35942941],
+    [0.87661509, 0.37429964, 0.35939659],
+    [0.87511668, 0.37062819, 0.35944178],
+    [0.87357554, 0.36698279, 0.35955811],
+    [0.87199254, 0.3633634, 0.35974223],
+    [0.87035691, 0.35978174, 0.36000516],
+    [0.86867647, 0.35623087, 0.36033559],
+    [0.86694949, 0.35271349, 0.36073358],
+    [0.86516775, 0.34923921, 0.36120624],
+    [0.86333996, 0.34580008, 0.36174113],
+    [0.86145909, 0.3424046, 0.36234402],
+    [0.85952586, 0.33905327, 0.36301129],
+    [0.85754536, 0.33574168, 0.36373567],
+    [0.855514, 0.33247568, 0.36451271],
+    [0.85344392, 0.32924217, 0.36533344],
+    [0.8513284, 0.32604977, 0.36620106],
+    [0.84916723, 0.32289973, 0.36711424],
+    [0.84696243, 0.31979068, 0.36806976],
+    [0.84470627, 0.31673295, 0.36907066],
+    [0.84240761, 0.31371695, 0.37010969],
+    [0.84005337, 0.31075974, 0.37119284],
+    [0.83765537, 0.30784814, 0.3723105],
+    [0.83520234, 0.30499724, 0.37346726],
+    [0.83270291, 0.30219766, 0.37465552],
+    [0.83014895, 0.29946081, 0.37587769],
+    [0.82754694, 0.29677989, 0.37712733],
+    [0.82489111, 0.29416352, 0.37840532],
+    [0.82218644, 0.29160665, 0.37970606],
+    [0.81942908, 0.28911553, 0.38102921],
+    [0.81662276, 0.28668665, 0.38236999],
+    [0.81376555, 0.28432371, 0.383727],
+    [0.81085964, 0.28202508, 0.38509649],
+    [0.8079055, 0.27979128, 0.38647583],
+    [0.80490309, 0.27762348, 0.3878626],
+    [0.80185613, 0.2755178, 0.38925253],
+    [0.79876118, 0.27347974, 0.39064559],
+    [0.79562644, 0.27149928, 0.39203532],
+    [0.79244362, 0.2695883, 0.39342447],
+    [0.78922456, 0.26773176, 0.3948046],
+    [0.78596161, 0.26594053, 0.39617873],
+    [0.7826624, 0.26420493, 0.39754146],
+    [0.77932717, 0.26252522, 0.39889102],
+    [0.77595363, 0.2609049, 0.4002279],
+    [0.77254999, 0.25933319, 0.40154704],
+    [0.76911107, 0.25781758, 0.40284959],
+    [0.76564158, 0.25635173, 0.40413341],
+    [0.76214598, 0.25492998, 0.40539471],
+    [0.75861834, 0.25356035, 0.40663694],
+    [0.75506533, 0.25223402, 0.40785559],
+    [0.75148963, 0.2509473, 0.40904966],
+    [0.74788835, 0.24970413, 0.41022028],
+    [0.74426345, 0.24850191, 0.41136599],
+    [0.74061927, 0.24733457, 0.41248516],
+    [0.73695678, 0.24620072, 0.41357737],
+    [0.73327278, 0.24510469, 0.41464364],
+    [0.72957096, 0.24404127, 0.4156828],
+    [0.72585394, 0.24300672, 0.41669383],
+    [0.7221226, 0.24199971, 0.41767651],
+    [0.71837612, 0.24102046, 0.41863486],
+    [0.71463236, 0.24004289, 0.41956983],
+    [0.7108932, 0.23906316, 0.42048681],
+    [0.70715842, 0.23808142, 0.42138647],
+    [0.70342811, 0.2370976, 0.42226844],
+    [0.69970218, 0.23611179, 0.42313282],
+    [0.69598055, 0.2351247, 0.42397678],
+    [0.69226314, 0.23413578, 0.42480327],
+    [0.68854988, 0.23314511, 0.42561234],
+    [0.68484064, 0.23215279, 0.42640419],
+    [0.68113541, 0.23115942, 0.42717615],
+    [0.67743412, 0.23016472, 0.42792989],
+    [0.67373662, 0.22916861, 0.42866642],
+    [0.67004287, 0.22817117, 0.42938576],
+    [0.66635279, 0.22717328, 0.43008427],
+    [0.66266621, 0.22617435, 0.43076552],
+    [0.65898313, 0.22517434, 0.43142956],
+    [0.65530349, 0.22417381, 0.43207427],
+    [0.65162696, 0.22317307, 0.4327001],
+    [0.64795375, 0.22217149, 0.43330852],
+    [0.64428351, 0.22116972, 0.43389854],
+    [0.64061624, 0.22016818, 0.43446845],
+    [0.63695183, 0.21916625, 0.43502123],
+    [0.63329016, 0.21816454, 0.43555493],
+    [0.62963102, 0.2171635, 0.43606881],
+    [0.62597451, 0.21616235, 0.43656529],
+    [0.62232019, 0.21516239, 0.43704153],
+    [0.61866821, 0.21416307, 0.43749868],
+    [0.61501835, 0.21316435, 0.43793808],
+    [0.61137029, 0.21216761, 0.4383556],
+    [0.60772426, 0.2111715, 0.43875552],
+    [0.60407977, 0.21017746, 0.43913439],
+    [0.60043678, 0.20918503, 0.43949412],
+    [0.59679524, 0.20819447, 0.43983393],
+    [0.59315487, 0.20720639, 0.44015254],
+    [0.58951566, 0.20622027, 0.44045213],
+    [0.58587715, 0.20523751, 0.44072926],
+    [0.5822395, 0.20425693, 0.44098758],
+    [0.57860222, 0.20328034, 0.44122241],
+    [0.57496549, 0.20230637, 0.44143805],
+    [0.57132875, 0.20133689, 0.4416298],
+    [0.56769215, 0.20037071, 0.44180142],
+    [0.5640552, 0.19940936, 0.44194923],
+    [0.56041794, 0.19845221, 0.44207535],
+    [0.55678004, 0.1975, 0.44217824],
+    [0.55314129, 0.19655316, 0.44225723],
+    [0.54950166, 0.19561118, 0.44231412],
+    [0.54585987, 0.19467771, 0.44234111],
+    [0.54221157, 0.19375869, 0.44233698],
+    [0.5385549, 0.19285696, 0.44229959],
+    [0.5348913, 0.19197036, 0.44222958],
+    [0.53122177, 0.1910974, 0.44212735],
+    [0.52754464, 0.19024042, 0.44199159],
+    [0.52386353, 0.18939409, 0.44182449],
+    [0.52017476, 0.18856368, 0.44162345],
+    [0.51648277, 0.18774266, 0.44139128],
+    [0.51278481, 0.18693492, 0.44112605],
+    [0.50908361, 0.18613639, 0.4408295],
+    [0.50537784, 0.18534893, 0.44050064],
+    [0.50166912, 0.18457008, 0.44014054],
+    [0.49795686, 0.18380056, 0.43974881],
+    [0.49424218, 0.18303865, 0.43932623],
+    [0.49052472, 0.18228477, 0.43887255],
+    [0.48680565, 0.1815371, 0.43838867],
+    [0.48308419, 0.18079663, 0.43787408],
+    [0.47936222, 0.18006056, 0.43733022],
+    [0.47563799, 0.17933127, 0.43675585],
+    [0.47191466, 0.17860416, 0.43615337],
+    [0.46818879, 0.17788392, 0.43552047],
+    [0.46446454, 0.17716458, 0.43486036],
+    [0.46073893, 0.17645017, 0.43417097],
+    [0.45701462, 0.17573691, 0.43345429],
+    [0.45329097, 0.17502549, 0.43271025],
+    [0.44956744, 0.17431649, 0.4319386],
+    [0.44584668, 0.17360625, 0.43114133],
+    [0.44212538, 0.17289906, 0.43031642],
+    [0.43840678, 0.17219041, 0.42946642],
+    [0.43469046, 0.17148074, 0.42859124],
+    [0.4309749, 0.17077192, 0.42769008],
+    [0.42726297, 0.17006003, 0.42676519],
+    [0.42355299, 0.16934709, 0.42581586],
+    [0.41984535, 0.16863258, 0.42484219],
+    [0.41614149, 0.16791429, 0.42384614],
+    [0.41244029, 0.16719372, 0.42282661],
+    [0.40874177, 0.16647061, 0.42178429],
+    [0.40504765, 0.16574261, 0.42072062],
+    [0.401357, 0.16501079, 0.41963528],
+    [0.397669, 0.16427607, 0.418528],
+    [0.39398585, 0.16353554, 0.41740053],
+    [0.39030735, 0.16278924, 0.41625344],
+    [0.3866314, 0.16203977, 0.41508517],
+    [0.38295904, 0.16128519, 0.41389849],
+    [0.37928736, 0.16052483, 0.41270599],
+    [0.37562649, 0.15974704, 0.41151182],
+    [0.37197803, 0.15895049, 0.41031532],
+    [0.36833779, 0.15813871, 0.40911916],
+    [0.36470944, 0.15730861, 0.40792149],
+    [0.36109117, 0.15646169, 0.40672362],
+    [0.35748213, 0.15559861, 0.40552633],
+    [0.353885, 0.15471714, 0.40432831],
+    [0.35029682, 0.15381967, 0.4031316],
+    [0.34671861, 0.1529053, 0.40193587],
+    [0.34315191, 0.15197275, 0.40074049],
+    [0.33959331, 0.15102466, 0.3995478],
+    [0.33604378, 0.15006017, 0.39835754],
+    [0.33250529, 0.14907766, 0.39716879],
+    [0.32897621, 0.14807831, 0.39598285],
+    [0.3254559, 0.14706248, 0.39480044],
+    [0.32194567, 0.14602909, 0.39362106],
+    [0.31844477, 0.14497857, 0.39244549],
+    [0.31494974, 0.14391333, 0.39127626],
+    [0.31146605, 0.14282918, 0.39011024],
+    [0.30798857, 0.1417297, 0.38895105],
+    [0.30451661, 0.14061515, 0.38779953],
+    [0.30105136, 0.13948445, 0.38665531],
+    [0.2975886, 0.1383403, 0.38552159],
+    [0.29408557, 0.13721193, 0.38442775]
+]
+
+
+_crest_lut = [
+    [0.6468274, 0.80289262, 0.56592265],
+    [0.64233318, 0.80081141, 0.56639461],
+    [0.63791969, 0.7987162, 0.56674976],
+    [0.6335316, 0.79661833, 0.56706128],
+    [0.62915226, 0.7945212, 0.56735066],
+    [0.62477862, 0.79242543, 0.56762143],
+    [0.62042003, 0.79032918, 0.56786129],
+    [0.61606327, 0.78823508, 0.56808666],
+    [0.61171322, 0.78614216, 0.56829092],
+    [0.60736933, 0.78405055, 0.56847436],
+    [0.60302658, 0.78196121, 0.56864272],
+    [0.59868708, 0.77987374, 0.56879289],
+    [0.59435366, 0.77778758, 0.56892099],
+    [0.59001953, 0.77570403, 0.56903477],
+    [0.58568753, 0.77362254, 0.56913028],
+    [0.58135593, 0.77154342, 0.56920908],
+    [0.57702623, 0.76946638, 0.56926895],
+    [0.57269165, 0.76739266, 0.5693172],
+    [0.56835934, 0.76532092, 0.56934507],
+    [0.56402533, 0.76325185, 0.56935664],
+    [0.55968429, 0.76118643, 0.56935732],
+    [0.55534159, 0.75912361, 0.56934052],
+    [0.55099572, 0.75706366, 0.56930743],
+    [0.54664626, 0.75500662, 0.56925799],
+    [0.54228969, 0.75295306, 0.56919546],
+    [0.53792417, 0.75090328, 0.56912118],
+    [0.53355172, 0.74885687, 0.5690324],
+    [0.52917169, 0.74681387, 0.56892926],
+    [0.52478243, 0.74477453, 0.56881287],
+    [0.52038338, 0.74273888, 0.56868323],
+    [0.5159739, 0.74070697, 0.56854039],
+    [0.51155269, 0.73867895, 0.56838507],
+    [0.50711872, 0.73665492, 0.56821764],
+    [0.50267118, 0.73463494, 0.56803826],
+    [0.49822926, 0.73261388, 0.56785146],
+    [0.49381422, 0.73058524, 0.56767484],
+    [0.48942421, 0.72854938, 0.56751036],
+    [0.48505993, 0.72650623, 0.56735752],
+    [0.48072207, 0.72445575, 0.56721583],
+    [0.4764113, 0.72239788, 0.56708475],
+    [0.47212827, 0.72033258, 0.56696376],
+    [0.46787361, 0.71825983, 0.56685231],
+    [0.46364792, 0.71617961, 0.56674986],
+    [0.45945271, 0.71409167, 0.56665625],
+    [0.45528878, 0.71199595, 0.56657103],
+    [0.45115557, 0.70989276, 0.5664931],
+    [0.44705356, 0.70778212, 0.56642189],
+    [0.44298321, 0.70566406, 0.56635683],
+    [0.43894492, 0.70353863, 0.56629734],
+    [0.43493911, 0.70140588, 0.56624286],
+    [0.43096612, 0.69926587, 0.5661928],
+    [0.42702625, 0.69711868, 0.56614659],
+    [0.42311977, 0.69496438, 0.56610368],
+    [0.41924689, 0.69280308, 0.56606355],
+    [0.41540778, 0.69063486, 0.56602564],
+    [0.41160259, 0.68845984, 0.56598944],
+    [0.40783143, 0.68627814, 0.56595436],
+    [0.40409434, 0.68408988, 0.56591994],
+    [0.40039134, 0.68189518, 0.56588564],
+    [0.39672238, 0.6796942, 0.56585103],
+    [0.39308781, 0.67748696, 0.56581581],
+    [0.38949137, 0.67527276, 0.56578084],
+    [0.38592889, 0.67305266, 0.56574422],
+    [0.38240013, 0.67082685, 0.56570561],
+    [0.37890483, 0.66859548, 0.56566462],
+    [0.37544276, 0.66635871, 0.56562081],
+    [0.37201365, 0.66411673, 0.56557372],
+    [0.36861709, 0.6618697, 0.5655231],
+    [0.36525264, 0.65961782, 0.56546873],
+    [0.36191986, 0.65736125, 0.56541032],
+    [0.35861935, 0.65509998, 0.56534768],
+    [0.35535621, 0.65283302, 0.56528211],
+    [0.35212361, 0.65056188, 0.56521171],
+    [0.34892097, 0.64828676, 0.56513633],
+    [0.34574785, 0.64600783, 0.56505539],
+    [0.34260357, 0.64372528, 0.5649689],
+    [0.33948744, 0.64143931, 0.56487679],
+    [0.33639887, 0.6391501, 0.56477869],
+    [0.33334501, 0.63685626, 0.56467661],
+    [0.33031952, 0.63455911, 0.564569],
+    [0.3273199, 0.63225924, 0.56445488],
+    [0.32434526, 0.62995682, 0.56433457],
+    [0.32139487, 0.62765201, 0.56420795],
+    [0.31846807, 0.62534504, 0.56407446],
+    [0.3155731, 0.62303426, 0.56393695],
+    [0.31270304, 0.62072111, 0.56379321],
+    [0.30985436, 0.61840624, 0.56364307],
+    [0.30702635, 0.61608984, 0.56348606],
+    [0.30421803, 0.61377205, 0.56332267],
+    [0.30143611, 0.61145167, 0.56315419],
+    [0.29867863, 0.60912907, 0.56298054],
+    [0.29593872, 0.60680554, 0.56280022],
+    [0.29321538, 0.60448121, 0.56261376],
+    [0.2905079, 0.60215628, 0.56242036],
+    [0.28782827, 0.5998285, 0.56222366],
+    [0.28516521, 0.59749996, 0.56202093],
+    [0.28251558, 0.59517119, 0.56181204],
+    [0.27987847, 0.59284232, 0.56159709],
+    [0.27726216, 0.59051189, 0.56137785],
+    [0.27466434, 0.58818027, 0.56115433],
+    [0.2720767, 0.58584893, 0.56092486],
+    [0.26949829, 0.58351797, 0.56068983],
+    [0.26693801, 0.58118582, 0.56045121],
+    [0.26439366, 0.57885288, 0.56020858],
+    [0.26185616, 0.57652063, 0.55996077],
+    [0.25932459, 0.57418919, 0.55970795],
+    [0.25681303, 0.57185614, 0.55945297],
+    [0.25431024, 0.56952337, 0.55919385],
+    [0.25180492, 0.56719255, 0.5589305],
+    [0.24929311, 0.56486397, 0.5586654],
+    [0.24678356, 0.56253666, 0.55839491],
+    [0.24426587, 0.56021153, 0.55812473],
+    [0.24174022, 0.55788852, 0.55785448],
+    [0.23921167, 0.55556705, 0.55758211],
+    [0.23668315, 0.55324675, 0.55730676],
+    [0.23414742, 0.55092825, 0.55703167],
+    [0.23160473, 0.54861143, 0.5567573],
+    [0.22905996, 0.54629572, 0.55648168],
+    [0.22651648, 0.54398082, 0.5562029],
+    [0.22396709, 0.54166721, 0.55592542],
+    [0.22141221, 0.53935481, 0.55564885],
+    [0.21885269, 0.53704347, 0.55537294],
+    [0.21629986, 0.53473208, 0.55509319],
+    [0.21374297, 0.53242154, 0.5548144],
+    [0.21118255, 0.53011166, 0.55453708],
+    [0.2086192, 0.52780237, 0.55426067],
+    [0.20605624, 0.52549322, 0.55398479],
+    [0.20350004, 0.5231837, 0.55370601],
+    [0.20094292, 0.52087429, 0.55342884],
+    [0.19838567, 0.51856489, 0.55315283],
+    [0.19582911, 0.51625531, 0.55287818],
+    [0.19327413, 0.51394542, 0.55260469],
+    [0.19072933, 0.51163448, 0.5523289],
+    [0.18819045, 0.50932268, 0.55205372],
+    [0.18565609, 0.50701014, 0.55177937],
+    [0.18312739, 0.50469666, 0.55150597],
+    [0.18060561, 0.50238204, 0.55123374],
+    [0.178092, 0.50006616, 0.55096224],
+    [0.17558808, 0.49774882, 0.55069118],
+    [0.17310341, 0.49542924, 0.5504176],
+    [0.17063111, 0.49310789, 0.55014445],
+    [0.1681728, 0.49078458, 0.54987159],
+    [0.1657302, 0.48845913, 0.54959882],
+    [0.16330517, 0.48613135, 0.54932605],
+    [0.16089963, 0.48380104, 0.54905306],
+    [0.15851561, 0.48146803, 0.54877953],
+    [0.15615526, 0.47913212, 0.54850526],
+    [0.15382083, 0.47679313, 0.54822991],
+    [0.15151471, 0.47445087, 0.54795318],
+    [0.14924112, 0.47210502, 0.54767411],
+    [0.1470032, 0.46975537, 0.54739226],
+    [0.14480101, 0.46740187, 0.54710832],
+    [0.14263736, 0.46504434, 0.54682188],
+    [0.14051521, 0.46268258, 0.54653253],
+    [0.13843761, 0.46031639, 0.54623985],
+    [0.13640774, 0.45794558, 0.5459434],
+    [0.13442887, 0.45556994, 0.54564272],
+    [0.1325044, 0.45318928, 0.54533736],
+    [0.13063777, 0.4508034, 0.54502674],
+    [0.12883252, 0.44841211, 0.5447104],
+    [0.12709242, 0.44601517, 0.54438795],
+    [0.1254209, 0.44361244, 0.54405855],
+    [0.12382162, 0.44120373, 0.54372156],
+    [0.12229818, 0.43878887, 0.54337634],
+    [0.12085453, 0.4363676, 0.54302253],
+    [0.11949938, 0.43393955, 0.54265715],
+    [0.11823166, 0.43150478, 0.54228104],
+    [0.11705496, 0.42906306, 0.54189388],
+    [0.115972, 0.42661431, 0.54149449],
+    [0.11498598, 0.42415835, 0.54108222],
+    [0.11409965, 0.42169502, 0.54065622],
+    [0.11331533, 0.41922424, 0.5402155],
+    [0.11263542, 0.41674582, 0.53975931],
+    [0.1120615, 0.4142597, 0.53928656],
+    [0.11159738, 0.41176567, 0.53879549],
+    [0.11125248, 0.40926325, 0.53828203],
+    [0.11101698, 0.40675289, 0.53774864],
+    [0.11089152, 0.40423445, 0.53719455],
+    [0.11085121, 0.4017095, 0.53662425],
+    [0.11087217, 0.39917938, 0.53604354],
+    [0.11095515, 0.39664394, 0.53545166],
+    [0.11110676, 0.39410282, 0.53484509],
+    [0.11131735, 0.39155635, 0.53422678],
+    [0.11158595, 0.38900446, 0.53359634],
+    [0.11191139, 0.38644711, 0.5329534],
+    [0.11229224, 0.38388426, 0.53229748],
+    [0.11273683, 0.38131546, 0.53162393],
+    [0.11323438, 0.37874109, 0.53093619],
+    [0.11378271, 0.37616112, 0.53023413],
+    [0.11437992, 0.37357557, 0.52951727],
+    [0.11502681, 0.37098429, 0.52878396],
+    [0.11572661, 0.36838709, 0.52803124],
+    [0.11646936, 0.36578429, 0.52726234],
+    [0.11725299, 0.3631759, 0.52647685],
+    [0.1180755, 0.36056193, 0.52567436],
+    [0.1189438, 0.35794203, 0.5248497],
+    [0.11984752, 0.35531657, 0.52400649],
+    [0.1207833, 0.35268564, 0.52314492],
+    [0.12174895, 0.35004927, 0.52226461],
+    [0.12274959, 0.34740723, 0.52136104],
+    [0.12377809, 0.34475975, 0.52043639],
+    [0.12482961, 0.34210702, 0.51949179],
+    [0.125902, 0.33944908, 0.51852688],
+    [0.12699998, 0.33678574, 0.51753708],
+    [0.12811691, 0.33411727, 0.51652464],
+    [0.12924811, 0.33144384, 0.51549084],
+    [0.13039157, 0.32876552, 0.51443538],
+    [0.13155228, 0.32608217, 0.51335321],
+    [0.13272282, 0.32339407, 0.51224759],
+    [0.13389954, 0.32070138, 0.51111946],
+    [0.13508064, 0.31800419, 0.50996862],
+    [0.13627149, 0.31530238, 0.50878942],
+    [0.13746376, 0.31259627, 0.50758645],
+    [0.13865499, 0.30988598, 0.50636017],
+    [0.13984364, 0.30717161, 0.50511042],
+    [0.14103515, 0.30445309, 0.50383119],
+    [0.14222093, 0.30173071, 0.50252813],
+    [0.14339946, 0.2990046, 0.50120127],
+    [0.14456941, 0.29627483, 0.49985054],
+    [0.14573579, 0.29354139, 0.49847009],
+    [0.14689091, 0.29080452, 0.49706566],
+    [0.1480336, 0.28806432, 0.49563732],
+    [0.1491628, 0.28532086, 0.49418508],
+    [0.15028228, 0.28257418, 0.49270402],
+    [0.15138673, 0.27982444, 0.49119848],
+    [0.15247457, 0.27707172, 0.48966925],
+    [0.15354487, 0.2743161, 0.48811641],
+    [0.15459955, 0.27155765, 0.4865371],
+    [0.15563716, 0.26879642, 0.4849321],
+    [0.1566572, 0.26603191, 0.48330429],
+    [0.15765823, 0.26326032, 0.48167456],
+    [0.15862147, 0.26048295, 0.48005785],
+    [0.15954301, 0.25770084, 0.47845341],
+    [0.16043267, 0.25491144, 0.4768626],
+    [0.16129262, 0.25211406, 0.4752857],
+    [0.1621119, 0.24931169, 0.47372076],
+    [0.16290577, 0.24649998, 0.47217025],
+    [0.16366819, 0.24368054, 0.47063302],
+    [0.1644021, 0.24085237, 0.46910949],
+    [0.16510882, 0.2380149, 0.46759982],
+    [0.16579015, 0.23516739, 0.46610429],
+    [0.1664433, 0.2323105, 0.46462219],
+    [0.16707586, 0.22944155, 0.46315508],
+    [0.16768475, 0.22656122, 0.46170223],
+    [0.16826815, 0.22366984, 0.46026308],
+    [0.16883174, 0.22076514, 0.45883891],
+    [0.16937589, 0.21784655, 0.45742976],
+    [0.16990129, 0.21491339, 0.45603578],
+    [0.1704074, 0.21196535, 0.45465677],
+    [0.17089473, 0.20900176, 0.4532928],
+    [0.17136819, 0.20602012, 0.45194524],
+    [0.17182683, 0.20302012, 0.45061386],
+    [0.17227059, 0.20000106, 0.44929865],
+    [0.17270583, 0.19695949, 0.44800165],
+    [0.17313804, 0.19389201, 0.44672488],
+    [0.17363177, 0.19076859, 0.44549087]
+]
+
+
+_lut_dict = dict(
+    rocket=_rocket_lut,
+    mako=_mako_lut,
+    icefire=_icefire_lut,
+    vlag=_vlag_lut,
+    flare=_flare_lut,
+    crest=_crest_lut,
+
+)
+
 for _name, _lut in _lut_dict.items():
+
     _cmap = colors.ListedColormap(_lut, _name)
     locals()[_name] = _cmap
-    _cmap_r = colors.ListedColormap(_lut[::-1], _name + '_r')
-    locals()[_name + '_r'] = _cmap_r
+
+    _cmap_r = colors.ListedColormap(_lut[::-1], _name + "_r")
+    locals()[_name + "_r"] = _cmap_r
+
     register_colormap(_name, _cmap)
-    register_colormap(_name + '_r', _cmap_r)
+    register_colormap(_name + "_r", _cmap_r)
+
 del colors, register_colormap
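
Note on the hunk above: the loop builds a forward and a reversed ListedColormap for each LUT and registers both by name, so palettes like "rocket" and "icefire_r" become addressable as ordinary matplotlib colormaps. A minimal usage sketch follows; it assumes the register_colormap helper (defined earlier in cm.py, not shown here) wraps matplotlib's colormap registry, and is illustrative only, not part of the patch:

    # Sketch: retrieving the colormaps registered by the loop above.
    # Assumes importing seaborn.cm runs the registration loop and that
    # register_colormap maps onto matplotlib's global colormap registry.
    import numpy as np
    import matplotlib.pyplot as plt
    import seaborn.cm  # noqa: F401  (side effect: registers rocket, mako, ...)

    data = np.random.rand(10, 10)
    fig, (ax1, ax2) = plt.subplots(1, 2)
    ax1.imshow(data, cmap="rocket")     # forward palette, looked up by name
    ax2.imshow(data, cmap="icefire_r")  # reversed variant built by the loop
    plt.show()
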
diff --git a/seaborn/colors/crayons.py b/seaborn/colors/crayons.py
index b6a4ae39..548af1f1 100644
--- a/seaborn/colors/crayons.py
+++ b/seaborn/colors/crayons.py
@@ -1,46 +1,120 @@
-crayons = {'Almond': '#EFDECD', 'Antique Brass': '#CD9575', 'Apricot':
-    '#FDD9B5', 'Aquamarine': '#78DBE2', 'Asparagus': '#87A96B',
-    'Atomic Tangerine': '#FFA474', 'Banana Mania': '#FAE7B5', 'Beaver':
-    '#9F8170', 'Bittersweet': '#FD7C6E', 'Black': '#000000', 'Blue':
-    '#1F75FE', 'Blue Bell': '#A2A2D0', 'Blue Green': '#0D98BA',
-    'Blue Violet': '#7366BD', 'Blush': '#DE5D83', 'Brick Red': '#CB4154',
-    'Brown': '#B4674D', 'Burnt Orange': '#FF7F49', 'Burnt Sienna':
-    '#EA7E5D', 'Cadet Blue': '#B0B7C6', 'Canary': '#FFFF99',
-    'Caribbean Green': '#00CC99', 'Carnation Pink': '#FFAACC', 'Cerise':
-    '#DD4492', 'Cerulean': '#1DACD6', 'Chestnut': '#BC5D58', 'Copper':
-    '#DD9475', 'Cornflower': '#9ACEEB', 'Cotton Candy': '#FFBCD9',
-    'Dandelion': '#FDDB6D', 'Denim': '#2B6CC4', 'Desert Sand': '#EFCDB8',
-    'Eggplant': '#6E5160', 'Electric Lime': '#CEFF1D', 'Fern': '#71BC78',
-    'Forest Green': '#6DAE81', 'Fuchsia': '#C364C5', 'Fuzzy Wuzzy':
-    '#CC6666', 'Gold': '#E7C697', 'Goldenrod': '#FCD975',
-    'Granny Smith Apple': '#A8E4A0', 'Gray': '#95918C', 'Green': '#1CAC78',
-    'Green Yellow': '#F0E891', 'Hot Magenta': '#FF1DCE', 'Inchworm':
-    '#B2EC5D', 'Indigo': '#5D76CB', 'Jazzberry Jam': '#CA3767',
-    'Jungle Green': '#3BB08F', 'Laser Lemon': '#FEFE22', 'Lavender':
-    '#FCB4D5', 'Macaroni and Cheese': '#FFBD88', 'Magenta': '#F664AF',
-    'Mahogany': '#CD4A4C', 'Manatee': '#979AAA', 'Mango Tango': '#FF8243',
-    'Maroon': '#C8385A', 'Mauvelous': '#EF98AA', 'Melon': '#FDBCB4',
-    'Midnight Blue': '#1A4876', 'Mountain Meadow': '#30BA8F', 'Navy Blue':
-    '#1974D2', 'Neon Carrot': '#FFA343', 'Olive Green': '#BAB86C', 'Orange':
-    '#FF7538', 'Orchid': '#E6A8D7', 'Outer Space': '#414A4C',
-    'Outrageous Orange': '#FF6E4A', 'Pacific Blue': '#1CA9C9', 'Peach':
-    '#FFCFAB', 'Periwinkle': '#C5D0E6', 'Piggy Pink': '#FDDDE6',
-    'Pine Green': '#158078', 'Pink Flamingo': '#FC74FD', 'Pink Sherbert':
-    '#F78FA7', 'Plum': '#8E4585', 'Purple Heart': '#7442C8',
-    "Purple Mountains' Majesty": '#9D81BA', 'Purple Pizzazz': '#FE4EDA',
-    'Radical Red': '#FF496C', 'Raw Sienna': '#D68A59', 'Razzle Dazzle Rose':
-    '#FF48D0', 'Razzmatazz': '#E3256B', 'Red': '#EE204D', 'Red Orange':
-    '#FF5349', 'Red Violet': '#C0448F', "Robin's Egg Blue": '#1FCECB',
-    'Royal Purple': '#7851A9', 'Salmon': '#FF9BAA', 'Scarlet': '#FC2847',
-    "Screamin' Green": '#76FF7A', 'Sea Green': '#93DFB8', 'Sepia':
-    '#A5694F', 'Shadow': '#8A795D', 'Shamrock': '#45CEA2', 'Shocking Pink':
-    '#FB7EFD', 'Silver': '#CDC5C2', 'Sky Blue': '#80DAEB', 'Spring Green':
-    '#ECEABE', 'Sunglow': '#FFCF48', 'Sunset Orange': '#FD5E53', 'Tan':
-    '#FAA76C', 'Tickle Me Pink': '#FC89AC', 'Timberwolf': '#DBD7D2',
-    'Tropical Rain Forest': '#17806D', 'Tumbleweed': '#DEAA88',
-    'Turquoise Blue': '#77DDE7', 'Unmellow Yellow': '#FFFF66',
-    'Violet (Purple)': '#926EAE', 'Violet Red': '#F75394',
-    'Vivid Tangerine': '#FFA089', 'Vivid Violet': '#8F509D', 'White':
-    '#FFFFFF', 'Wild Blue Yonder': '#A2ADD0', 'Wild Strawberry': '#FF43A4',
-    'Wild Watermelon': '#FC6C85', 'Wisteria': '#CDA4DE', 'Yellow':
-    '#FCE883', 'Yellow Green': '#C5E384', 'Yellow Orange': '#FFAE42'}
+crayons = {'Almond': '#EFDECD',
+           'Antique Brass': '#CD9575',
+           'Apricot': '#FDD9B5',
+           'Aquamarine': '#78DBE2',
+           'Asparagus': '#87A96B',
+           'Atomic Tangerine': '#FFA474',
+           'Banana Mania': '#FAE7B5',
+           'Beaver': '#9F8170',
+           'Bittersweet': '#FD7C6E',
+           'Black': '#000000',
+           'Blue': '#1F75FE',
+           'Blue Bell': '#A2A2D0',
+           'Blue Green': '#0D98BA',
+           'Blue Violet': '#7366BD',
+           'Blush': '#DE5D83',
+           'Brick Red': '#CB4154',
+           'Brown': '#B4674D',
+           'Burnt Orange': '#FF7F49',
+           'Burnt Sienna': '#EA7E5D',
+           'Cadet Blue': '#B0B7C6',
+           'Canary': '#FFFF99',
+           'Caribbean Green': '#00CC99',
+           'Carnation Pink': '#FFAACC',
+           'Cerise': '#DD4492',
+           'Cerulean': '#1DACD6',
+           'Chestnut': '#BC5D58',
+           'Copper': '#DD9475',
+           'Cornflower': '#9ACEEB',
+           'Cotton Candy': '#FFBCD9',
+           'Dandelion': '#FDDB6D',
+           'Denim': '#2B6CC4',
+           'Desert Sand': '#EFCDB8',
+           'Eggplant': '#6E5160',
+           'Electric Lime': '#CEFF1D',
+           'Fern': '#71BC78',
+           'Forest Green': '#6DAE81',
+           'Fuchsia': '#C364C5',
+           'Fuzzy Wuzzy': '#CC6666',
+           'Gold': '#E7C697',
+           'Goldenrod': '#FCD975',
+           'Granny Smith Apple': '#A8E4A0',
+           'Gray': '#95918C',
+           'Green': '#1CAC78',
+           'Green Yellow': '#F0E891',
+           'Hot Magenta': '#FF1DCE',
+           'Inchworm': '#B2EC5D',
+           'Indigo': '#5D76CB',
+           'Jazzberry Jam': '#CA3767',
+           'Jungle Green': '#3BB08F',
+           'Laser Lemon': '#FEFE22',
+           'Lavender': '#FCB4D5',
+           'Macaroni and Cheese': '#FFBD88',
+           'Magenta': '#F664AF',
+           'Mahogany': '#CD4A4C',
+           'Manatee': '#979AAA',
+           'Mango Tango': '#FF8243',
+           'Maroon': '#C8385A',
+           'Mauvelous': '#EF98AA',
+           'Melon': '#FDBCB4',
+           'Midnight Blue': '#1A4876',
+           'Mountain Meadow': '#30BA8F',
+           'Navy Blue': '#1974D2',
+           'Neon Carrot': '#FFA343',
+           'Olive Green': '#BAB86C',
+           'Orange': '#FF7538',
+           'Orchid': '#E6A8D7',
+           'Outer Space': '#414A4C',
+           'Outrageous Orange': '#FF6E4A',
+           'Pacific Blue': '#1CA9C9',
+           'Peach': '#FFCFAB',
+           'Periwinkle': '#C5D0E6',
+           'Piggy Pink': '#FDDDE6',
+           'Pine Green': '#158078',
+           'Pink Flamingo': '#FC74FD',
+           'Pink Sherbert': '#F78FA7',
+           'Plum': '#8E4585',
+           'Purple Heart': '#7442C8',
+           "Purple Mountains' Majesty": '#9D81BA',
+           'Purple Pizzazz': '#FE4EDA',
+           'Radical Red': '#FF496C',
+           'Raw Sienna': '#D68A59',
+           'Razzle Dazzle Rose': '#FF48D0',
+           'Razzmatazz': '#E3256B',
+           'Red': '#EE204D',
+           'Red Orange': '#FF5349',
+           'Red Violet': '#C0448F',
+           "Robin's Egg Blue": '#1FCECB',
+           'Royal Purple': '#7851A9',
+           'Salmon': '#FF9BAA',
+           'Scarlet': '#FC2847',
+           "Screamin' Green": '#76FF7A',
+           'Sea Green': '#93DFB8',
+           'Sepia': '#A5694F',
+           'Shadow': '#8A795D',
+           'Shamrock': '#45CEA2',
+           'Shocking Pink': '#FB7EFD',
+           'Silver': '#CDC5C2',
+           'Sky Blue': '#80DAEB',
+           'Spring Green': '#ECEABE',
+           'Sunglow': '#FFCF48',
+           'Sunset Orange': '#FD5E53',
+           'Tan': '#FAA76C',
+           'Tickle Me Pink': '#FC89AC',
+           'Timberwolf': '#DBD7D2',
+           'Tropical Rain Forest': '#17806D',
+           'Tumbleweed': '#DEAA88',
+           'Turquoise Blue': '#77DDE7',
+           'Unmellow Yellow': '#FFFF66',
+           'Violet (Purple)': '#926EAE',
+           'Violet Red': '#F75394',
+           'Vivid Tangerine': '#FFA089',
+           'Vivid Violet': '#8F509D',
+           'White': '#FFFFFF',
+           'Wild Blue Yonder': '#A2ADD0',
+           'Wild Strawberry': '#FF43A4',
+           'Wild Watermelon': '#FC6C85',
+           'Wisteria': '#CDA4DE',
+           'Yellow': '#FCE883',
+           'Yellow Green': '#C5E384',
+           'Yellow Orange': '#FFAE42'}
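
The crayons change above is purely a reformat of a data table: Crayola color names keyed to hex strings. For orientation, a short hedged sketch of how that table is typically consumed; sns.crayons and sns.crayon_palette are the seaborn entry points backed by this dict, but treat the exact signatures as an assumption here rather than something stated in the patch:

    # Sketch: looking up crayon hex codes and building a palette from names.
    # Assumes seaborn re-exports this dict as sns.crayons and provides
    # sns.crayon_palette(names) returning RGB tuples.
    import seaborn as sns

    names = ["Denim", "Mango Tango", "Fern"]
    hex_codes = [sns.crayons[n] for n in names]   # e.g. '#2B6CC4', ...
    palette = sns.crayon_palette(names)           # list of RGB tuples
    print(hex_codes, palette)
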
diff --git a/seaborn/colors/xkcd_rgb.py b/seaborn/colors/xkcd_rgb.py
index 66ddf31a..0f775cf6 100644
--- a/seaborn/colors/xkcd_rgb.py
+++ b/seaborn/colors/xkcd_rgb.py
@@ -1,362 +1,949 @@
-xkcd_rgb = {'acid green': '#8ffe09', 'adobe': '#bd6c48', 'algae': '#54ac68',
-    'algae green': '#21c36f', 'almost black': '#070d0d', 'amber': '#feb308',
-    'amethyst': '#9b5fc0', 'apple': '#6ecb3c', 'apple green': '#76cd26',
-    'apricot': '#ffb16d', 'aqua': '#13eac9', 'aqua blue': '#02d8e9',
-    'aqua green': '#12e193', 'aqua marine': '#2ee8bb', 'aquamarine':
-    '#04d8b2', 'army green': '#4b5d16', 'asparagus': '#77ab56', 'aubergine':
-    '#3d0734', 'auburn': '#9a3001', 'avocado': '#90b134', 'avocado green':
-    '#87a922', 'azul': '#1d5dec', 'azure': '#069af3', 'baby blue':
-    '#a2cffe', 'baby green': '#8cff9e', 'baby pink': '#ffb7ce', 'baby poo':
-    '#ab9004', 'baby poop': '#937c00', 'baby poop green': '#8f9805',
-    'baby puke green': '#b6c406', 'baby purple': '#ca9bf7',
-    'baby shit brown': '#ad900d', 'baby shit green': '#889717', 'banana':
-    '#ffff7e', 'banana yellow': '#fafe4b', 'barbie pink': '#fe46a5',
-    'barf green': '#94ac02', 'barney': '#ac1db8', 'barney purple':
-    '#a00498', 'battleship grey': '#6b7c85', 'beige': '#e6daa6', 'berry':
-    '#990f4b', 'bile': '#b5c306', 'black': '#000000', 'bland': '#afa88b',
-    'blood': '#770001', 'blood orange': '#fe4b03', 'blood red': '#980002',
-    'blue': '#0343df', 'blue blue': '#2242c7', 'blue green': '#137e6d',
-    'blue grey': '#607c8e', 'blue purple': '#5729ce', 'blue violet':
-    '#5d06e9', 'blue with a hint of purple': '#533cc6', 'blue/green':
-    '#0f9b8e', 'blue/grey': '#758da3', 'blue/purple': '#5a06ef',
-    'blueberry': '#464196', 'bluegreen': '#017a79', 'bluegrey': '#85a3b2',
-    'bluey green': '#2bb179', 'bluey grey': '#89a0b0', 'bluey purple':
-    '#6241c7', 'bluish': '#2976bb', 'bluish green': '#10a674',
-    'bluish grey': '#748b97', 'bluish purple': '#703be7', 'blurple':
-    '#5539cc', 'blush': '#f29e8e', 'blush pink': '#fe828c', 'booger':
-    '#9bb53c', 'booger green': '#96b403', 'bordeaux': '#7b002c',
-    'boring green': '#63b365', 'bottle green': '#044a05', 'brick':
-    '#a03623', 'brick orange': '#c14a09', 'brick red': '#8f1402',
-    'bright aqua': '#0bf9ea', 'bright blue': '#0165fc', 'bright cyan':
-    '#41fdfe', 'bright green': '#01ff07', 'bright lavender': '#c760ff',
-    'bright light blue': '#26f7fd', 'bright light green': '#2dfe54',
-    'bright lilac': '#c95efb', 'bright lime': '#87fd05',
-    'bright lime green': '#65fe08', 'bright magenta': '#ff08e8',
-    'bright olive': '#9cbb04', 'bright orange': '#ff5b00', 'bright pink':
-    '#fe01b1', 'bright purple': '#be03fd', 'bright red': '#ff000d',
-    'bright sea green': '#05ffa6', 'bright sky blue': '#02ccfe',
-    'bright teal': '#01f9c6', 'bright turquoise': '#0ffef9',
-    'bright violet': '#ad0afd', 'bright yellow': '#fffd01',
-    'bright yellow green': '#9dff00', 'british racing green': '#05480d',
-    'bronze': '#a87900', 'brown': '#653700', 'brown green': '#706c11',
-    'brown grey': '#8d8468', 'brown orange': '#b96902', 'brown red':
-    '#922b05', 'brown yellow': '#b29705', 'brownish': '#9c6d57',
-    'brownish green': '#6a6e09', 'brownish grey': '#86775f',
-    'brownish orange': '#cb7723', 'brownish pink': '#c27e79',
-    'brownish purple': '#76424e', 'brownish red': '#9e3623',
-    'brownish yellow': '#c9b003', 'browny green': '#6f6c0a',
-    'browny orange': '#ca6b02', 'bruise': '#7e4071', 'bubble gum pink':
-    '#ff69af', 'bubblegum': '#ff6cb5', 'bubblegum pink': '#fe83cc', 'buff':
-    '#fef69e', 'burgundy': '#610023', 'burnt orange': '#c04e01',
-    'burnt red': '#9f2305', 'burnt siena': '#b75203', 'burnt sienna':
-    '#b04e0f', 'burnt umber': '#a0450e', 'burnt yellow': '#d5ab09',
-    'burple': '#6832e3', 'butter': '#ffff81', 'butter yellow': '#fffd74',
-    'butterscotch': '#fdb147', 'cadet blue': '#4e7496', 'camel': '#c69f59',
-    'camo': '#7f8f4e', 'camo green': '#526525', 'camouflage green':
-    '#4b6113', 'canary': '#fdff63', 'canary yellow': '#fffe40',
-    'candy pink': '#ff63e9', 'caramel': '#af6f09', 'carmine': '#9d0216',
-    'carnation': '#fd798f', 'carnation pink': '#ff7fa7', 'carolina blue':
-    '#8ab8fe', 'celadon': '#befdb7', 'celery': '#c1fd95', 'cement':
-    '#a5a391', 'cerise': '#de0c62', 'cerulean': '#0485d1', 'cerulean blue':
-    '#056eee', 'charcoal': '#343837', 'charcoal grey': '#3c4142',
-    'chartreuse': '#c1f80a', 'cherry': '#cf0234', 'cherry red': '#f7022a',
-    'chestnut': '#742802', 'chocolate': '#3d1c02', 'chocolate brown':
-    '#411900', 'cinnamon': '#ac4f06', 'claret': '#680018', 'clay':
-    '#b66a50', 'clay brown': '#b2713d', 'clear blue': '#247afd',
-    'cloudy blue': '#acc2d9', 'cobalt': '#1e488f', 'cobalt blue': '#030aa7',
-    'cocoa': '#875f42', 'coffee': '#a6814c', 'cool blue': '#4984b8',
-    'cool green': '#33b864', 'cool grey': '#95a3a6', 'copper': '#b66325',
-    'coral': '#fc5a50', 'coral pink': '#ff6163', 'cornflower': '#6a79f7',
-    'cornflower blue': '#5170d7', 'cranberry': '#9e003a', 'cream':
-    '#ffffc2', 'creme': '#ffffb6', 'crimson': '#8c000f', 'custard':
-    '#fffd78', 'cyan': '#00ffff', 'dandelion': '#fedf08', 'dark': '#1b2431',
-    'dark aqua': '#05696b', 'dark aquamarine': '#017371', 'dark beige':
-    '#ac9362', 'dark blue': '#00035b', 'dark blue green': '#005249',
-    'dark blue grey': '#1f3b4d', 'dark brown': '#341c02', 'dark coral':
-    '#cf524e', 'dark cream': '#fff39a', 'dark cyan': '#0a888a',
-    'dark forest green': '#002d04', 'dark fuchsia': '#9d0759', 'dark gold':
-    '#b59410', 'dark grass green': '#388004', 'dark green': '#033500',
-    'dark green blue': '#1f6357', 'dark grey': '#363737', 'dark grey blue':
-    '#29465b', 'dark hot pink': '#d90166', 'dark indigo': '#1f0954',
-    'dark khaki': '#9b8f55', 'dark lavender': '#856798', 'dark lilac':
-    '#9c6da5', 'dark lime': '#84b701', 'dark lime green': '#7ebd01',
-    'dark magenta': '#960056', 'dark maroon': '#3c0008', 'dark mauve':
-    '#874c62', 'dark mint': '#48c072', 'dark mint green': '#20c073',
-    'dark mustard': '#a88905', 'dark navy': '#000435', 'dark navy blue':
-    '#00022e', 'dark olive': '#373e02', 'dark olive green': '#3c4d03',
-    'dark orange': '#c65102', 'dark pastel green': '#56ae57', 'dark peach':
-    '#de7e5d', 'dark periwinkle': '#665fd1', 'dark pink': '#cb416b',
-    'dark plum': '#3f012c', 'dark purple': '#35063e', 'dark red': '#840000',
-    'dark rose': '#b5485d', 'dark royal blue': '#02066f', 'dark sage':
-    '#598556', 'dark salmon': '#c85a53', 'dark sand': '#a88f59',
-    'dark sea green': '#11875d', 'dark seafoam': '#1fb57a',
-    'dark seafoam green': '#3eaf76', 'dark sky blue': '#448ee4',
-    'dark slate blue': '#214761', 'dark tan': '#af884a', 'dark taupe':
-    '#7f684e', 'dark teal': '#014d4e', 'dark turquoise': '#045c5a',
-    'dark violet': '#34013f', 'dark yellow': '#d5b60a', 'dark yellow green':
-    '#728f02', 'darkblue': '#030764', 'darkgreen': '#054907',
-    'darkish blue': '#014182', 'darkish green': '#287c37', 'darkish pink':
-    '#da467d', 'darkish purple': '#751973', 'darkish red': '#a90308',
-    'deep aqua': '#08787f', 'deep blue': '#040273', 'deep brown': '#410200',
-    'deep green': '#02590f', 'deep lavender': '#8d5eb7', 'deep lilac':
-    '#966ebd', 'deep magenta': '#a0025c', 'deep orange': '#dc4d01',
-    'deep pink': '#cb0162', 'deep purple': '#36013f', 'deep red': '#9a0200',
-    'deep rose': '#c74767', 'deep sea blue': '#015482', 'deep sky blue':
-    '#0d75f8', 'deep teal': '#00555a', 'deep turquoise': '#017374',
-    'deep violet': '#490648', 'denim': '#3b638c', 'denim blue': '#3b5b92',
-    'desert': '#ccad60', 'diarrhea': '#9f8303', 'dirt': '#8a6e45',
-    'dirt brown': '#836539', 'dirty blue': '#3f829d', 'dirty green':
-    '#667e2c', 'dirty orange': '#c87606', 'dirty pink': '#ca7b80',
-    'dirty purple': '#734a65', 'dirty yellow': '#cdc50a', 'dodger blue':
-    '#3e82fc', 'drab': '#828344', 'drab green': '#749551', 'dried blood':
-    '#4b0101', 'duck egg blue': '#c3fbf4', 'dull blue': '#49759c',
-    'dull brown': '#876e4b', 'dull green': '#74a662', 'dull orange':
-    '#d8863b', 'dull pink': '#d5869d', 'dull purple': '#84597e', 'dull red':
-    '#bb3f3f', 'dull teal': '#5f9e8f', 'dull yellow': '#eedc5b', 'dusk':
-    '#4e5481', 'dusk blue': '#26538d', 'dusky blue': '#475f94',
-    'dusky pink': '#cc7a8b', 'dusky purple': '#895b7b', 'dusky rose':
-    '#ba6873', 'dust': '#b2996e', 'dusty blue': '#5a86ad', 'dusty green':
-    '#76a973', 'dusty lavender': '#ac86a8', 'dusty orange': '#f0833a',
-    'dusty pink': '#d58a94', 'dusty purple': '#825f87', 'dusty red':
-    '#b9484e', 'dusty rose': '#c0737a', 'dusty teal': '#4c9085', 'earth':
-    '#a2653e', 'easter green': '#8cfd7e', 'easter purple': '#c071fe',
-    'ecru': '#feffca', 'egg shell': '#fffcc4', 'eggplant': '#380835',
-    'eggplant purple': '#430541', 'eggshell': '#ffffd4', 'eggshell blue':
-    '#c4fff7', 'electric blue': '#0652ff', 'electric green': '#21fc0d',
-    'electric lime': '#a8ff04', 'electric pink': '#ff0490',
-    'electric purple': '#aa23ff', 'emerald': '#01a049', 'emerald green':
-    '#028f1e', 'evergreen': '#05472a', 'faded blue': '#658cbb',
-    'faded green': '#7bb274', 'faded orange': '#f0944d', 'faded pink':
-    '#de9dac', 'faded purple': '#916e99', 'faded red': '#d3494e',
-    'faded yellow': '#feff7f', 'fawn': '#cfaf7b', 'fern': '#63a950',
-    'fern green': '#548d44', 'fire engine red': '#fe0002', 'flat blue':
-    '#3c73a8', 'flat green': '#699d4c', 'fluorescent green': '#08ff08',
-    'fluro green': '#0aff02', 'foam green': '#90fda9', 'forest': '#0b5509',
-    'forest green': '#06470c', 'forrest green': '#154406', 'french blue':
-    '#436bad', 'fresh green': '#69d84f', 'frog green': '#58bc08', 'fuchsia':
-    '#ed0dd9', 'gold': '#dbb40c', 'golden': '#f5bf03', 'golden brown':
-    '#b27a01', 'golden rod': '#f9bc08', 'golden yellow': '#fec615',
-    'goldenrod': '#fac205', 'grape': '#6c3461', 'grape purple': '#5d1451',
-    'grapefruit': '#fd5956', 'grass': '#5cac2d', 'grass green': '#3f9b0b',
-    'grassy green': '#419c03', 'green': '#15b01a', 'green apple': '#5edc1f',
-    'green blue': '#06b48b', 'green brown': '#544e03', 'green grey':
-    '#77926f', 'green teal': '#0cb577', 'green yellow': '#c9ff27',
-    'green/blue': '#01c08d', 'green/yellow': '#b5ce08', 'greenblue':
-    '#23c48b', 'greenish': '#40a368', 'greenish beige': '#c9d179',
-    'greenish blue': '#0b8b87', 'greenish brown': '#696112',
-    'greenish cyan': '#2afeb7', 'greenish grey': '#96ae8d', 'greenish tan':
-    '#bccb7a', 'greenish teal': '#32bf84', 'greenish turquoise': '#00fbb0',
-    'greenish yellow': '#cdfd02', 'greeny blue': '#42b395', 'greeny brown':
-    '#696006', 'greeny grey': '#7ea07a', 'greeny yellow': '#c6f808', 'grey':
-    '#929591', 'grey blue': '#6b8ba4', 'grey brown': '#7f7053',
-    'grey green': '#789b73', 'grey pink': '#c3909b', 'grey purple':
-    '#826d8c', 'grey teal': '#5e9b8a', 'grey/blue': '#647d8e', 'grey/green':
-    '#86a17d', 'greyblue': '#77a1b5', 'greyish': '#a8a495', 'greyish blue':
-    '#5e819d', 'greyish brown': '#7a6a4f', 'greyish green': '#82a67d',
-    'greyish pink': '#c88d94', 'greyish purple': '#887191', 'greyish teal':
-    '#719f91', 'gross green': '#a0bf16', 'gunmetal': '#536267', 'hazel':
-    '#8e7618', 'heather': '#a484ac', 'heliotrope': '#d94ff5',
-    'highlighter green': '#1bfc06', 'hospital green': '#9be5aa',
-    'hot green': '#25ff29', 'hot magenta': '#f504c9', 'hot pink': '#ff028d',
-    'hot purple': '#cb00f5', 'hunter green': '#0b4008', 'ice': '#d6fffa',
-    'ice blue': '#d7fffe', 'icky green': '#8fae22', 'indian red': '#850e04',
-    'indigo': '#380282', 'indigo blue': '#3a18b1', 'iris': '#6258c4',
-    'irish green': '#019529', 'ivory': '#ffffcb', 'jade': '#1fa774',
-    'jade green': '#2baf6a', 'jungle green': '#048243', 'kelley green':
-    '#009337', 'kelly green': '#02ab2e', 'kermit green': '#5cb200',
-    'key lime': '#aeff6e', 'khaki': '#aaa662', 'khaki green': '#728639',
-    'kiwi': '#9cef43', 'kiwi green': '#8ee53f', 'lavender': '#c79fef',
-    'lavender blue': '#8b88f8', 'lavender pink': '#dd85d7', 'lawn green':
-    '#4da409', 'leaf': '#71aa34', 'leaf green': '#5ca904', 'leafy green':
-    '#51b73b', 'leather': '#ac7434', 'lemon': '#fdff52', 'lemon green':
-    '#adf802', 'lemon lime': '#bffe28', 'lemon yellow': '#fdff38', 'lichen':
-    '#8fb67b', 'light aqua': '#8cffdb', 'light aquamarine': '#7bfdc7',
-    'light beige': '#fffeb6', 'light blue': '#95d0fc', 'light blue green':
-    '#7efbb3', 'light blue grey': '#b7c9e2', 'light bluish green':
-    '#76fda8', 'light bright green': '#53fe5c', 'light brown': '#ad8150',
-    'light burgundy': '#a8415b', 'light cyan': '#acfffc', 'light eggplant':
-    '#894585', 'light forest green': '#4f9153', 'light gold': '#fddc5c',
-    'light grass green': '#9af764', 'light green': '#96f97b',
-    'light green blue': '#56fca2', 'light greenish blue': '#63f7b4',
-    'light grey': '#d8dcd6', 'light grey blue': '#9dbcd4',
-    'light grey green': '#b7e1a1', 'light indigo': '#6d5acf', 'light khaki':
-    '#e6f2a2', 'light lavendar': '#efc0fe', 'light lavender': '#dfc5fe',
-    'light light blue': '#cafffb', 'light light green': '#c8ffb0',
-    'light lilac': '#edc8ff', 'light lime': '#aefd6c', 'light lime green':
-    '#b9ff66', 'light magenta': '#fa5ff7', 'light maroon': '#a24857',
-    'light mauve': '#c292a1', 'light mint': '#b6ffbb', 'light mint green':
-    '#a6fbb2', 'light moss green': '#a6c875', 'light mustard': '#f7d560',
-    'light navy': '#155084', 'light navy blue': '#2e5a88',
-    'light neon green': '#4efd54', 'light olive': '#acbf69',
-    'light olive green': '#a4be5c', 'light orange': '#fdaa48',
-    'light pastel green': '#b2fba5', 'light pea green': '#c4fe82',
-    'light peach': '#ffd8b1', 'light periwinkle': '#c1c6fc', 'light pink':
-    '#ffd1df', 'light plum': '#9d5783', 'light purple': '#bf77f6',
-    'light red': '#ff474c', 'light rose': '#ffc5cb', 'light royal blue':
-    '#3a2efe', 'light sage': '#bcecac', 'light salmon': '#fea993',
-    'light sea green': '#98f6b0', 'light seafoam': '#a0febf',
-    'light seafoam green': '#a7ffb5', 'light sky blue': '#c6fcff',
-    'light tan': '#fbeeac', 'light teal': '#90e4c1', 'light turquoise':
-    '#7ef4cc', 'light urple': '#b36ff6', 'light violet': '#d6b4fc',
-    'light yellow': '#fffe7a', 'light yellow green': '#ccfd7f',
-    'light yellowish green': '#c2ff89', 'lightblue': '#7bc8f6',
-    'lighter green': '#75fd63', 'lighter purple': '#a55af4', 'lightgreen':
-    '#76ff7b', 'lightish blue': '#3d7afd', 'lightish green': '#61e160',
-    'lightish purple': '#a552e6', 'lightish red': '#fe2f4a', 'lilac':
-    '#cea2fd', 'liliac': '#c48efd', 'lime': '#aaff32', 'lime green':
-    '#89fe05', 'lime yellow': '#d0fe1d', 'lipstick': '#d5174e',
-    'lipstick red': '#c0022f', 'macaroni and cheese': '#efb435', 'magenta':
-    '#c20078', 'mahogany': '#4a0100', 'maize': '#f4d054', 'mango':
-    '#ffa62b', 'manilla': '#fffa86', 'marigold': '#fcc006', 'marine':
-    '#042e60', 'marine blue': '#01386a', 'maroon': '#650021', 'mauve':
-    '#ae7181', 'medium blue': '#2c6fbb', 'medium brown': '#7f5112',
-    'medium green': '#39ad48', 'medium grey': '#7d7f7c', 'medium pink':
-    '#f36196', 'medium purple': '#9e43a2', 'melon': '#ff7855', 'merlot':
-    '#730039', 'metallic blue': '#4f738e', 'mid blue': '#276ab3',
-    'mid green': '#50a747', 'midnight': '#03012d', 'midnight blue':
-    '#020035', 'midnight purple': '#280137', 'military green': '#667c3e',
-    'milk chocolate': '#7f4e1e', 'mint': '#9ffeb0', 'mint green': '#8fff9f',
-    'minty green': '#0bf77d', 'mocha': '#9d7651', 'moss': '#769958',
-    'moss green': '#658b38', 'mossy green': '#638b27', 'mud': '#735c12',
-    'mud brown': '#60460f', 'mud green': '#606602', 'muddy brown':
-    '#886806', 'muddy green': '#657432', 'muddy yellow': '#bfac05',
-    'mulberry': '#920a4e', 'murky green': '#6c7a0e', 'mushroom': '#ba9e88',
-    'mustard': '#ceb301', 'mustard brown': '#ac7e04', 'mustard green':
-    '#a8b504', 'mustard yellow': '#d2bd0a', 'muted blue': '#3b719f',
-    'muted green': '#5fa052', 'muted pink': '#d1768f', 'muted purple':
-    '#805b87', 'nasty green': '#70b23f', 'navy': '#01153e', 'navy blue':
-    '#001146', 'navy green': '#35530a', 'neon blue': '#04d9ff',
-    'neon green': '#0cff0c', 'neon pink': '#fe019a', 'neon purple':
-    '#bc13fe', 'neon red': '#ff073a', 'neon yellow': '#cfff04', 'nice blue':
-    '#107ab0', 'night blue': '#040348', 'ocean': '#017b92', 'ocean blue':
-    '#03719c', 'ocean green': '#3d9973', 'ocher': '#bf9b0c', 'ochre':
-    '#bf9005', 'ocre': '#c69c04', 'off blue': '#5684ae', 'off green':
-    '#6ba353', 'off white': '#ffffe4', 'off yellow': '#f1f33f', 'old pink':
-    '#c77986', 'old rose': '#c87f89', 'olive': '#6e750e', 'olive brown':
-    '#645403', 'olive drab': '#6f7632', 'olive green': '#677a04',
-    'olive yellow': '#c2b709', 'orange': '#f97306', 'orange brown':
-    '#be6400', 'orange pink': '#ff6f52', 'orange red': '#fd411e',
-    'orange yellow': '#ffad01', 'orangeish': '#fd8d49', 'orangered':
-    '#fe420f', 'orangey brown': '#b16002', 'orangey red': '#fa4224',
-    'orangey yellow': '#fdb915', 'orangish': '#fc824a', 'orangish brown':
-    '#b25f03', 'orangish red': '#f43605', 'orchid': '#c875c4', 'pale':
-    '#fff9d0', 'pale aqua': '#b8ffeb', 'pale blue': '#d0fefe', 'pale brown':
-    '#b1916e', 'pale cyan': '#b7fffa', 'pale gold': '#fdde6c', 'pale green':
-    '#c7fdb5', 'pale grey': '#fdfdfe', 'pale lavender': '#eecffe',
-    'pale light green': '#b1fc99', 'pale lilac': '#e4cbff', 'pale lime':
-    '#befd73', 'pale lime green': '#b1ff65', 'pale magenta': '#d767ad',
-    'pale mauve': '#fed0fc', 'pale olive': '#b9cc81', 'pale olive green':
-    '#b1d27b', 'pale orange': '#ffa756', 'pale peach': '#ffe5ad',
-    'pale pink': '#ffcfdc', 'pale purple': '#b790d4', 'pale red': '#d9544d',
-    'pale rose': '#fdc1c5', 'pale salmon': '#ffb19a', 'pale sky blue':
-    '#bdf6fe', 'pale teal': '#82cbb2', 'pale turquoise': '#a5fbd5',
-    'pale violet': '#ceaefa', 'pale yellow': '#ffff84', 'parchment':
-    '#fefcaf', 'pastel blue': '#a2bffe', 'pastel green': '#b0ff9d',
-    'pastel orange': '#ff964f', 'pastel pink': '#ffbacd', 'pastel purple':
-    '#caa0ff', 'pastel red': '#db5856', 'pastel yellow': '#fffe71', 'pea':
-    '#a4bf20', 'pea green': '#8eab12', 'pea soup': '#929901',
-    'pea soup green': '#94a617', 'peach': '#ffb07c', 'peachy pink':
-    '#ff9a8a', 'peacock blue': '#016795', 'pear': '#cbf85f', 'periwinkle':
-    '#8e82fe', 'periwinkle blue': '#8f99fb', 'perrywinkle': '#8f8ce7',
-    'petrol': '#005f6a', 'pig pink': '#e78ea5', 'pine': '#2b5d34',
-    'pine green': '#0a481e', 'pink': '#ff81c0', 'pink purple': '#db4bda',
-    'pink red': '#f5054f', 'pink/purple': '#ef1de7', 'pinkish': '#d46a7e',
-    'pinkish brown': '#b17261', 'pinkish grey': '#c8aca9', 'pinkish orange':
-    '#ff724c', 'pinkish purple': '#d648d7', 'pinkish red': '#f10c45',
-    'pinkish tan': '#d99b82', 'pinky': '#fc86aa', 'pinky purple': '#c94cbe',
-    'pinky red': '#fc2647', 'piss yellow': '#ddd618', 'pistachio':
-    '#c0fa8b', 'plum': '#580f41', 'plum purple': '#4e0550', 'poison green':
-    '#40fd14', 'poo': '#8f7303', 'poo brown': '#885f01', 'poop': '#7f5e00',
-    'poop brown': '#7a5901', 'poop green': '#6f7c00', 'powder blue':
-    '#b1d1fc', 'powder pink': '#ffb2d0', 'primary blue': '#0804f9',
-    'prussian blue': '#004577', 'puce': '#a57e52', 'puke': '#a5a502',
-    'puke brown': '#947706', 'puke green': '#9aae07', 'puke yellow':
-    '#c2be0e', 'pumpkin': '#e17701', 'pumpkin orange': '#fb7d07',
-    'pure blue': '#0203e2', 'purple': '#7e1e9c', 'purple blue': '#632de9',
-    'purple brown': '#673a3f', 'purple grey': '#866f85', 'purple pink':
-    '#e03fd8', 'purple red': '#990147', 'purple/blue': '#5d21d0',
-    'purple/pink': '#d725de', 'purpleish': '#98568d', 'purpleish blue':
-    '#6140ef', 'purpleish pink': '#df4ec8', 'purpley': '#8756e4',
-    'purpley blue': '#5f34e7', 'purpley grey': '#947e94', 'purpley pink':
-    '#c83cb9', 'purplish': '#94568c', 'purplish blue': '#601ef9',
-    'purplish brown': '#6b4247', 'purplish grey': '#7a687f',
-    'purplish pink': '#ce5dae', 'purplish red': '#b0054b', 'purply':
-    '#983fb2', 'purply blue': '#661aee', 'purply pink': '#f075e6', 'putty':
-    '#beae8a', 'racing green': '#014600', 'radioactive green': '#2cfa1f',
-    'raspberry': '#b00149', 'raw sienna': '#9a6200', 'raw umber': '#a75e09',
-    'really light blue': '#d4ffff', 'red': '#e50000', 'red brown':
-    '#8b2e16', 'red orange': '#fd3c06', 'red pink': '#fa2a55', 'red purple':
-    '#820747', 'red violet': '#9e0168', 'red wine': '#8c0034', 'reddish':
-    '#c44240', 'reddish brown': '#7f2b0a', 'reddish grey': '#997570',
-    'reddish orange': '#f8481c', 'reddish pink': '#fe2c54',
-    'reddish purple': '#910951', 'reddy brown': '#6e1005', 'rich blue':
-    '#021bf9', 'rich purple': '#720058', 'robin egg blue': '#8af1fe',
-    "robin's egg": '#6dedfd', "robin's egg blue": '#98eff9', 'rosa':
-    '#fe86a4', 'rose': '#cf6275', 'rose pink': '#f7879a', 'rose red':
-    '#be013c', 'rosy pink': '#f6688e', 'rouge': '#ab1239', 'royal':
-    '#0c1793', 'royal blue': '#0504aa', 'royal purple': '#4b006e', 'ruby':
-    '#ca0147', 'russet': '#a13905', 'rust': '#a83c09', 'rust brown':
-    '#8b3103', 'rust orange': '#c45508', 'rust red': '#aa2704',
-    'rusty orange': '#cd5909', 'rusty red': '#af2f0d', 'saffron': '#feb209',
-    'sage': '#87ae73', 'sage green': '#88b378', 'salmon': '#ff796c',
-    'salmon pink': '#fe7b7c', 'sand': '#e2ca76', 'sand brown': '#cba560',
-    'sand yellow': '#fce166', 'sandstone': '#c9ae74', 'sandy': '#f1da7a',
-    'sandy brown': '#c4a661', 'sandy yellow': '#fdee73', 'sap green':
-    '#5c8b15', 'sapphire': '#2138ab', 'scarlet': '#be0119', 'sea':
-    '#3c9992', 'sea blue': '#047495', 'sea green': '#53fca1', 'seafoam':
-    '#80f9ad', 'seafoam blue': '#78d1b6', 'seafoam green': '#7af9ab',
-    'seaweed': '#18d17b', 'seaweed green': '#35ad6b', 'sepia': '#985e2b',
-    'shamrock': '#01b44c', 'shamrock green': '#02c14d', 'shit': '#7f5f00',
-    'shit brown': '#7b5804', 'shit green': '#758000', 'shocking pink':
-    '#fe02a2', 'sick green': '#9db92c', 'sickly green': '#94b21c',
-    'sickly yellow': '#d0e429', 'sienna': '#a9561e', 'silver': '#c5c9c7',
-    'sky': '#82cafc', 'sky blue': '#75bbfd', 'slate': '#516572',
-    'slate blue': '#5b7c99', 'slate green': '#658d6d', 'slate grey':
-    '#59656d', 'slime green': '#99cc04', 'snot': '#acbb0d', 'snot green':
-    '#9dc100', 'soft blue': '#6488ea', 'soft green': '#6fc276', 'soft pink':
-    '#fdb0c0', 'soft purple': '#a66fb5', 'spearmint': '#1ef876',
-    'spring green': '#a9f971', 'spruce': '#0a5f38', 'squash': '#f2ab15',
-    'steel': '#738595', 'steel blue': '#5a7d9a', 'steel grey': '#6f828a',
-    'stone': '#ada587', 'stormy blue': '#507b9c', 'straw': '#fcf679',
-    'strawberry': '#fb2943', 'strong blue': '#0c06f7', 'strong pink':
-    '#ff0789', 'sun yellow': '#ffdf22', 'sunflower': '#ffc512',
-    'sunflower yellow': '#ffda03', 'sunny yellow': '#fff917',
-    'sunshine yellow': '#fffd37', 'swamp': '#698339', 'swamp green':
-    '#748500', 'tan': '#d1b26f', 'tan brown': '#ab7e4c', 'tan green':
-    '#a9be70', 'tangerine': '#ff9408', 'taupe': '#b9a281', 'tea': '#65ab7c',
-    'tea green': '#bdf8a3', 'teal': '#029386', 'teal blue': '#01889f',
-    'teal green': '#25a36f', 'tealish': '#24bca8', 'tealish green':
-    '#0cdc73', 'terra cotta': '#c9643b', 'terracota': '#cb6843',
-    'terracotta': '#ca6641', 'tiffany blue': '#7bf2da', 'tomato': '#ef4026',
-    'tomato red': '#ec2d01', 'topaz': '#13bbaf', 'toupe': '#c7ac7d',
-    'toxic green': '#61de2a', 'tree green': '#2a7e19', 'true blue':
-    '#010fcc', 'true green': '#089404', 'turquoise': '#06c2ac',
-    'turquoise blue': '#06b1c4', 'turquoise green': '#04f489',
-    'turtle green': '#75b84f', 'twilight': '#4e518b', 'twilight blue':
-    '#0a437a', 'ugly blue': '#31668a', 'ugly brown': '#7d7103',
-    'ugly green': '#7a9703', 'ugly pink': '#cd7584', 'ugly purple':
-    '#a442a0', 'ugly yellow': '#d0c101', 'ultramarine': '#2000b1',
-    'ultramarine blue': '#1805db', 'umber': '#b26400', 'velvet': '#750851',
-    'vermillion': '#f4320c', 'very dark blue': '#000133', 'very dark brown':
-    '#1d0200', 'very dark green': '#062e03', 'very dark purple': '#2a0134',
-    'very light blue': '#d5ffff', 'very light brown': '#d3b683',
-    'very light green': '#d1ffbd', 'very light pink': '#fff4f2',
-    'very light purple': '#f6cefc', 'very pale blue': '#d6fffe',
-    'very pale green': '#cffdbc', 'vibrant blue': '#0339f8',
-    'vibrant green': '#0add08', 'vibrant purple': '#ad03de', 'violet':
-    '#9a0eea', 'violet blue': '#510ac9', 'violet pink': '#fb5ffc',
-    'violet red': '#a50055', 'viridian': '#1e9167', 'vivid blue': '#152eff',
-    'vivid green': '#2fef10', 'vivid purple': '#9900fa', 'vomit': '#a2a415',
-    'vomit green': '#89a203', 'vomit yellow': '#c7c10c', 'warm blue':
-    '#4b57db', 'warm brown': '#964e02', 'warm grey': '#978a84', 'warm pink':
-    '#fb5581', 'warm purple': '#952e8f', 'washed out green': '#bcf5a6',
-    'water blue': '#0e87cc', 'watermelon': '#fd4659', 'weird green':
-    '#3ae57f', 'wheat': '#fbdd7e', 'white': '#ffffff', 'windows blue':
-    '#3778bf', 'wine': '#80013f', 'wine red': '#7b0323', 'wintergreen':
-    '#20f986', 'wisteria': '#a87dc2', 'yellow': '#ffff14', 'yellow brown':
-    '#b79400', 'yellow green': '#c0fb2d', 'yellow ochre': '#cb9d06',
-    'yellow orange': '#fcb001', 'yellow tan': '#ffe36e', 'yellow/green':
-    '#c8fd3d', 'yellowgreen': '#bbf90f', 'yellowish': '#faee66',
-    'yellowish brown': '#9b7a01', 'yellowish green': '#b0dd16',
-    'yellowish orange': '#ffab0f', 'yellowish tan': '#fcfc81',
-    'yellowy brown': '#ae8b0c', 'yellowy green': '#bff128'}
+xkcd_rgb = {'acid green': '#8ffe09',
+            'adobe': '#bd6c48',
+            'algae': '#54ac68',
+            'algae green': '#21c36f',
+            'almost black': '#070d0d',
+            'amber': '#feb308',
+            'amethyst': '#9b5fc0',
+            'apple': '#6ecb3c',
+            'apple green': '#76cd26',
+            'apricot': '#ffb16d',
+            'aqua': '#13eac9',
+            'aqua blue': '#02d8e9',
+            'aqua green': '#12e193',
+            'aqua marine': '#2ee8bb',
+            'aquamarine': '#04d8b2',
+            'army green': '#4b5d16',
+            'asparagus': '#77ab56',
+            'aubergine': '#3d0734',
+            'auburn': '#9a3001',
+            'avocado': '#90b134',
+            'avocado green': '#87a922',
+            'azul': '#1d5dec',
+            'azure': '#069af3',
+            'baby blue': '#a2cffe',
+            'baby green': '#8cff9e',
+            'baby pink': '#ffb7ce',
+            'baby poo': '#ab9004',
+            'baby poop': '#937c00',
+            'baby poop green': '#8f9805',
+            'baby puke green': '#b6c406',
+            'baby purple': '#ca9bf7',
+            'baby shit brown': '#ad900d',
+            'baby shit green': '#889717',
+            'banana': '#ffff7e',
+            'banana yellow': '#fafe4b',
+            'barbie pink': '#fe46a5',
+            'barf green': '#94ac02',
+            'barney': '#ac1db8',
+            'barney purple': '#a00498',
+            'battleship grey': '#6b7c85',
+            'beige': '#e6daa6',
+            'berry': '#990f4b',
+            'bile': '#b5c306',
+            'black': '#000000',
+            'bland': '#afa88b',
+            'blood': '#770001',
+            'blood orange': '#fe4b03',
+            'blood red': '#980002',
+            'blue': '#0343df',
+            'blue blue': '#2242c7',
+            'blue green': '#137e6d',
+            'blue grey': '#607c8e',
+            'blue purple': '#5729ce',
+            'blue violet': '#5d06e9',
+            'blue with a hint of purple': '#533cc6',
+            'blue/green': '#0f9b8e',
+            'blue/grey': '#758da3',
+            'blue/purple': '#5a06ef',
+            'blueberry': '#464196',
+            'bluegreen': '#017a79',
+            'bluegrey': '#85a3b2',
+            'bluey green': '#2bb179',
+            'bluey grey': '#89a0b0',
+            'bluey purple': '#6241c7',
+            'bluish': '#2976bb',
+            'bluish green': '#10a674',
+            'bluish grey': '#748b97',
+            'bluish purple': '#703be7',
+            'blurple': '#5539cc',
+            'blush': '#f29e8e',
+            'blush pink': '#fe828c',
+            'booger': '#9bb53c',
+            'booger green': '#96b403',
+            'bordeaux': '#7b002c',
+            'boring green': '#63b365',
+            'bottle green': '#044a05',
+            'brick': '#a03623',
+            'brick orange': '#c14a09',
+            'brick red': '#8f1402',
+            'bright aqua': '#0bf9ea',
+            'bright blue': '#0165fc',
+            'bright cyan': '#41fdfe',
+            'bright green': '#01ff07',
+            'bright lavender': '#c760ff',
+            'bright light blue': '#26f7fd',
+            'bright light green': '#2dfe54',
+            'bright lilac': '#c95efb',
+            'bright lime': '#87fd05',
+            'bright lime green': '#65fe08',
+            'bright magenta': '#ff08e8',
+            'bright olive': '#9cbb04',
+            'bright orange': '#ff5b00',
+            'bright pink': '#fe01b1',
+            'bright purple': '#be03fd',
+            'bright red': '#ff000d',
+            'bright sea green': '#05ffa6',
+            'bright sky blue': '#02ccfe',
+            'bright teal': '#01f9c6',
+            'bright turquoise': '#0ffef9',
+            'bright violet': '#ad0afd',
+            'bright yellow': '#fffd01',
+            'bright yellow green': '#9dff00',
+            'british racing green': '#05480d',
+            'bronze': '#a87900',
+            'brown': '#653700',
+            'brown green': '#706c11',
+            'brown grey': '#8d8468',
+            'brown orange': '#b96902',
+            'brown red': '#922b05',
+            'brown yellow': '#b29705',
+            'brownish': '#9c6d57',
+            'brownish green': '#6a6e09',
+            'brownish grey': '#86775f',
+            'brownish orange': '#cb7723',
+            'brownish pink': '#c27e79',
+            'brownish purple': '#76424e',
+            'brownish red': '#9e3623',
+            'brownish yellow': '#c9b003',
+            'browny green': '#6f6c0a',
+            'browny orange': '#ca6b02',
+            'bruise': '#7e4071',
+            'bubble gum pink': '#ff69af',
+            'bubblegum': '#ff6cb5',
+            'bubblegum pink': '#fe83cc',
+            'buff': '#fef69e',
+            'burgundy': '#610023',
+            'burnt orange': '#c04e01',
+            'burnt red': '#9f2305',
+            'burnt siena': '#b75203',
+            'burnt sienna': '#b04e0f',
+            'burnt umber': '#a0450e',
+            'burnt yellow': '#d5ab09',
+            'burple': '#6832e3',
+            'butter': '#ffff81',
+            'butter yellow': '#fffd74',
+            'butterscotch': '#fdb147',
+            'cadet blue': '#4e7496',
+            'camel': '#c69f59',
+            'camo': '#7f8f4e',
+            'camo green': '#526525',
+            'camouflage green': '#4b6113',
+            'canary': '#fdff63',
+            'canary yellow': '#fffe40',
+            'candy pink': '#ff63e9',
+            'caramel': '#af6f09',
+            'carmine': '#9d0216',
+            'carnation': '#fd798f',
+            'carnation pink': '#ff7fa7',
+            'carolina blue': '#8ab8fe',
+            'celadon': '#befdb7',
+            'celery': '#c1fd95',
+            'cement': '#a5a391',
+            'cerise': '#de0c62',
+            'cerulean': '#0485d1',
+            'cerulean blue': '#056eee',
+            'charcoal': '#343837',
+            'charcoal grey': '#3c4142',
+            'chartreuse': '#c1f80a',
+            'cherry': '#cf0234',
+            'cherry red': '#f7022a',
+            'chestnut': '#742802',
+            'chocolate': '#3d1c02',
+            'chocolate brown': '#411900',
+            'cinnamon': '#ac4f06',
+            'claret': '#680018',
+            'clay': '#b66a50',
+            'clay brown': '#b2713d',
+            'clear blue': '#247afd',
+            'cloudy blue': '#acc2d9',
+            'cobalt': '#1e488f',
+            'cobalt blue': '#030aa7',
+            'cocoa': '#875f42',
+            'coffee': '#a6814c',
+            'cool blue': '#4984b8',
+            'cool green': '#33b864',
+            'cool grey': '#95a3a6',
+            'copper': '#b66325',
+            'coral': '#fc5a50',
+            'coral pink': '#ff6163',
+            'cornflower': '#6a79f7',
+            'cornflower blue': '#5170d7',
+            'cranberry': '#9e003a',
+            'cream': '#ffffc2',
+            'creme': '#ffffb6',
+            'crimson': '#8c000f',
+            'custard': '#fffd78',
+            'cyan': '#00ffff',
+            'dandelion': '#fedf08',
+            'dark': '#1b2431',
+            'dark aqua': '#05696b',
+            'dark aquamarine': '#017371',
+            'dark beige': '#ac9362',
+            'dark blue': '#00035b',
+            'dark blue green': '#005249',
+            'dark blue grey': '#1f3b4d',
+            'dark brown': '#341c02',
+            'dark coral': '#cf524e',
+            'dark cream': '#fff39a',
+            'dark cyan': '#0a888a',
+            'dark forest green': '#002d04',
+            'dark fuchsia': '#9d0759',
+            'dark gold': '#b59410',
+            'dark grass green': '#388004',
+            'dark green': '#033500',
+            'dark green blue': '#1f6357',
+            'dark grey': '#363737',
+            'dark grey blue': '#29465b',
+            'dark hot pink': '#d90166',
+            'dark indigo': '#1f0954',
+            'dark khaki': '#9b8f55',
+            'dark lavender': '#856798',
+            'dark lilac': '#9c6da5',
+            'dark lime': '#84b701',
+            'dark lime green': '#7ebd01',
+            'dark magenta': '#960056',
+            'dark maroon': '#3c0008',
+            'dark mauve': '#874c62',
+            'dark mint': '#48c072',
+            'dark mint green': '#20c073',
+            'dark mustard': '#a88905',
+            'dark navy': '#000435',
+            'dark navy blue': '#00022e',
+            'dark olive': '#373e02',
+            'dark olive green': '#3c4d03',
+            'dark orange': '#c65102',
+            'dark pastel green': '#56ae57',
+            'dark peach': '#de7e5d',
+            'dark periwinkle': '#665fd1',
+            'dark pink': '#cb416b',
+            'dark plum': '#3f012c',
+            'dark purple': '#35063e',
+            'dark red': '#840000',
+            'dark rose': '#b5485d',
+            'dark royal blue': '#02066f',
+            'dark sage': '#598556',
+            'dark salmon': '#c85a53',
+            'dark sand': '#a88f59',
+            'dark sea green': '#11875d',
+            'dark seafoam': '#1fb57a',
+            'dark seafoam green': '#3eaf76',
+            'dark sky blue': '#448ee4',
+            'dark slate blue': '#214761',
+            'dark tan': '#af884a',
+            'dark taupe': '#7f684e',
+            'dark teal': '#014d4e',
+            'dark turquoise': '#045c5a',
+            'dark violet': '#34013f',
+            'dark yellow': '#d5b60a',
+            'dark yellow green': '#728f02',
+            'darkblue': '#030764',
+            'darkgreen': '#054907',
+            'darkish blue': '#014182',
+            'darkish green': '#287c37',
+            'darkish pink': '#da467d',
+            'darkish purple': '#751973',
+            'darkish red': '#a90308',
+            'deep aqua': '#08787f',
+            'deep blue': '#040273',
+            'deep brown': '#410200',
+            'deep green': '#02590f',
+            'deep lavender': '#8d5eb7',
+            'deep lilac': '#966ebd',
+            'deep magenta': '#a0025c',
+            'deep orange': '#dc4d01',
+            'deep pink': '#cb0162',
+            'deep purple': '#36013f',
+            'deep red': '#9a0200',
+            'deep rose': '#c74767',
+            'deep sea blue': '#015482',
+            'deep sky blue': '#0d75f8',
+            'deep teal': '#00555a',
+            'deep turquoise': '#017374',
+            'deep violet': '#490648',
+            'denim': '#3b638c',
+            'denim blue': '#3b5b92',
+            'desert': '#ccad60',
+            'diarrhea': '#9f8303',
+            'dirt': '#8a6e45',
+            'dirt brown': '#836539',
+            'dirty blue': '#3f829d',
+            'dirty green': '#667e2c',
+            'dirty orange': '#c87606',
+            'dirty pink': '#ca7b80',
+            'dirty purple': '#734a65',
+            'dirty yellow': '#cdc50a',
+            'dodger blue': '#3e82fc',
+            'drab': '#828344',
+            'drab green': '#749551',
+            'dried blood': '#4b0101',
+            'duck egg blue': '#c3fbf4',
+            'dull blue': '#49759c',
+            'dull brown': '#876e4b',
+            'dull green': '#74a662',
+            'dull orange': '#d8863b',
+            'dull pink': '#d5869d',
+            'dull purple': '#84597e',
+            'dull red': '#bb3f3f',
+            'dull teal': '#5f9e8f',
+            'dull yellow': '#eedc5b',
+            'dusk': '#4e5481',
+            'dusk blue': '#26538d',
+            'dusky blue': '#475f94',
+            'dusky pink': '#cc7a8b',
+            'dusky purple': '#895b7b',
+            'dusky rose': '#ba6873',
+            'dust': '#b2996e',
+            'dusty blue': '#5a86ad',
+            'dusty green': '#76a973',
+            'dusty lavender': '#ac86a8',
+            'dusty orange': '#f0833a',
+            'dusty pink': '#d58a94',
+            'dusty purple': '#825f87',
+            'dusty red': '#b9484e',
+            'dusty rose': '#c0737a',
+            'dusty teal': '#4c9085',
+            'earth': '#a2653e',
+            'easter green': '#8cfd7e',
+            'easter purple': '#c071fe',
+            'ecru': '#feffca',
+            'egg shell': '#fffcc4',
+            'eggplant': '#380835',
+            'eggplant purple': '#430541',
+            'eggshell': '#ffffd4',
+            'eggshell blue': '#c4fff7',
+            'electric blue': '#0652ff',
+            'electric green': '#21fc0d',
+            'electric lime': '#a8ff04',
+            'electric pink': '#ff0490',
+            'electric purple': '#aa23ff',
+            'emerald': '#01a049',
+            'emerald green': '#028f1e',
+            'evergreen': '#05472a',
+            'faded blue': '#658cbb',
+            'faded green': '#7bb274',
+            'faded orange': '#f0944d',
+            'faded pink': '#de9dac',
+            'faded purple': '#916e99',
+            'faded red': '#d3494e',
+            'faded yellow': '#feff7f',
+            'fawn': '#cfaf7b',
+            'fern': '#63a950',
+            'fern green': '#548d44',
+            'fire engine red': '#fe0002',
+            'flat blue': '#3c73a8',
+            'flat green': '#699d4c',
+            'fluorescent green': '#08ff08',
+            'fluro green': '#0aff02',
+            'foam green': '#90fda9',
+            'forest': '#0b5509',
+            'forest green': '#06470c',
+            'forrest green': '#154406',
+            'french blue': '#436bad',
+            'fresh green': '#69d84f',
+            'frog green': '#58bc08',
+            'fuchsia': '#ed0dd9',
+            'gold': '#dbb40c',
+            'golden': '#f5bf03',
+            'golden brown': '#b27a01',
+            'golden rod': '#f9bc08',
+            'golden yellow': '#fec615',
+            'goldenrod': '#fac205',
+            'grape': '#6c3461',
+            'grape purple': '#5d1451',
+            'grapefruit': '#fd5956',
+            'grass': '#5cac2d',
+            'grass green': '#3f9b0b',
+            'grassy green': '#419c03',
+            'green': '#15b01a',
+            'green apple': '#5edc1f',
+            'green blue': '#06b48b',
+            'green brown': '#544e03',
+            'green grey': '#77926f',
+            'green teal': '#0cb577',
+            'green yellow': '#c9ff27',
+            'green/blue': '#01c08d',
+            'green/yellow': '#b5ce08',
+            'greenblue': '#23c48b',
+            'greenish': '#40a368',
+            'greenish beige': '#c9d179',
+            'greenish blue': '#0b8b87',
+            'greenish brown': '#696112',
+            'greenish cyan': '#2afeb7',
+            'greenish grey': '#96ae8d',
+            'greenish tan': '#bccb7a',
+            'greenish teal': '#32bf84',
+            'greenish turquoise': '#00fbb0',
+            'greenish yellow': '#cdfd02',
+            'greeny blue': '#42b395',
+            'greeny brown': '#696006',
+            'greeny grey': '#7ea07a',
+            'greeny yellow': '#c6f808',
+            'grey': '#929591',
+            'grey blue': '#6b8ba4',
+            'grey brown': '#7f7053',
+            'grey green': '#789b73',
+            'grey pink': '#c3909b',
+            'grey purple': '#826d8c',
+            'grey teal': '#5e9b8a',
+            'grey/blue': '#647d8e',
+            'grey/green': '#86a17d',
+            'greyblue': '#77a1b5',
+            'greyish': '#a8a495',
+            'greyish blue': '#5e819d',
+            'greyish brown': '#7a6a4f',
+            'greyish green': '#82a67d',
+            'greyish pink': '#c88d94',
+            'greyish purple': '#887191',
+            'greyish teal': '#719f91',
+            'gross green': '#a0bf16',
+            'gunmetal': '#536267',
+            'hazel': '#8e7618',
+            'heather': '#a484ac',
+            'heliotrope': '#d94ff5',
+            'highlighter green': '#1bfc06',
+            'hospital green': '#9be5aa',
+            'hot green': '#25ff29',
+            'hot magenta': '#f504c9',
+            'hot pink': '#ff028d',
+            'hot purple': '#cb00f5',
+            'hunter green': '#0b4008',
+            'ice': '#d6fffa',
+            'ice blue': '#d7fffe',
+            'icky green': '#8fae22',
+            'indian red': '#850e04',
+            'indigo': '#380282',
+            'indigo blue': '#3a18b1',
+            'iris': '#6258c4',
+            'irish green': '#019529',
+            'ivory': '#ffffcb',
+            'jade': '#1fa774',
+            'jade green': '#2baf6a',
+            'jungle green': '#048243',
+            'kelley green': '#009337',
+            'kelly green': '#02ab2e',
+            'kermit green': '#5cb200',
+            'key lime': '#aeff6e',
+            'khaki': '#aaa662',
+            'khaki green': '#728639',
+            'kiwi': '#9cef43',
+            'kiwi green': '#8ee53f',
+            'lavender': '#c79fef',
+            'lavender blue': '#8b88f8',
+            'lavender pink': '#dd85d7',
+            'lawn green': '#4da409',
+            'leaf': '#71aa34',
+            'leaf green': '#5ca904',
+            'leafy green': '#51b73b',
+            'leather': '#ac7434',
+            'lemon': '#fdff52',
+            'lemon green': '#adf802',
+            'lemon lime': '#bffe28',
+            'lemon yellow': '#fdff38',
+            'lichen': '#8fb67b',
+            'light aqua': '#8cffdb',
+            'light aquamarine': '#7bfdc7',
+            'light beige': '#fffeb6',
+            'light blue': '#95d0fc',
+            'light blue green': '#7efbb3',
+            'light blue grey': '#b7c9e2',
+            'light bluish green': '#76fda8',
+            'light bright green': '#53fe5c',
+            'light brown': '#ad8150',
+            'light burgundy': '#a8415b',
+            'light cyan': '#acfffc',
+            'light eggplant': '#894585',
+            'light forest green': '#4f9153',
+            'light gold': '#fddc5c',
+            'light grass green': '#9af764',
+            'light green': '#96f97b',
+            'light green blue': '#56fca2',
+            'light greenish blue': '#63f7b4',
+            'light grey': '#d8dcd6',
+            'light grey blue': '#9dbcd4',
+            'light grey green': '#b7e1a1',
+            'light indigo': '#6d5acf',
+            'light khaki': '#e6f2a2',
+            'light lavendar': '#efc0fe',
+            'light lavender': '#dfc5fe',
+            'light light blue': '#cafffb',
+            'light light green': '#c8ffb0',
+            'light lilac': '#edc8ff',
+            'light lime': '#aefd6c',
+            'light lime green': '#b9ff66',
+            'light magenta': '#fa5ff7',
+            'light maroon': '#a24857',
+            'light mauve': '#c292a1',
+            'light mint': '#b6ffbb',
+            'light mint green': '#a6fbb2',
+            'light moss green': '#a6c875',
+            'light mustard': '#f7d560',
+            'light navy': '#155084',
+            'light navy blue': '#2e5a88',
+            'light neon green': '#4efd54',
+            'light olive': '#acbf69',
+            'light olive green': '#a4be5c',
+            'light orange': '#fdaa48',
+            'light pastel green': '#b2fba5',
+            'light pea green': '#c4fe82',
+            'light peach': '#ffd8b1',
+            'light periwinkle': '#c1c6fc',
+            'light pink': '#ffd1df',
+            'light plum': '#9d5783',
+            'light purple': '#bf77f6',
+            'light red': '#ff474c',
+            'light rose': '#ffc5cb',
+            'light royal blue': '#3a2efe',
+            'light sage': '#bcecac',
+            'light salmon': '#fea993',
+            'light sea green': '#98f6b0',
+            'light seafoam': '#a0febf',
+            'light seafoam green': '#a7ffb5',
+            'light sky blue': '#c6fcff',
+            'light tan': '#fbeeac',
+            'light teal': '#90e4c1',
+            'light turquoise': '#7ef4cc',
+            'light urple': '#b36ff6',
+            'light violet': '#d6b4fc',
+            'light yellow': '#fffe7a',
+            'light yellow green': '#ccfd7f',
+            'light yellowish green': '#c2ff89',
+            'lightblue': '#7bc8f6',
+            'lighter green': '#75fd63',
+            'lighter purple': '#a55af4',
+            'lightgreen': '#76ff7b',
+            'lightish blue': '#3d7afd',
+            'lightish green': '#61e160',
+            'lightish purple': '#a552e6',
+            'lightish red': '#fe2f4a',
+            'lilac': '#cea2fd',
+            'liliac': '#c48efd',
+            'lime': '#aaff32',
+            'lime green': '#89fe05',
+            'lime yellow': '#d0fe1d',
+            'lipstick': '#d5174e',
+            'lipstick red': '#c0022f',
+            'macaroni and cheese': '#efb435',
+            'magenta': '#c20078',
+            'mahogany': '#4a0100',
+            'maize': '#f4d054',
+            'mango': '#ffa62b',
+            'manilla': '#fffa86',
+            'marigold': '#fcc006',
+            'marine': '#042e60',
+            'marine blue': '#01386a',
+            'maroon': '#650021',
+            'mauve': '#ae7181',
+            'medium blue': '#2c6fbb',
+            'medium brown': '#7f5112',
+            'medium green': '#39ad48',
+            'medium grey': '#7d7f7c',
+            'medium pink': '#f36196',
+            'medium purple': '#9e43a2',
+            'melon': '#ff7855',
+            'merlot': '#730039',
+            'metallic blue': '#4f738e',
+            'mid blue': '#276ab3',
+            'mid green': '#50a747',
+            'midnight': '#03012d',
+            'midnight blue': '#020035',
+            'midnight purple': '#280137',
+            'military green': '#667c3e',
+            'milk chocolate': '#7f4e1e',
+            'mint': '#9ffeb0',
+            'mint green': '#8fff9f',
+            'minty green': '#0bf77d',
+            'mocha': '#9d7651',
+            'moss': '#769958',
+            'moss green': '#658b38',
+            'mossy green': '#638b27',
+            'mud': '#735c12',
+            'mud brown': '#60460f',
+            'mud green': '#606602',
+            'muddy brown': '#886806',
+            'muddy green': '#657432',
+            'muddy yellow': '#bfac05',
+            'mulberry': '#920a4e',
+            'murky green': '#6c7a0e',
+            'mushroom': '#ba9e88',
+            'mustard': '#ceb301',
+            'mustard brown': '#ac7e04',
+            'mustard green': '#a8b504',
+            'mustard yellow': '#d2bd0a',
+            'muted blue': '#3b719f',
+            'muted green': '#5fa052',
+            'muted pink': '#d1768f',
+            'muted purple': '#805b87',
+            'nasty green': '#70b23f',
+            'navy': '#01153e',
+            'navy blue': '#001146',
+            'navy green': '#35530a',
+            'neon blue': '#04d9ff',
+            'neon green': '#0cff0c',
+            'neon pink': '#fe019a',
+            'neon purple': '#bc13fe',
+            'neon red': '#ff073a',
+            'neon yellow': '#cfff04',
+            'nice blue': '#107ab0',
+            'night blue': '#040348',
+            'ocean': '#017b92',
+            'ocean blue': '#03719c',
+            'ocean green': '#3d9973',
+            'ocher': '#bf9b0c',
+            'ochre': '#bf9005',
+            'ocre': '#c69c04',
+            'off blue': '#5684ae',
+            'off green': '#6ba353',
+            'off white': '#ffffe4',
+            'off yellow': '#f1f33f',
+            'old pink': '#c77986',
+            'old rose': '#c87f89',
+            'olive': '#6e750e',
+            'olive brown': '#645403',
+            'olive drab': '#6f7632',
+            'olive green': '#677a04',
+            'olive yellow': '#c2b709',
+            'orange': '#f97306',
+            'orange brown': '#be6400',
+            'orange pink': '#ff6f52',
+            'orange red': '#fd411e',
+            'orange yellow': '#ffad01',
+            'orangeish': '#fd8d49',
+            'orangered': '#fe420f',
+            'orangey brown': '#b16002',
+            'orangey red': '#fa4224',
+            'orangey yellow': '#fdb915',
+            'orangish': '#fc824a',
+            'orangish brown': '#b25f03',
+            'orangish red': '#f43605',
+            'orchid': '#c875c4',
+            'pale': '#fff9d0',
+            'pale aqua': '#b8ffeb',
+            'pale blue': '#d0fefe',
+            'pale brown': '#b1916e',
+            'pale cyan': '#b7fffa',
+            'pale gold': '#fdde6c',
+            'pale green': '#c7fdb5',
+            'pale grey': '#fdfdfe',
+            'pale lavender': '#eecffe',
+            'pale light green': '#b1fc99',
+            'pale lilac': '#e4cbff',
+            'pale lime': '#befd73',
+            'pale lime green': '#b1ff65',
+            'pale magenta': '#d767ad',
+            'pale mauve': '#fed0fc',
+            'pale olive': '#b9cc81',
+            'pale olive green': '#b1d27b',
+            'pale orange': '#ffa756',
+            'pale peach': '#ffe5ad',
+            'pale pink': '#ffcfdc',
+            'pale purple': '#b790d4',
+            'pale red': '#d9544d',
+            'pale rose': '#fdc1c5',
+            'pale salmon': '#ffb19a',
+            'pale sky blue': '#bdf6fe',
+            'pale teal': '#82cbb2',
+            'pale turquoise': '#a5fbd5',
+            'pale violet': '#ceaefa',
+            'pale yellow': '#ffff84',
+            'parchment': '#fefcaf',
+            'pastel blue': '#a2bffe',
+            'pastel green': '#b0ff9d',
+            'pastel orange': '#ff964f',
+            'pastel pink': '#ffbacd',
+            'pastel purple': '#caa0ff',
+            'pastel red': '#db5856',
+            'pastel yellow': '#fffe71',
+            'pea': '#a4bf20',
+            'pea green': '#8eab12',
+            'pea soup': '#929901',
+            'pea soup green': '#94a617',
+            'peach': '#ffb07c',
+            'peachy pink': '#ff9a8a',
+            'peacock blue': '#016795',
+            'pear': '#cbf85f',
+            'periwinkle': '#8e82fe',
+            'periwinkle blue': '#8f99fb',
+            'perrywinkle': '#8f8ce7',
+            'petrol': '#005f6a',
+            'pig pink': '#e78ea5',
+            'pine': '#2b5d34',
+            'pine green': '#0a481e',
+            'pink': '#ff81c0',
+            'pink purple': '#db4bda',
+            'pink red': '#f5054f',
+            'pink/purple': '#ef1de7',
+            'pinkish': '#d46a7e',
+            'pinkish brown': '#b17261',
+            'pinkish grey': '#c8aca9',
+            'pinkish orange': '#ff724c',
+            'pinkish purple': '#d648d7',
+            'pinkish red': '#f10c45',
+            'pinkish tan': '#d99b82',
+            'pinky': '#fc86aa',
+            'pinky purple': '#c94cbe',
+            'pinky red': '#fc2647',
+            'piss yellow': '#ddd618',
+            'pistachio': '#c0fa8b',
+            'plum': '#580f41',
+            'plum purple': '#4e0550',
+            'poison green': '#40fd14',
+            'poo': '#8f7303',
+            'poo brown': '#885f01',
+            'poop': '#7f5e00',
+            'poop brown': '#7a5901',
+            'poop green': '#6f7c00',
+            'powder blue': '#b1d1fc',
+            'powder pink': '#ffb2d0',
+            'primary blue': '#0804f9',
+            'prussian blue': '#004577',
+            'puce': '#a57e52',
+            'puke': '#a5a502',
+            'puke brown': '#947706',
+            'puke green': '#9aae07',
+            'puke yellow': '#c2be0e',
+            'pumpkin': '#e17701',
+            'pumpkin orange': '#fb7d07',
+            'pure blue': '#0203e2',
+            'purple': '#7e1e9c',
+            'purple blue': '#632de9',
+            'purple brown': '#673a3f',
+            'purple grey': '#866f85',
+            'purple pink': '#e03fd8',
+            'purple red': '#990147',
+            'purple/blue': '#5d21d0',
+            'purple/pink': '#d725de',
+            'purpleish': '#98568d',
+            'purpleish blue': '#6140ef',
+            'purpleish pink': '#df4ec8',
+            'purpley': '#8756e4',
+            'purpley blue': '#5f34e7',
+            'purpley grey': '#947e94',
+            'purpley pink': '#c83cb9',
+            'purplish': '#94568c',
+            'purplish blue': '#601ef9',
+            'purplish brown': '#6b4247',
+            'purplish grey': '#7a687f',
+            'purplish pink': '#ce5dae',
+            'purplish red': '#b0054b',
+            'purply': '#983fb2',
+            'purply blue': '#661aee',
+            'purply pink': '#f075e6',
+            'putty': '#beae8a',
+            'racing green': '#014600',
+            'radioactive green': '#2cfa1f',
+            'raspberry': '#b00149',
+            'raw sienna': '#9a6200',
+            'raw umber': '#a75e09',
+            'really light blue': '#d4ffff',
+            'red': '#e50000',
+            'red brown': '#8b2e16',
+            'red orange': '#fd3c06',
+            'red pink': '#fa2a55',
+            'red purple': '#820747',
+            'red violet': '#9e0168',
+            'red wine': '#8c0034',
+            'reddish': '#c44240',
+            'reddish brown': '#7f2b0a',
+            'reddish grey': '#997570',
+            'reddish orange': '#f8481c',
+            'reddish pink': '#fe2c54',
+            'reddish purple': '#910951',
+            'reddy brown': '#6e1005',
+            'rich blue': '#021bf9',
+            'rich purple': '#720058',
+            'robin egg blue': '#8af1fe',
+            "robin's egg": '#6dedfd',
+            "robin's egg blue": '#98eff9',
+            'rosa': '#fe86a4',
+            'rose': '#cf6275',
+            'rose pink': '#f7879a',
+            'rose red': '#be013c',
+            'rosy pink': '#f6688e',
+            'rouge': '#ab1239',
+            'royal': '#0c1793',
+            'royal blue': '#0504aa',
+            'royal purple': '#4b006e',
+            'ruby': '#ca0147',
+            'russet': '#a13905',
+            'rust': '#a83c09',
+            'rust brown': '#8b3103',
+            'rust orange': '#c45508',
+            'rust red': '#aa2704',
+            'rusty orange': '#cd5909',
+            'rusty red': '#af2f0d',
+            'saffron': '#feb209',
+            'sage': '#87ae73',
+            'sage green': '#88b378',
+            'salmon': '#ff796c',
+            'salmon pink': '#fe7b7c',
+            'sand': '#e2ca76',
+            'sand brown': '#cba560',
+            'sand yellow': '#fce166',
+            'sandstone': '#c9ae74',
+            'sandy': '#f1da7a',
+            'sandy brown': '#c4a661',
+            'sandy yellow': '#fdee73',
+            'sap green': '#5c8b15',
+            'sapphire': '#2138ab',
+            'scarlet': '#be0119',
+            'sea': '#3c9992',
+            'sea blue': '#047495',
+            'sea green': '#53fca1',
+            'seafoam': '#80f9ad',
+            'seafoam blue': '#78d1b6',
+            'seafoam green': '#7af9ab',
+            'seaweed': '#18d17b',
+            'seaweed green': '#35ad6b',
+            'sepia': '#985e2b',
+            'shamrock': '#01b44c',
+            'shamrock green': '#02c14d',
+            'shit': '#7f5f00',
+            'shit brown': '#7b5804',
+            'shit green': '#758000',
+            'shocking pink': '#fe02a2',
+            'sick green': '#9db92c',
+            'sickly green': '#94b21c',
+            'sickly yellow': '#d0e429',
+            'sienna': '#a9561e',
+            'silver': '#c5c9c7',
+            'sky': '#82cafc',
+            'sky blue': '#75bbfd',
+            'slate': '#516572',
+            'slate blue': '#5b7c99',
+            'slate green': '#658d6d',
+            'slate grey': '#59656d',
+            'slime green': '#99cc04',
+            'snot': '#acbb0d',
+            'snot green': '#9dc100',
+            'soft blue': '#6488ea',
+            'soft green': '#6fc276',
+            'soft pink': '#fdb0c0',
+            'soft purple': '#a66fb5',
+            'spearmint': '#1ef876',
+            'spring green': '#a9f971',
+            'spruce': '#0a5f38',
+            'squash': '#f2ab15',
+            'steel': '#738595',
+            'steel blue': '#5a7d9a',
+            'steel grey': '#6f828a',
+            'stone': '#ada587',
+            'stormy blue': '#507b9c',
+            'straw': '#fcf679',
+            'strawberry': '#fb2943',
+            'strong blue': '#0c06f7',
+            'strong pink': '#ff0789',
+            'sun yellow': '#ffdf22',
+            'sunflower': '#ffc512',
+            'sunflower yellow': '#ffda03',
+            'sunny yellow': '#fff917',
+            'sunshine yellow': '#fffd37',
+            'swamp': '#698339',
+            'swamp green': '#748500',
+            'tan': '#d1b26f',
+            'tan brown': '#ab7e4c',
+            'tan green': '#a9be70',
+            'tangerine': '#ff9408',
+            'taupe': '#b9a281',
+            'tea': '#65ab7c',
+            'tea green': '#bdf8a3',
+            'teal': '#029386',
+            'teal blue': '#01889f',
+            'teal green': '#25a36f',
+            'tealish': '#24bca8',
+            'tealish green': '#0cdc73',
+            'terra cotta': '#c9643b',
+            'terracota': '#cb6843',
+            'terracotta': '#ca6641',
+            'tiffany blue': '#7bf2da',
+            'tomato': '#ef4026',
+            'tomato red': '#ec2d01',
+            'topaz': '#13bbaf',
+            'toupe': '#c7ac7d',
+            'toxic green': '#61de2a',
+            'tree green': '#2a7e19',
+            'true blue': '#010fcc',
+            'true green': '#089404',
+            'turquoise': '#06c2ac',
+            'turquoise blue': '#06b1c4',
+            'turquoise green': '#04f489',
+            'turtle green': '#75b84f',
+            'twilight': '#4e518b',
+            'twilight blue': '#0a437a',
+            'ugly blue': '#31668a',
+            'ugly brown': '#7d7103',
+            'ugly green': '#7a9703',
+            'ugly pink': '#cd7584',
+            'ugly purple': '#a442a0',
+            'ugly yellow': '#d0c101',
+            'ultramarine': '#2000b1',
+            'ultramarine blue': '#1805db',
+            'umber': '#b26400',
+            'velvet': '#750851',
+            'vermillion': '#f4320c',
+            'very dark blue': '#000133',
+            'very dark brown': '#1d0200',
+            'very dark green': '#062e03',
+            'very dark purple': '#2a0134',
+            'very light blue': '#d5ffff',
+            'very light brown': '#d3b683',
+            'very light green': '#d1ffbd',
+            'very light pink': '#fff4f2',
+            'very light purple': '#f6cefc',
+            'very pale blue': '#d6fffe',
+            'very pale green': '#cffdbc',
+            'vibrant blue': '#0339f8',
+            'vibrant green': '#0add08',
+            'vibrant purple': '#ad03de',
+            'violet': '#9a0eea',
+            'violet blue': '#510ac9',
+            'violet pink': '#fb5ffc',
+            'violet red': '#a50055',
+            'viridian': '#1e9167',
+            'vivid blue': '#152eff',
+            'vivid green': '#2fef10',
+            'vivid purple': '#9900fa',
+            'vomit': '#a2a415',
+            'vomit green': '#89a203',
+            'vomit yellow': '#c7c10c',
+            'warm blue': '#4b57db',
+            'warm brown': '#964e02',
+            'warm grey': '#978a84',
+            'warm pink': '#fb5581',
+            'warm purple': '#952e8f',
+            'washed out green': '#bcf5a6',
+            'water blue': '#0e87cc',
+            'watermelon': '#fd4659',
+            'weird green': '#3ae57f',
+            'wheat': '#fbdd7e',
+            'white': '#ffffff',
+            'windows blue': '#3778bf',
+            'wine': '#80013f',
+            'wine red': '#7b0323',
+            'wintergreen': '#20f986',
+            'wisteria': '#a87dc2',
+            'yellow': '#ffff14',
+            'yellow brown': '#b79400',
+            'yellow green': '#c0fb2d',
+            'yellow ochre': '#cb9d06',
+            'yellow orange': '#fcb001',
+            'yellow tan': '#ffe36e',
+            'yellow/green': '#c8fd3d',
+            'yellowgreen': '#bbf90f',
+            'yellowish': '#faee66',
+            'yellowish brown': '#9b7a01',
+            'yellowish green': '#b0dd16',
+            'yellowish orange': '#ffab0f',
+            'yellowish tan': '#fcfc81',
+            'yellowy brown': '#ae8b0c',
+            'yellowy green': '#bff128'}
diff --git a/seaborn/distributions.py b/seaborn/distributions.py
index 36572494..f8ec166c 100644
--- a/seaborn/distributions.py
+++ b/seaborn/distributions.py
@@ -4,6 +4,7 @@ from functools import partial
 import math
 import textwrap
 import warnings
+
 import numpy as np
 import pandas as pd
 import matplotlib as mpl
@@ -12,111 +13,1444 @@ import matplotlib.transforms as tx
 from matplotlib.cbook import normalize_kwargs
 from matplotlib.colors import to_rgba
 from matplotlib.collections import LineCollection
+
 from ._base import VectorPlotter
+
+# We have moved univariate histogram computation over to the new Hist class,
+# but still use the older Histogram for bivariate computation.
 from ._statistics import ECDF, Histogram, KDE
 from ._stats.counting import Hist
-from .axisgrid import FacetGrid, _facet_docs
-from .utils import remove_na, _get_transform_functions, _kde_support, _check_argument, _assign_default_kwargs, _default_color
+
+from .axisgrid import (
+    FacetGrid,
+    _facet_docs,
+)
+from .utils import (
+    remove_na,
+    _get_transform_functions,
+    _kde_support,
+    _check_argument,
+    _assign_default_kwargs,
+    _default_color,
+)
 from .palettes import color_palette
 from .external import husl
 from .external.kde import gaussian_kde
-from ._docstrings import DocstringComponents, _core_docs
-__all__ = ['displot', 'histplot', 'kdeplot', 'ecdfplot', 'rugplot', 'distplot']
-_dist_params = dict(multiple=
-    """
+from ._docstrings import (
+    DocstringComponents,
+    _core_docs,
+)
+
+
+__all__ = ["displot", "histplot", "kdeplot", "ecdfplot", "rugplot", "distplot"]
+
+# ==================================================================================== #
+# Module documentation
+# ==================================================================================== #
+
+_dist_params = dict(
+
+    multiple="""
 multiple : {{"layer", "stack", "fill"}}
     Method for drawing multiple elements when semantic mapping creates subsets.
     Only relevant with univariate data.
-    """
-    , log_scale=
-    """
+    """,
+    log_scale="""
 log_scale : bool or number, or pair of bools or numbers
     Set axis scale(s) to log. A single value sets the data axis for any numeric
     axes in the plot. A pair of values sets each axis independently.
     Numeric values are interpreted as the desired base (default 10).
     When `None` or `False`, seaborn defers to the existing Axes scale.
-    """
-    , legend=
-    '\nlegend : bool\n    If False, suppress the legend for semantic variables.\n    '
-    , cbar=
-    """
+    """,
+    legend="""
+legend : bool
+    If False, suppress the legend for semantic variables.
+    """,
+    cbar="""
 cbar : bool
     If True, add a colorbar to annotate the color mapping in a bivariate plot.
     Note: Does not currently support plots with a ``hue`` variable well.
-    """
-    , cbar_ax=
-    """
+    """,
+    cbar_ax="""
 cbar_ax : :class:`matplotlib.axes.Axes`
     Pre-existing axes for the colorbar.
-    """
-    , cbar_kws=
-    """
+    """,
+    cbar_kws="""
 cbar_kws : dict
     Additional parameters passed to :meth:`matplotlib.figure.Figure.colorbar`.
-    """
-    )
-_param_docs = DocstringComponents.from_nested_components(core=_core_docs[
-    'params'], facets=DocstringComponents(_facet_docs), dist=
-    DocstringComponents(_dist_params), kde=DocstringComponents.
-    from_function_params(KDE.__init__), hist=DocstringComponents.
-    from_function_params(Histogram.__init__), ecdf=DocstringComponents.
-    from_function_params(ECDF.__init__))
+    """,
+)
+
+_param_docs = DocstringComponents.from_nested_components(
+    core=_core_docs["params"],
+    facets=DocstringComponents(_facet_docs),
+    dist=DocstringComponents(_dist_params),
+    kde=DocstringComponents.from_function_params(KDE.__init__),
+    hist=DocstringComponents.from_function_params(Histogram.__init__),
+    ecdf=DocstringComponents.from_function_params(ECDF.__init__),
+)
+
+
+# ==================================================================================== #
+# Internal API
+# ==================================================================================== #


 class _DistributionPlotter(VectorPlotter):
-    wide_structure = {'x': '@values', 'hue': '@columns'}
-    flat_structure = {'x': '@values'}

-    def __init__(self, data=None, variables={}):
+    wide_structure = {"x": "@values", "hue": "@columns"}
+    flat_structure = {"x": "@values"}
+
+    def __init__(
+        self,
+        data=None,
+        variables={},
+    ):
+
         super().__init__(data=data, variables=variables)

     @property
     def univariate(self):
         """Return True if only x or y are used."""
-        pass
+        # TODO this could go down to core, but putting it here now.
+        # We'd want to be conceptually clear that univariate only applies
+        # to x/y and not to other semantics, which can exist.
+        # We haven't settled on a good conceptual name for x/y.
+        return bool({"x", "y"} - set(self.variables))

     @property
     def data_variable(self):
         """Return the variable with data for univariate plots."""
-        pass
+        # TODO This could also be in core, but it should have a better name.
+        if not self.univariate:
+            raise AttributeError("This is not a univariate plot")
+        return {"x", "y"}.intersection(self.variables).pop()

     @property
     def has_xy_data(self):
         """Return True at least one of x or y is defined."""
-        pass
+        # TODO see above points about where this should go
+        return bool({"x", "y"} & set(self.variables))

-    def _add_legend(self, ax_obj, artist, fill, element, multiple, alpha,
-        artist_kws, legend_kws):
+    def _add_legend(
+        self,
+        ax_obj, artist, fill, element, multiple, alpha, artist_kws, legend_kws,
+    ):
         """Add artists that reflect semantic mappings and put then in a legend."""
-        pass
+        # TODO note that this doesn't handle numeric mappings like the relational plots
+        handles = []
+        labels = []
+        for level in self._hue_map.levels:
+            color = self._hue_map(level)
+
+            kws = self._artist_kws(
+                artist_kws, fill, element, multiple, color, alpha
+            )
+
+            # color gets added to the kws to work around an issue with barplot's color
+            # cycle integration but it causes problems in this context where we are
+            # setting artist properties directly, so pop it off here
+            if "facecolor" in kws:
+                kws.pop("color", None)
+
+            handles.append(artist(**kws))
+            labels.append(level)
+
+        if isinstance(ax_obj, mpl.axes.Axes):
+            ax_obj.legend(handles, labels, title=self.variables["hue"], **legend_kws)
+        else:  # i.e. a FacetGrid. TODO make this better
+            legend_data = dict(zip(labels, handles))
+            ax_obj.add_legend(
+                legend_data,
+                title=self.variables["hue"],
+                label_order=self.var_levels["hue"],
+                **legend_kws
+            )

     def _artist_kws(self, kws, fill, element, multiple, color, alpha):
         """Handle differences between artists in filled/unfilled plots."""
-        pass
+        kws = kws.copy()
+        if fill:
+            kws = normalize_kwargs(kws, mpl.collections.PolyCollection)
+            kws.setdefault("facecolor", to_rgba(color, alpha))
+
+            if element == "bars":
+                # Make bar() interface with property cycle correctly
+                # https://github.com/matplotlib/matplotlib/issues/19385
+                kws["color"] = "none"
+
+            if multiple in ["stack", "fill"] or element == "bars":
+                kws.setdefault("edgecolor", mpl.rcParams["patch.edgecolor"])
+            else:
+                kws.setdefault("edgecolor", to_rgba(color, 1))
+        elif element == "bars":
+            kws["facecolor"] = "none"
+            kws["edgecolor"] = to_rgba(color, alpha)
+        else:
+            kws["color"] = to_rgba(color, alpha)
+        return kws

     def _quantile_to_level(self, data, quantile):
         """Return data levels corresponding to quantile cuts of mass."""
-        pass
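+        # Sort values from largest to smallest, accumulate their share of the total
+        # mass, and find the value where that running share reaches 1 - quantile;
+        # i.e. the returned level has (roughly) `quantile` of the mass below it.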
+        isoprop = np.asarray(quantile)
+        values = np.ravel(data)
+        sorted_values = np.sort(values)[::-1]
+        normalized_values = np.cumsum(sorted_values) / values.sum()
+        idx = np.searchsorted(normalized_values, 1 - isoprop)
+        levels = np.take(sorted_values, idx, mode="clip")
+        return levels

     def _cmap_from_color(self, color):
         """Return a sequential colormap given a color seed."""
-        pass
+        # Like so much else here, this is broadly useful, but keeping it
+        # in this class to signify that I haven't thought overly hard about it...
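+        # Build a 256-step ramp in husl space: hold the seed hue, taper the
+        # saturation with a cosine, and sweep lightness from 35 to 80, then reverse
+        # the ramp so that larger values map to darker colors.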
+        r, g, b, _ = to_rgba(color)
+        h, s, _ = husl.rgb_to_husl(r, g, b)
+        xx = np.linspace(-1, 1, int(1.15 * 256))[:256]
+        ramp = np.zeros((256, 3))
+        ramp[:, 0] = h
+        ramp[:, 1] = s * np.cos(xx)
+        ramp[:, 2] = np.linspace(35, 80, 256)
+        colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)
+        return mpl.colors.ListedColormap(colors[::-1])

     def _default_discrete(self):
         """Find default values for discrete hist estimation based on variable type."""
-        pass
+        if self.univariate:
+            discrete = self.var_types[self.data_variable] == "categorical"
+        else:
+            discrete_x = self.var_types["x"] == "categorical"
+            discrete_y = self.var_types["y"] == "categorical"
+            discrete = discrete_x, discrete_y
+        return discrete

     def _resolve_multiple(self, curves, multiple):
         """Modify the density data structure to handle multiple densities."""
-        pass
+
+        # Default baselines have all densities starting at 0
+        baselines = {k: np.zeros_like(v) for k, v in curves.items()}
+
+        # TODO we should have some central clearinghouse for checking if any
+        # "grouping" (terminnology?) semantics have been assigned
+        if "hue" not in self.variables:
+            return curves, baselines
+
+        if multiple in ("stack", "fill"):
+
+            # Setting stack or fill means that the curves share a
+            # support grid / set of bin edges, so we can make a dataframe
+            # Reverse the column order to plot from top to bottom
+            curves = pd.DataFrame(curves).iloc[:, ::-1]
+
+            # Find column groups that are nested within col/row variables
+            column_groups = {}
+            for i, keyd in enumerate(map(dict, curves.columns)):
+                facet_key = keyd.get("col", None), keyd.get("row", None)
+                column_groups.setdefault(facet_key, [])
+                column_groups[facet_key].append(i)
+
+            baselines = curves.copy()
+
+            for col_idxs in column_groups.values():
+                cols = curves.columns[col_idxs]
+
+                norm_constant = curves[cols].sum(axis="columns")
+
+                # Take the cumulative sum to stack
+                curves[cols] = curves[cols].cumsum(axis="columns")
+
+                # Normalize by row sum to fill
+                if multiple == "fill":
+                    curves[cols] = curves[cols].div(norm_constant, axis="index")
+
+                # Define where each segment starts
+                baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)
+
+        if multiple == "dodge":
+
+            # Account for the unique semantic (non-faceting) levels
+            # This will require rethinking if we add other semantics!
+            hue_levels = self.var_levels["hue"]
+            n = len(hue_levels)
+            f_fwd, f_inv = self._get_scale_transforms(self.data_variable)
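+            # Each bin is split into n equal slices in transformed (e.g. log)
+            # coordinates, so dodged bars keep equal visual widths on nonlinear axes.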
+            for key in curves:
+
+                level = dict(key)["hue"]
+                hist = curves[key].reset_index(name="heights")
+                level_idx = hue_levels.index(level)
+
+                a = f_fwd(hist["edges"])
+                b = f_fwd(hist["edges"] + hist["widths"])
+                w = (b - a) / n
+                new_min = f_inv(a + level_idx * w)
+                new_max = f_inv(a + (level_idx + 1) * w)
+                hist["widths"] = new_max - new_min
+                hist["edges"] = new_min
+
+                curves[key] = hist.set_index(["edges", "widths"])["heights"]
+
+        return curves, baselines
+
+    # -------------------------------------------------------------------------------- #
+    # Computation
+    # -------------------------------------------------------------------------------- #
+
+    def _compute_univariate_density(
+        self,
+        data_variable,
+        common_norm,
+        common_grid,
+        estimate_kws,
+        warn_singular=True,
+    ):
+
+        # Initialize the estimator object
+        estimator = KDE(**estimate_kws)
+
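+        # A shared grid / normalization is only meaningful when variables beyond
+        # x/y (e.g. hue) split the data into subsets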
+        if set(self.variables) - {"x", "y"}:
+            if common_grid:
+                all_observations = self.comp_data.dropna()
+                estimator.define_support(all_observations[data_variable])
+        else:
+            common_norm = False
+
+        all_data = self.plot_data.dropna()
+        if common_norm and "weights" in all_data:
+            whole_weight = all_data["weights"].sum()
+        else:
+            whole_weight = len(all_data)
+
+        densities = {}
+
+        for sub_vars, sub_data in self.iter_data("hue", from_comp_data=True):
+
+            # Extract the data points from this sub set and remove nulls
+            observations = sub_data[data_variable]
+
+            # Extract the weights for this subset of observations
+            if "weights" in self.variables:
+                weights = sub_data["weights"]
+                part_weight = weights.sum()
+            else:
+                weights = None
+                part_weight = len(sub_data)
+
+            # Estimate the density of observations at this level
+            variance = np.nan_to_num(observations.var())
+            singular = len(observations) < 2 or math.isclose(variance, 0)
+            try:
+                if not singular:
+                    # Convoluted approach needed because numerical failures
+                    # can manifest in a few different ways.
+                    density, support = estimator(observations, weights=weights)
+            except np.linalg.LinAlgError:
+                singular = True
+
+            if singular:
+                msg = (
+                    "Dataset has 0 variance; skipping density estimate. "
+                    "Pass `warn_singular=False` to disable this warning."
+                )
+                if warn_singular:
+                    warnings.warn(msg, UserWarning, stacklevel=4)
+                continue
+
+            # Invert the scaling of the support points
+            _, f_inv = self._get_scale_transforms(self.data_variable)
+            support = f_inv(support)
+
+            # Apply a scaling factor so that the integral over all subsets is 1
+            if common_norm:
+                density *= part_weight / whole_weight
+
+            # Store the density for this level
+            key = tuple(sub_vars.items())
+            densities[key] = pd.Series(density, index=support)
+
+        return densities
+
+    # -------------------------------------------------------------------------------- #
+    # Plotting
+    # -------------------------------------------------------------------------------- #
+
+    def plot_univariate_histogram(
+        self,
+        multiple,
+        element,
+        fill,
+        common_norm,
+        common_bins,
+        shrink,
+        kde,
+        kde_kws,
+        color,
+        legend,
+        line_kws,
+        estimate_kws,
+        **plot_kws,
+    ):
+
+        # -- Default keyword dicts
+        kde_kws = {} if kde_kws is None else kde_kws.copy()
+        line_kws = {} if line_kws is None else line_kws.copy()
+        estimate_kws = {} if estimate_kws is None else estimate_kws.copy()
+
+        # --  Input checking
+        _check_argument("multiple", ["layer", "stack", "fill", "dodge"], multiple)
+        _check_argument("element", ["bars", "step", "poly"], element)
+
+        auto_bins_with_weights = (
+            "weights" in self.variables
+            and estimate_kws["bins"] == "auto"
+            and estimate_kws["binwidth"] is None
+            and not estimate_kws["discrete"]
+        )
+        if auto_bins_with_weights:
+            msg = (
+                "`bins` cannot be 'auto' when using weights. "
+                "Setting `bins=10`, but you will likely want to adjust."
+            )
+            warnings.warn(msg, UserWarning)
+            estimate_kws["bins"] = 10
+
+        # Simplify downstream code if we are not normalizing
+        if estimate_kws["stat"] == "count":
+            common_norm = False
+
+        orient = self.data_variable
+
+        # Now initialize the Histogram estimator
+        estimator = Hist(**estimate_kws)
+        histograms = {}
+
+        # Do pre-compute housekeeping related to multiple groups
+        all_data = self.comp_data.dropna()
+        all_weights = all_data.get("weights", None)
+
+        multiple_histograms = set(self.variables) - {"x", "y"}
+        if multiple_histograms:
+            if common_bins:
+                bin_kws = estimator._define_bin_params(all_data, orient, None)
+        else:
+            common_norm = False
+
+        if common_norm and all_weights is not None:
+            whole_weight = all_weights.sum()
+        else:
+            whole_weight = len(all_data)
+
+        # Estimate the smoothed kernel densities, for use later
+        if kde:
+            # TODO alternatively, clip at min/max bins?
+            kde_kws.setdefault("cut", 0)
+            kde_kws["cumulative"] = estimate_kws["cumulative"]
+            densities = self._compute_univariate_density(
+                self.data_variable,
+                common_norm,
+                common_bins,
+                kde_kws,
+                warn_singular=False,
+            )
+
+        # First pass through the data to compute the histograms
+        for sub_vars, sub_data in self.iter_data("hue", from_comp_data=True):
+
+            # Prepare the relevant data
+            key = tuple(sub_vars.items())
+            orient = self.data_variable
+
+            if "weights" in self.variables:
+                sub_data["weight"] = sub_data.pop("weights")
+                part_weight = sub_data["weight"].sum()
+            else:
+                part_weight = len(sub_data)
+
+            # Do the histogram computation
+            if not (multiple_histograms and common_bins):
+                bin_kws = estimator._define_bin_params(sub_data, orient, None)
+            res = estimator._normalize(estimator._eval(sub_data, orient, bin_kws))
+            heights = res[estimator.stat].to_numpy()
+            widths = res["space"].to_numpy()
+            edges = res[orient].to_numpy() - widths / 2
+
+            # Rescale the smoothed curve to match the histogram
+            if kde and key in densities:
+                density = densities[key]
+                if estimator.cumulative:
+                    hist_norm = heights.max()
+                else:
+                    hist_norm = (heights * widths).sum()
+                densities[key] *= hist_norm
+
+            # Convert edges back to original units for plotting
+            ax = self._get_axes(sub_vars)
+            _, inv = _get_transform_functions(ax, self.data_variable)
+            widths = inv(edges + widths) - inv(edges)
+            edges = inv(edges)
+
+            # Pack the histogram data and metadata together
+            edges = edges + (1 - shrink) / 2 * widths
+            widths *= shrink
+            index = pd.MultiIndex.from_arrays([
+                pd.Index(edges, name="edges"),
+                pd.Index(widths, name="widths"),
+            ])
+            hist = pd.Series(heights, index=index, name="heights")
+
+            # Apply scaling to normalize across groups
+            if common_norm:
+                hist *= part_weight / whole_weight
+
+            # Store the finalized histogram data for future plotting
+            histograms[key] = hist
+
+        # Modify the histogram and density data to resolve multiple groups
+        histograms, baselines = self._resolve_multiple(histograms, multiple)
+        if kde:
+            densities, _ = self._resolve_multiple(
+                densities, None if multiple == "dodge" else multiple
+            )
+
+        # Set autoscaling-related meta
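+        # ("Sticky" edges keep matplotlib's autoscaler from padding margins past them)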
+        sticky_stat = (0, 1) if multiple == "fill" else (0, np.inf)
+        if multiple == "fill":
+            # Filled plots should not have any margins
+            bin_vals = histograms.index.to_frame()
+            edges = bin_vals["edges"]
+            widths = bin_vals["widths"]
+            sticky_data = (
+                edges.min(),
+                edges.max() + widths.loc[edges.idxmax()]
+            )
+        else:
+            sticky_data = []
+
+        # --- Handle default visual attributes
+
+        # Note: default linewidth is determined after plotting
+
+        # Default alpha should depend on other parameters
+        if fill:
+            # Note: will need to account for other grouping semantics if added
+            if "hue" in self.variables and multiple == "layer":
+                default_alpha = .5 if element == "bars" else .25
+            elif kde:
+                default_alpha = .5
+            else:
+                default_alpha = .75
+        else:
+            default_alpha = 1
+        alpha = plot_kws.pop("alpha", default_alpha)  # TODO make parameter?
+
+        hist_artists = []
+
+        # Go back through the dataset and draw the plots
+        for sub_vars, _ in self.iter_data("hue", reverse=True):
+
+            key = tuple(sub_vars.items())
+            hist = histograms[key].rename("heights").reset_index()
+            bottom = np.asarray(baselines[key])
+
+            ax = self._get_axes(sub_vars)
+
+            # Define the matplotlib attributes that depend on semantic mapping
+            if "hue" in self.variables:
+                sub_color = self._hue_map(sub_vars["hue"])
+            else:
+                sub_color = color
+
+            artist_kws = self._artist_kws(
+                plot_kws, fill, element, multiple, sub_color, alpha
+            )
+
+            if element == "bars":
+
+                # Use matplotlib bar plotting
+
+                plot_func = ax.bar if self.data_variable == "x" else ax.barh
+                artists = plot_func(
+                    hist["edges"],
+                    hist["heights"] - bottom,
+                    hist["widths"],
+                    bottom,
+                    align="edge",
+                    **artist_kws,
+                )
+
+                for bar in artists:
+                    if self.data_variable == "x":
+                        bar.sticky_edges.x[:] = sticky_data
+                        bar.sticky_edges.y[:] = sticky_stat
+                    else:
+                        bar.sticky_edges.x[:] = sticky_stat
+                        bar.sticky_edges.y[:] = sticky_data
+
+                hist_artists.extend(artists)
+
+            else:
+
+                # Use either fill_between or plot to draw hull of histogram
+                if element == "step":
+
+                    final = hist.iloc[-1]
+                    x = np.append(hist["edges"], final["edges"] + final["widths"])
+                    y = np.append(hist["heights"], final["heights"])
+                    b = np.append(bottom, bottom[-1])
+
+                    if self.data_variable == "x":
+                        step = "post"
+                        drawstyle = "steps-post"
+                    else:
+                        step = "post"  # fillbetweenx handles mapping internally
+                        drawstyle = "steps-pre"
+
+                elif element == "poly":
+
+                    x = hist["edges"] + hist["widths"] / 2
+                    y = hist["heights"]
+                    b = bottom
+
+                    step = None
+                    drawstyle = None
+
+                if self.data_variable == "x":
+                    if fill:
+                        artist = ax.fill_between(x, b, y, step=step, **artist_kws)
+                    else:
+                        artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)
+                    artist.sticky_edges.x[:] = sticky_data
+                    artist.sticky_edges.y[:] = sticky_stat
+                else:
+                    if fill:
+                        artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)
+                    else:
+                        artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)
+                    artist.sticky_edges.x[:] = sticky_stat
+                    artist.sticky_edges.y[:] = sticky_data
+
+                hist_artists.append(artist)
+
+            if kde:
+
+                # Add in the density curves
+
+                try:
+                    density = densities[key]
+                except KeyError:
+                    continue
+                support = density.index
+
+                if "x" in self.variables:
+                    line_args = support, density
+                    sticky_x, sticky_y = None, (0, np.inf)
+                else:
+                    line_args = density, support
+                    sticky_x, sticky_y = (0, np.inf), None
+
+                line_kws["color"] = to_rgba(sub_color, 1)
+                line, = ax.plot(
+                    *line_args, **line_kws,
+                )
+
+                if sticky_x is not None:
+                    line.sticky_edges.x[:] = sticky_x
+                if sticky_y is not None:
+                    line.sticky_edges.y[:] = sticky_y
+
+        if element == "bars" and "linewidth" not in plot_kws:
+
+            # Now we handle linewidth, which depends on the scaling of the plot
+
+            # We will base everything on the minimum bin width
+            hist_metadata = pd.concat([
+                # Use .items for generality over dict or df
+                h.index.to_frame() for _, h in histograms.items()
+            ]).reset_index(drop=True)
+            thin_bar_idx = hist_metadata["widths"].idxmin()
+            binwidth = hist_metadata.loc[thin_bar_idx, "widths"]
+            left_edge = hist_metadata.loc[thin_bar_idx, "edges"]
+
+            # Set initial value
+            default_linewidth = math.inf
+
+            # Loop through subsets based only on facet variables
+            for sub_vars, _ in self.iter_data():
+
+                ax = self._get_axes(sub_vars)
+
+                # Needed in some cases to get valid transforms.
+                # Innocuous in other cases?
+                ax.autoscale_view()
+
+                # Convert binwidth from data coordinates to pixels
+                pts_x, pts_y = 72 / ax.figure.dpi * abs(
+                    ax.transData.transform([left_edge + binwidth] * 2)
+                    - ax.transData.transform([left_edge] * 2)
+                )
+                if self.data_variable == "x":
+                    binwidth_points = pts_x
+                else:
+                    binwidth_points = pts_y
+
+                # The relative size of the lines depends on the appearance
+                # This is a provisional value and may need more tweaking
+                default_linewidth = min(.1 * binwidth_points, default_linewidth)
+
+            # Set the attributes
+            for bar in hist_artists:
+
+                # Don't let the lines get too thick
+                max_linewidth = bar.get_linewidth()
+                if not fill:
+                    max_linewidth *= 1.5
+
+                linewidth = min(default_linewidth, max_linewidth)
+
+                # If not filling, don't let lines disappear
+                if not fill:
+                    min_linewidth = .5
+                    linewidth = max(linewidth, min_linewidth)
+
+                bar.set_linewidth(linewidth)
+
+        # --- Finalize the plot ----
+
+        # Axis labels
+        ax = self.ax if self.ax is not None else self.facets.axes.flat[0]
+        default_x = default_y = ""
+        if self.data_variable == "x":
+            default_y = estimator.stat.capitalize()
+        if self.data_variable == "y":
+            default_x = estimator.stat.capitalize()
+        self._add_axis_labels(ax, default_x, default_y)
+
+        # Legend for semantic variables
+        if "hue" in self.variables and legend:
+
+            if fill or element == "bars":
+                artist = partial(mpl.patches.Patch)
+            else:
+                artist = partial(mpl.lines.Line2D, [], [])
+
+            ax_obj = self.ax if self.ax is not None else self.facets
+            self._add_legend(
+                ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},
+            )
+
+    def plot_bivariate_histogram(
+        self,
+        common_bins, common_norm,
+        thresh, pthresh, pmax,
+        color, legend,
+        cbar, cbar_ax, cbar_kws,
+        estimate_kws,
+        **plot_kws,
+    ):
+
+        # Default keyword dicts
+        cbar_kws = {} if cbar_kws is None else cbar_kws.copy()
+
+        # Now initialize the Histogram estimator
+        estimator = Histogram(**estimate_kws)
+
+        # Do pre-compute housekeeping related to multiple groups
+        if set(self.variables) - {"x", "y"}:
+            all_data = self.comp_data.dropna()
+            if common_bins:
+                estimator.define_bin_params(
+                    all_data["x"],
+                    all_data["y"],
+                    all_data.get("weights", None),
+                )
+        else:
+            common_norm = False
+
+        # -- Determine colormap threshold and norm based on the full data
+
+        full_heights = []
+        for _, sub_data in self.iter_data(from_comp_data=True):
+            sub_heights, _ = estimator(
+                sub_data["x"], sub_data["y"], sub_data.get("weights", None)
+            )
+            full_heights.append(sub_heights)
+
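+        # Use one color normalization when there are no grouping variables, or when
+        # the caller asked for a common norm across them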
+        common_color_norm = not set(self.variables) - {"x", "y"} or common_norm
+
+        if pthresh is not None and common_color_norm:
+            thresh = self._quantile_to_level(full_heights, pthresh)
+
+        plot_kws.setdefault("vmin", 0)
+        if common_color_norm:
+            if pmax is not None:
+                vmax = self._quantile_to_level(full_heights, pmax)
+            else:
+                vmax = plot_kws.pop("vmax", max(map(np.max, full_heights)))
+        else:
+            vmax = None
+
+        # Get a default color
+        # (We won't follow the color cycle here, as multiple plots are unlikely)
+        if color is None:
+            color = "C0"
+
+        # --- Loop over data (subsets) and draw the histograms
+        for sub_vars, sub_data in self.iter_data("hue", from_comp_data=True):
+
+            if sub_data.empty:
+                continue
+
+            # Do the histogram computation
+            heights, (x_edges, y_edges) = estimator(
+                sub_data["x"],
+                sub_data["y"],
+                weights=sub_data.get("weights", None),
+            )
+
+            # Get the axes for this plot
+            ax = self._get_axes(sub_vars)
+
+            # Invert the scale for the edges
+            _, inv_x = _get_transform_functions(ax, "x")
+            _, inv_y = _get_transform_functions(ax, "y")
+            x_edges = inv_x(x_edges)
+            y_edges = inv_y(y_edges)
+
+            # Apply scaling to normalize across groups
+            if estimator.stat != "count" and common_norm:
+                heights *= len(sub_data) / len(all_data)
+
+            # Define the specific kwargs for this artist
+            artist_kws = plot_kws.copy()
+            if "hue" in self.variables:
+                color = self._hue_map(sub_vars["hue"])
+                cmap = self._cmap_from_color(color)
+                artist_kws["cmap"] = cmap
+            else:
+                cmap = artist_kws.pop("cmap", None)
+                if isinstance(cmap, str):
+                    cmap = color_palette(cmap, as_cmap=True)
+                elif cmap is None:
+                    cmap = self._cmap_from_color(color)
+                artist_kws["cmap"] = cmap
+
+            # Set the upper norm on the colormap
+            if not common_color_norm and pmax is not None:
+                vmax = self._quantile_to_level(heights, pmax)
+            if vmax is not None:
+                artist_kws["vmax"] = vmax
+
+            # Make cells at or below the threshold transparent
+            if not common_color_norm and pthresh:
+                thresh = self._quantile_to_level(heights, pthresh)
+            if thresh is not None:
+                heights = np.ma.masked_less_equal(heights, thresh)
+
+            # pcolormesh is going to turn the grid off, but we want to keep it
+            # I'm not sure if there's a better way to get the grid state
+            x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])
+            y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])
+
+            mesh = ax.pcolormesh(
+                x_edges,
+                y_edges,
+                heights.T,
+                **artist_kws,
+            )
+
+            # pcolormesh sets sticky edges, but we only want them if not thresholding
+            if thresh is not None:
+                mesh.sticky_edges.x[:] = []
+                mesh.sticky_edges.y[:] = []
+
+            # Add an optional colorbar
+            # Note, we want to improve this. When hue is used, it will stack
+            # multiple colorbars with redundant ticks in an ugly way.
+            # But it's going to take some work to have multiple colorbars that
+            # share ticks nicely.
+            if cbar:
+                ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)
+
+            # Reset the grid state
+            if x_grid:
+                ax.grid(True, axis="x")
+            if y_grid:
+                ax.grid(True, axis="y")
+
+        # --- Finalize the plot
+
+        ax = self.ax if self.ax is not None else self.facets.axes.flat[0]
+        self._add_axis_labels(ax)
+
+        if "hue" in self.variables and legend:
+
+            # TODO if possible, I would like to move the contour
+            # intensity information into the legend too and label the
+            # iso proportions rather than the raw density values
+
+            artist_kws = {}
+            artist = partial(mpl.patches.Patch)
+            ax_obj = self.ax if self.ax is not None else self.facets
+            self._add_legend(
+                ax_obj, artist, True, False, "layer", 1, artist_kws, {},
+            )
+
+    def plot_univariate_density(
+        self,
+        multiple,
+        common_norm,
+        common_grid,
+        warn_singular,
+        fill,
+        color,
+        legend,
+        estimate_kws,
+        **plot_kws,
+    ):
+
+        # Handle conditional defaults
+        if fill is None:
+            fill = multiple in ("stack", "fill")
+
+        # Preprocess the matplotlib keyword dictionaries
+        if fill:
+            artist = mpl.collections.PolyCollection
+        else:
+            artist = mpl.lines.Line2D
+        plot_kws = normalize_kwargs(plot_kws, artist)
+
+        # Input checking
+        _check_argument("multiple", ["layer", "stack", "fill"], multiple)
+
+        # Always share the evaluation grid when stacking
+        subsets = bool(set(self.variables) - {"x", "y"})
+        if subsets and multiple in ("stack", "fill"):
+            common_grid = True
+
+        # Do the computation
+        densities = self._compute_univariate_density(
+            self.data_variable,
+            common_norm,
+            common_grid,
+            estimate_kws,
+            warn_singular,
+        )
+
+        # Adjust densities based on the `multiple` rule
+        densities, baselines = self._resolve_multiple(densities, multiple)
+
+        # Control the interaction with autoscaling by defining sticky_edges
+        # i.e. we don't want autoscale margins below the density curve
+        sticky_density = (0, 1) if multiple == "fill" else (0, np.inf)
+
+        if multiple == "fill":
+            # Filled plots should not have any margins
+            sticky_support = densities.index.min(), densities.index.max()
+        else:
+            sticky_support = []
+
+        if fill:
+            if multiple == "layer":
+                default_alpha = .25
+            else:
+                default_alpha = .75
+        else:
+            default_alpha = 1
+        alpha = plot_kws.pop("alpha", default_alpha)  # TODO make parameter?
+
+        # Now iterate through the subsets and draw the densities
+        # We go backwards so stacked densities read from top-to-bottom
+        for sub_vars, _ in self.iter_data("hue", reverse=True):
+
+            # Extract the support grid and density curve for this level
+            key = tuple(sub_vars.items())
+            try:
+                density = densities[key]
+            except KeyError:
+                continue
+            support = density.index
+            fill_from = baselines[key]
+
+            ax = self._get_axes(sub_vars)
+
+            if "hue" in self.variables:
+                sub_color = self._hue_map(sub_vars["hue"])
+            else:
+                sub_color = color
+
+            artist_kws = self._artist_kws(
+                plot_kws, fill, False, multiple, sub_color, alpha
+            )
+
+            # Either plot a curve with observation values on the x axis
+            if "x" in self.variables:
+
+                if fill:
+                    artist = ax.fill_between(support, fill_from, density, **artist_kws)
+
+                else:
+                    artist, = ax.plot(support, density, **artist_kws)
+
+                artist.sticky_edges.x[:] = sticky_support
+                artist.sticky_edges.y[:] = sticky_density
+
+            # Or plot a curve with observation values on the y axis
+            else:
+                if fill:
+                    artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)
+                else:
+                    artist, = ax.plot(density, support, **artist_kws)
+
+                artist.sticky_edges.x[:] = sticky_density
+                artist.sticky_edges.y[:] = sticky_support
+
+        # --- Finalize the plot ----
+
+        ax = self.ax if self.ax is not None else self.facets.axes.flat[0]
+        default_x = default_y = ""
+        if self.data_variable == "x":
+            default_y = "Density"
+        if self.data_variable == "y":
+            default_x = "Density"
+        self._add_axis_labels(ax, default_x, default_y)
+
+        if "hue" in self.variables and legend:
+
+            if fill:
+                artist = partial(mpl.patches.Patch)
+            else:
+                artist = partial(mpl.lines.Line2D, [], [])
+
+            ax_obj = self.ax if self.ax is not None else self.facets
+            self._add_legend(
+                ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},
+            )
+
+    def plot_bivariate_density(
+        self,
+        common_norm,
+        fill,
+        levels,
+        thresh,
+        color,
+        legend,
+        cbar,
+        warn_singular,
+        cbar_ax,
+        cbar_kws,
+        estimate_kws,
+        **contour_kws,
+    ):
+
+        contour_kws = contour_kws.copy()
+
+        estimator = KDE(**estimate_kws)
+
+        if not set(self.variables) - {"x", "y"}:
+            common_norm = False
+
+        all_data = self.plot_data.dropna()
+
+        # Loop through the subsets and estimate the KDEs
+        densities, supports = {}, {}
+
+        for sub_vars, sub_data in self.iter_data("hue", from_comp_data=True):
+
+            # Extract the data points from this sub set
+            observations = sub_data[["x", "y"]]
+            min_variance = observations.var().fillna(0).min()
+            observations = observations["x"], observations["y"]
+
+            # Extract the weights for this subset of observations
+            if "weights" in self.variables:
+                weights = sub_data["weights"]
+            else:
+                weights = None
+
+            # Estimate the density of observations at this level
+            singular = math.isclose(min_variance, 0)
+            try:
+                if not singular:
+                    density, support = estimator(*observations, weights=weights)
+            except np.linalg.LinAlgError:
+                # Testing for 0 variance doesn't catch all cases where scipy raises,
+                # but we can also get a ValueError, so we need this convoluted approach
+                singular = True
+
+            if singular:
+                msg = (
+                    "KDE cannot be estimated (0 variance or perfect covariance). "
+                    "Pass `warn_singular=False` to disable this warning."
+                )
+                if warn_singular:
+                    warnings.warn(msg, UserWarning, stacklevel=3)
+                continue
+
+            # Transform the support grid back to the original scale
+            ax = self._get_axes(sub_vars)
+            _, inv_x = _get_transform_functions(ax, "x")
+            _, inv_y = _get_transform_functions(ax, "y")
+            support = inv_x(support[0]), inv_y(support[1])
+
+            # Apply a scaling factor so that the integral over all subsets is 1
+            if common_norm:
+                density *= len(sub_data) / len(all_data)
+
+            key = tuple(sub_vars.items())
+            densities[key] = density
+            supports[key] = support
+
+        # Define a grid of iso-proportion levels
+        if thresh is None:
+            thresh = 0
+        if isinstance(levels, Number):
+            levels = np.linspace(thresh, 1, levels)
+        else:
+            if min(levels) < 0 or max(levels) > 1:
+                raise ValueError("levels must be in [0, 1]")
+
+        # Transform from iso-proportions to iso-densities
+        if common_norm:
+            common_levels = self._quantile_to_level(
+                list(densities.values()), levels,
+            )
+            draw_levels = {k: common_levels for k in densities}
+        else:
+            draw_levels = {
+                k: self._quantile_to_level(d, levels)
+                for k, d in densities.items()
+            }
+
+        # Define the coloring of the contours
+        if "hue" in self.variables:
+            for param in ["cmap", "colors"]:
+                if param in contour_kws:
+                    msg = f"{param} parameter ignored when using hue mapping."
+                    warnings.warn(msg, UserWarning)
+                    contour_kws.pop(param)
+        else:
+
+            # Work out a default coloring of the contours
+            coloring_given = set(contour_kws) & {"cmap", "colors"}
+            if fill and not coloring_given:
+                cmap = self._cmap_from_color(color)
+                contour_kws["cmap"] = cmap
+            if not fill and not coloring_given:
+                contour_kws["colors"] = [color]
+
+            # Use our internal colormap lookup
+            cmap = contour_kws.pop("cmap", None)
+            if isinstance(cmap, str):
+                cmap = color_palette(cmap, as_cmap=True)
+            if cmap is not None:
+                contour_kws["cmap"] = cmap
+
+        # Loop through the subsets again and plot the data
+        for sub_vars, _ in self.iter_data("hue"):
+
+            if "hue" in sub_vars:
+                color = self._hue_map(sub_vars["hue"])
+                if fill:
+                    contour_kws["cmap"] = self._cmap_from_color(color)
+                else:
+                    contour_kws["colors"] = [color]
+
+            ax = self._get_axes(sub_vars)
+
+            # Choose the function to plot with
+            # TODO could add a pcolormesh based option as well
+            # Which would look something like element="raster"
+            if fill:
+                contour_func = ax.contourf
+            else:
+                contour_func = ax.contour
+
+            key = tuple(sub_vars.items())
+            if key not in densities:
+                continue
+            density = densities[key]
+            xx, yy = supports[key]
+
+            # Pop the label kwarg which is unused by contour_func (but warns)
+            contour_kws.pop("label", None)
+
+            cset = contour_func(
+                xx, yy, density,
+                levels=draw_levels[key],
+                **contour_kws,
+            )
+
+            # Add a color bar representing the contour heights
+            # Note: this shows iso densities, not iso proportions
+            # See more notes in histplot about how this could be improved
+            if cbar:
+                cbar_kws = {} if cbar_kws is None else cbar_kws
+                ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)
+
+        # --- Finalize the plot
+        ax = self.ax if self.ax is not None else self.facets.axes.flat[0]
+        self._add_axis_labels(ax)
+
+        if "hue" in self.variables and legend:
+
+            # TODO if possible, I would like to move the contour
+            # intensity information into the legend too and label the
+            # iso proportions rather than the raw density values
+
+            artist_kws = {}
+            if fill:
+                artist = partial(mpl.patches.Patch)
+            else:
+                artist = partial(mpl.lines.Line2D, [], [])
+
+            ax_obj = self.ax if self.ax is not None else self.facets
+            self._add_legend(
+                ax_obj, artist, fill, False, "layer", 1, artist_kws, {},
+            )
+
+    def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):
+
+        estimator = ECDF(**estimate_kws)
+
+        # Set the draw style to step the right way for the data variable
+        drawstyles = dict(x="steps-post", y="steps-pre")
+        plot_kws["drawstyle"] = drawstyles[self.data_variable]
+
+        # Loop through the subsets, transform and plot the data
+        for sub_vars, sub_data in self.iter_data(
+            "hue", reverse=True, from_comp_data=True,
+        ):
+
+            # Compute the ECDF
+            if sub_data.empty:
+                continue
+
+            observations = sub_data[self.data_variable]
+            weights = sub_data.get("weights", None)
+            stat, vals = estimator(observations, weights=weights)
+
+            # Assign attributes based on semantic mapping
+            artist_kws = plot_kws.copy()
+            if "hue" in self.variables:
+                artist_kws["color"] = self._hue_map(sub_vars["hue"])
+
+            # Return the data variable to the linear domain
+            ax = self._get_axes(sub_vars)
+            _, inv = _get_transform_functions(ax, self.data_variable)
+            vals = inv(vals)
+
+            # Manually set the minimum value on a "log" scale
+            if isinstance(inv.__self__, mpl.scale.LogTransform):
+                vals[0] = -np.inf
+
+            # Work out the orientation of the plot
+            if self.data_variable == "x":
+                plot_args = vals, stat
+                stat_variable = "y"
+            else:
+                plot_args = stat, vals
+                stat_variable = "x"
+
+            if estimator.stat == "count":
+                top_edge = len(observations)
+            else:
+                top_edge = 1
+
+            # Draw the line for this subset
+            artist, = ax.plot(*plot_args, **artist_kws)
+            sticky_edges = getattr(artist.sticky_edges, stat_variable)
+            sticky_edges[:] = 0, top_edge
+
+        # --- Finalize the plot ----
+        ax = self.ax if self.ax is not None else self.facets.axes.flat[0]
+        stat = estimator.stat.capitalize()
+        default_x = default_y = ""
+        if self.data_variable == "x":
+            default_y = stat
+        if self.data_variable == "y":
+            default_x = stat
+        self._add_axis_labels(ax, default_x, default_y)
+
+        if "hue" in self.variables and legend:
+            artist = partial(mpl.lines.Line2D, [], [])
+            alpha = plot_kws.get("alpha", 1)
+            ax_obj = self.ax if self.ax is not None else self.facets
+            self._add_legend(
+                ax_obj, artist, False, False, None, alpha, plot_kws, {},
+            )
+
+    def plot_rug(self, height, expand_margins, legend, **kws):
+
+        for sub_vars, sub_data, in self.iter_data(from_comp_data=True):
+
+            ax = self._get_axes(sub_vars)
+
+            kws.setdefault("linewidth", 1)
+
+            if expand_margins:
+                xmarg, ymarg = ax.margins()
+                if "x" in self.variables:
+                    ymarg += height * 2
+                if "y" in self.variables:
+                    xmarg += height * 2
+                ax.margins(x=xmarg, y=ymarg)
+
+            if "hue" in self.variables:
+                kws.pop("c", None)
+                kws.pop("color", None)
+
+            if "x" in self.variables:
+                self._plot_single_rug(sub_data, "x", height, ax, kws)
+            if "y" in self.variables:
+                self._plot_single_rug(sub_data, "y", height, ax, kws)
+
+            # --- Finalize the plot
+            self._add_axis_labels(ax)
+            if "hue" in self.variables and legend:
+                # TODO ideally I'd like the legend artist to look like a rug
+                legend_artist = partial(mpl.lines.Line2D, [], [])
+                self._add_legend(
+                    ax, legend_artist, False, False, None, 1, {}, {},
+                )

     def _plot_single_rug(self, sub_data, var, height, ax, kws):
         """Draw a rugplot along one axis of the plot."""
-        pass
+        vector = sub_data[var]
+        n = len(vector)
+
+        # Return data to linear domain
+        _, inv = _get_transform_functions(ax, var)
+        vector = inv(vector)
+
+        # We'll always add a single collection with varying colors
+        if "hue" in self.variables:
+            colors = self._hue_map(sub_data["hue"])
+        else:
+            colors = None
+
+        # Build the array of values for the LineCollection
+        if var == "x":
+
+            trans = tx.blended_transform_factory(ax.transData, ax.transAxes)
+            xy_pairs = np.column_stack([
+                np.repeat(vector, 2), np.tile([0, height], n)
+            ])
+
+        if var == "y":
+
+            trans = tx.blended_transform_factory(ax.transAxes, ax.transData)
+            xy_pairs = np.column_stack([
+                np.tile([0, height], n), np.repeat(vector, 2)
+            ])
+
+        # Draw the lines on the plot
+        line_segs = xy_pairs.reshape([n, 2, 2])
+        ax.add_collection(LineCollection(
+            line_segs, transform=trans, colors=colors, **kws
+        ))
+
+        ax.autoscale_view(scalex=var == "x", scaley=var == "y")
+
+
+# ==================================================================================== #
+# External API
+# ==================================================================================== #
+
+def histplot(
+    data=None, *,
+    # Vector variables
+    x=None, y=None, hue=None, weights=None,
+    # Histogram computation parameters
+    stat="count", bins="auto", binwidth=None, binrange=None,
+    discrete=None, cumulative=False, common_bins=True, common_norm=True,
+    # Histogram appearance parameters
+    multiple="layer", element="bars", fill=True, shrink=1,
+    # Histogram smoothing with a kernel density estimate
+    kde=False, kde_kws=None, line_kws=None,
+    # Bivariate histogram parameters
+    thresh=0, pthresh=None, pmax=None, cbar=False, cbar_ax=None, cbar_kws=None,
+    # Hue mapping parameters
+    palette=None, hue_order=None, hue_norm=None, color=None,
+    # Axes information
+    log_scale=None, legend=True, ax=None,
+    # Other appearance keywords
+    **kwargs,
+):
+
+    p = _DistributionPlotter(
+        data=data,
+        variables=dict(x=x, y=y, hue=hue, weights=weights),
+    )

+    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)

-histplot.__doc__ = (
-    """Plot univariate or bivariate histograms to show distributions of datasets.
+    if ax is None:
+        ax = plt.gca()
+
+    p._attach(ax, log_scale=log_scale)
+
+    if p.univariate:  # Note, bivariate plots won't cycle
+        if fill:
+            method = ax.bar if element == "bars" else ax.fill_between
+        else:
+            method = ax.plot
+        color = _default_color(method, hue, color, kwargs)
+
+    if not p.has_xy_data:
+        return ax
+
+    # Default to discrete bins for categorical variables
+    if discrete is None:
+        discrete = p._default_discrete()
+
+    estimate_kws = dict(
+        stat=stat,
+        bins=bins,
+        binwidth=binwidth,
+        binrange=binrange,
+        discrete=discrete,
+        cumulative=cumulative,
+    )
+
+    if p.univariate:
+
+        p.plot_univariate_histogram(
+            multiple=multiple,
+            element=element,
+            fill=fill,
+            shrink=shrink,
+            common_norm=common_norm,
+            common_bins=common_bins,
+            kde=kde,
+            kde_kws=kde_kws,
+            color=color,
+            legend=legend,
+            estimate_kws=estimate_kws,
+            line_kws=line_kws,
+            **kwargs,
+        )
+
+    else:
+
+        p.plot_bivariate_histogram(
+            common_bins=common_bins,
+            common_norm=common_norm,
+            thresh=thresh,
+            pthresh=pthresh,
+            pmax=pmax,
+            color=color,
+            legend=legend,
+            cbar=cbar,
+            cbar_ax=cbar_ax,
+            cbar_kws=cbar_kws,
+            estimate_kws=estimate_kws,
+            **kwargs,
+        )
+
+    return ax
+
+
+histplot.__doc__ = """\
+Plot univariate or bivariate histograms to show distributions of datasets.

 A histogram is a classic visualization tool that represents the distribution
 of one or more variables by counting the number of observations that fall within
@@ -237,11 +1571,167 @@ Examples

 .. include:: ../docstrings/histplot.rst

-"""
-    .format(params=_param_docs, returns=_core_docs['returns'], seealso=
-    _core_docs['seealso']))
-kdeplot.__doc__ = (
-    """Plot univariate or bivariate distributions using kernel density estimation.
+""".format(
+    params=_param_docs,
+    returns=_core_docs["returns"],
+    seealso=_core_docs["seealso"],
+)
+
+
+def kdeplot(
+    data=None, *, x=None, y=None, hue=None, weights=None,
+    palette=None, hue_order=None, hue_norm=None, color=None, fill=None,
+    multiple="layer", common_norm=True, common_grid=False, cumulative=False,
+    bw_method="scott", bw_adjust=1, warn_singular=True, log_scale=None,
+    levels=10, thresh=.05, gridsize=200, cut=3, clip=None,
+    legend=True, cbar=False, cbar_ax=None, cbar_kws=None, ax=None,
+    **kwargs,
+):
+
+    # --- Start with backwards compatibility for versions < 0.11.0 ----------------
+
+    # Handle (past) deprecation of `data2`
+    if "data2" in kwargs:
+        msg = "`data2` has been removed (replaced by `y`); please update your code."
+        raise TypeError(msg)
+
+    # Handle deprecation of `vertical`
+    vertical = kwargs.pop("vertical", None)
+    if vertical is not None:
+        if vertical:
+            action_taken = "assigning data to `y`."
+            if x is None:
+                data, y = y, data
+            else:
+                x, y = y, x
+        else:
+            action_taken = "assigning data to `x`."
+        msg = textwrap.dedent(f"""\n
+        The `vertical` parameter is deprecated; {action_taken}
+        This will become an error in seaborn v0.14.0; please update your code.
+        """)
+        warnings.warn(msg, UserWarning, stacklevel=2)
+
+    # Handle deprecation of `bw`
+    bw = kwargs.pop("bw", None)
+    if bw is not None:
+        msg = textwrap.dedent(f"""\n
+        The `bw` parameter is deprecated in favor of `bw_method` and `bw_adjust`.
+        Setting `bw_method={bw}`, but please see the docs for the new parameters
+        and update your code. This will become an error in seaborn v0.14.0.
+        """)
+        warnings.warn(msg, UserWarning, stacklevel=2)
+        bw_method = bw
+
+    # Handle deprecation of `kernel`
+    if kwargs.pop("kernel", None) is not None:
+        msg = textwrap.dedent("""\n
+        Support for alternate kernels has been removed; using Gaussian kernel.
+        This will become an error in seaborn v0.14.0; please update your code.
+        """)
+        warnings.warn(msg, UserWarning, stacklevel=2)
+
+    # Handle deprecation of shade_lowest
+    shade_lowest = kwargs.pop("shade_lowest", None)
+    if shade_lowest is not None:
+        if shade_lowest:
+            thresh = 0
+        msg = textwrap.dedent(f"""\n
+        `shade_lowest` has been replaced by `thresh`; setting `thresh={thresh}`.
+        This will become an error in seaborn v0.14.0; please update your code.
+        """)
+        warnings.warn(msg, UserWarning, stacklevel=2)
+
+    # Handle "soft" deprecation of shade `shade` is not really the right
+    # terminology here, but unlike some of the other deprecated parameters it
+    # is probably very commonly used and much hard to remove. This is therefore
+    # going to be a longer process where, first, `fill` will be introduced and
+    # be used throughout the documentation. In 0.12, when kwarg-only
+    # enforcement hits, we can remove the shade/shade_lowest out of the
+    # function signature all together and pull them out of the kwargs. Then we
+    # can actually fire a FutureWarning, and eventually remove.
+    shade = kwargs.pop("shade", None)
+    if shade is not None:
+        fill = shade
+        msg = textwrap.dedent(f"""\n
+        `shade` is now deprecated in favor of `fill`; setting `fill={shade}`.
+        This will become an error in seaborn v0.14.0; please update your code.
+        """)
+        warnings.warn(msg, FutureWarning, stacklevel=2)
+
+    # Handle `n_levels`
+    # This was never in the formal API but it was processed, and appeared in an
+    # example. We can treat as an alias for `levels` now and deprecate later.
+    levels = kwargs.pop("n_levels", levels)
+
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
+
+    p = _DistributionPlotter(
+        data=data,
+        variables=dict(x=x, y=y, hue=hue, weights=weights),
+    )
+
+    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
+
+    if ax is None:
+        ax = plt.gca()
+
+    p._attach(ax, allowed_types=["numeric", "datetime"], log_scale=log_scale)
+
+    method = ax.fill_between if fill else ax.plot
+    color = _default_color(method, hue, color, kwargs)
+
+    if not p.has_xy_data:
+        return ax
+
+    # Pack the kwargs for statistics.KDE
+    estimate_kws = dict(
+        bw_method=bw_method,
+        bw_adjust=bw_adjust,
+        gridsize=gridsize,
+        cut=cut,
+        clip=clip,
+        cumulative=cumulative,
+    )
+
+    if p.univariate:
+
+        plot_kws = kwargs.copy()
+
+        p.plot_univariate_density(
+            multiple=multiple,
+            common_norm=common_norm,
+            common_grid=common_grid,
+            fill=fill,
+            color=color,
+            legend=legend,
+            warn_singular=warn_singular,
+            estimate_kws=estimate_kws,
+            **plot_kws,
+        )
+
+    else:
+
+        p.plot_bivariate_density(
+            common_norm=common_norm,
+            fill=fill,
+            levels=levels,
+            thresh=thresh,
+            legend=legend,
+            color=color,
+            warn_singular=warn_singular,
+            cbar=cbar,
+            cbar_ax=cbar_ax,
+            cbar_kws=cbar_kws,
+            estimate_kws=estimate_kws,
+            **kwargs,
+        )
+
+    return ax
+
+
+kdeplot.__doc__ = """\
+Plot univariate or bivariate distributions using kernel density estimation.

 A kernel density estimate (KDE) plot is a method for visualizing the
 distribution of observations in a dataset, analogous to a histogram. KDE
@@ -361,11 +1851,71 @@ Examples

 .. include:: ../docstrings/kdeplot.rst

-"""
-    .format(params=_param_docs, returns=_core_docs['returns'], seealso=
-    _core_docs['seealso']))
-ecdfplot.__doc__ = (
-    """Plot empirical cumulative distribution functions.
+""".format(
+    params=_param_docs,
+    returns=_core_docs["returns"],
+    seealso=_core_docs["seealso"],
+)
+
+
+def ecdfplot(
+    data=None, *,
+    # Vector variables
+    x=None, y=None, hue=None, weights=None,
+    # Computation parameters
+    stat="proportion", complementary=False,
+    # Hue mapping parameters
+    palette=None, hue_order=None, hue_norm=None,
+    # Axes information
+    log_scale=None, legend=True, ax=None,
+    # Other appearance keywords
+    **kwargs,
+):
+
+    p = _DistributionPlotter(
+        data=data,
+        variables=dict(x=x, y=y, hue=hue, weights=weights),
+    )
+
+    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
+
+    # We could support other semantics (size, style) here fairly easily
+    # But it would make distplot a bit more complicated.
+    # It's always possible to add features like that later, so I am going to defer.
+    # It will be even easier to wait until after there is a more general/abstract
+    # way to go from semantic specs to artist attributes.
+
+    if ax is None:
+        ax = plt.gca()
+
+    p._attach(ax, log_scale=log_scale)
+
+    color = kwargs.pop("color", kwargs.pop("c", None))
+    kwargs["color"] = _default_color(ax.plot, hue, color, kwargs)
+
+    if not p.has_xy_data:
+        return ax
+
+    # We could add this one day, but it's of dubious value
+    if not p.univariate:
+        raise NotImplementedError("Bivariate ECDF plots are not implemented")
+
+    estimate_kws = dict(
+        stat=stat,
+        complementary=complementary,
+    )
+
+    p.plot_univariate_ecdf(
+        estimate_kws=estimate_kws,
+        legend=legend,
+        **kwargs,
+    )
+
+    return ax
+
+
+ecdfplot.__doc__ = """\
+Plot empirical cumulative distribution functions.

 An ECDF represents the proportion or count of observations falling below each
 unique value in a dataset. Compared to a histogram or density plot, it has the
@@ -413,11 +1963,95 @@ Examples

 .. include:: ../docstrings/ecdfplot.rst

-"""
-    .format(params=_param_docs, returns=_core_docs['returns'], seealso=
-    _core_docs['seealso']))
-rugplot.__doc__ = (
-    """Plot marginal distributions by drawing ticks along the x and y axes.
+""".format(
+    params=_param_docs,
+    returns=_core_docs["returns"],
+    seealso=_core_docs["seealso"],
+)
+
+
+def rugplot(
+    data=None, *, x=None, y=None, hue=None, height=.025, expand_margins=True,
+    palette=None, hue_order=None, hue_norm=None, legend=True, ax=None, **kwargs
+):
+
+    # A note: I think it would make sense to add multiple= to rugplot and allow
+    # rugs for different hue variables to be shifted orthogonal to the data axis
+    # But is this stacking, or dodging?
+
+    # A note: if we want to add a style semantic to rugplot,
+    # we could make an option that draws the rug using scatterplot
+
+    # A note, it would also be nice to offer some kind of histogram/density
+    # rugplot, since alpha blending doesn't work great in the large n regime
+
+    # --- Start with backwards compatibility for versions < 0.11.0 ----------------
+
+    a = kwargs.pop("a", None)
+    axis = kwargs.pop("axis", None)
+
+    if a is not None:
+        data = a
+        msg = textwrap.dedent("""\n
+        The `a` parameter has been replaced; use `x`, `y`, and/or `data` instead.
+        Please update your code; this will become an error in seaborn v0.14.0.
+        """)
+        warnings.warn(msg, UserWarning, stacklevel=2)
+
+    if axis is not None:
+        if axis == "x":
+            x = data
+        elif axis == "y":
+            y = data
+        data = None
+        msg = textwrap.dedent(f"""\n
+        The `axis` parameter has been deprecated; use the `{axis}` parameter instead.
+        Please update your code; this will become an error in seaborn v0.14.0.
+        """)
+        warnings.warn(msg, UserWarning, stacklevel=2)
+
+    vertical = kwargs.pop("vertical", None)
+    if vertical is not None:
+        if vertical:
+            action_taken = "assigning data to `y`."
+            if x is None:
+                data, y = y, data
+            else:
+                x, y = y, x
+        else:
+            action_taken = "assigning data to `x`."
+        msg = textwrap.dedent(f"""\n
+        The `vertical` parameter is deprecated; {action_taken}
+        This will become an error in seaborn v0.14.0; please update your code.
+        """)
+        warnings.warn(msg, UserWarning, stacklevel=2)
+
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
+
+    p = _DistributionPlotter(
+        data=data,
+        variables=dict(x=x, y=y, hue=hue),
+    )
+    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
+
+    if ax is None:
+        ax = plt.gca()
+
+    p._attach(ax)
+
+    color = kwargs.pop("color", kwargs.pop("c", None))
+    kwargs["color"] = _default_color(ax.plot, hue, color, kwargs)
+
+    if not p.has_xy_data:
+        return ax
+
+    p.plot_rug(height, expand_margins, legend, **kwargs)
+
+    return ax
+
+
+rugplot.__doc__ = """\
+Plot marginal distributions by drawing ticks along the x and y axes.

 This function is intended to complement other plots by showing the location
 of individual observations in an unobtrusive way.
@@ -451,10 +2085,206 @@ Examples

 .. include:: ../docstrings/rugplot.rst

-"""
-    .format(params=_param_docs, returns=_core_docs['returns']))
-displot.__doc__ = (
-    """Figure-level interface for drawing distribution plots onto a FacetGrid.
+""".format(
+    params=_param_docs,
+    returns=_core_docs["returns"],
+)
+
+
+def displot(
+    data=None, *,
+    # Vector variables
+    x=None, y=None, hue=None, row=None, col=None, weights=None,
+    # Other plot parameters
+    kind="hist", rug=False, rug_kws=None, log_scale=None, legend=True,
+    # Hue-mapping parameters
+    palette=None, hue_order=None, hue_norm=None, color=None,
+    # Faceting parameters
+    col_wrap=None, row_order=None, col_order=None,
+    height=5, aspect=1, facet_kws=None,
+    **kwargs,
+):
+
+    p = _DistributionPlotter(
+        data=data,
+        variables=dict(x=x, y=y, hue=hue, weights=weights, row=row, col=col),
+    )
+
+    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
+
+    _check_argument("kind", ["hist", "kde", "ecdf"], kind)
+
+    # --- Initialize the FacetGrid object
+
+    # Check for attempt to plot onto specific axes and warn
+    if "ax" in kwargs:
+        msg = (
+            "`displot` is a figure-level function and does not accept "
+            "the ax= parameter. You may wish to try {}plot.".format(kind)
+        )
+        warnings.warn(msg, UserWarning)
+        kwargs.pop("ax")
+
+    for var in ["row", "col"]:
+        # Handle faceting variables that lack name information
+        if var in p.variables and p.variables[var] is None:
+            p.variables[var] = f"_{var}_"
+
+    # Adapt the plot_data dataframe for use with FacetGrid
+    grid_data = p.plot_data.rename(columns=p.variables)
+    grid_data = grid_data.loc[:, ~grid_data.columns.duplicated()]
+
+    col_name = p.variables.get("col")
+    row_name = p.variables.get("row")
+
+    if facet_kws is None:
+        facet_kws = {}
+
+    g = FacetGrid(
+        data=grid_data, row=row_name, col=col_name,
+        col_wrap=col_wrap, row_order=row_order,
+        col_order=col_order, height=height,
+        aspect=aspect,
+        **facet_kws,
+    )
+
+    # Now attach the axes object to the plotter object
+    if kind == "kde":
+        allowed_types = ["numeric", "datetime"]
+    else:
+        allowed_types = None
+    p._attach(g, allowed_types=allowed_types, log_scale=log_scale)
+
+    # Check for a specification that lacks x/y data and return early
+    if not p.has_xy_data:
+        return g
+
+    if color is None and hue is None:
+        color = "C0"
+    # XXX else warn if hue is not None?
+
+    kwargs["legend"] = legend
+
+    # --- Draw the plots
+
+    if kind == "hist":
+
+        hist_kws = kwargs.copy()
+
+        # Extract the parameters that will go directly to Histogram
+        estimate_defaults = {}
+        _assign_default_kwargs(estimate_defaults, Histogram.__init__, histplot)
+
+        estimate_kws = {}
+        for key, default_val in estimate_defaults.items():
+            estimate_kws[key] = hist_kws.pop(key, default_val)
+
+        # Handle derivative defaults
+        if estimate_kws["discrete"] is None:
+            estimate_kws["discrete"] = p._default_discrete()
+
+        hist_kws["estimate_kws"] = estimate_kws
+
+        hist_kws.setdefault("color", color)
+
+        if p.univariate:
+
+            _assign_default_kwargs(hist_kws, p.plot_univariate_histogram, histplot)
+            p.plot_univariate_histogram(**hist_kws)
+
+        else:
+
+            _assign_default_kwargs(hist_kws, p.plot_bivariate_histogram, histplot)
+            p.plot_bivariate_histogram(**hist_kws)
+
+    elif kind == "kde":
+
+        kde_kws = kwargs.copy()
+
+        # Extract the parameters that will go directly to KDE
+        estimate_defaults = {}
+        _assign_default_kwargs(estimate_defaults, KDE.__init__, kdeplot)
+
+        estimate_kws = {}
+        for key, default_val in estimate_defaults.items():
+            estimate_kws[key] = kde_kws.pop(key, default_val)
+
+        kde_kws["estimate_kws"] = estimate_kws
+        kde_kws["color"] = color
+
+        if p.univariate:
+
+            _assign_default_kwargs(kde_kws, p.plot_univariate_density, kdeplot)
+            p.plot_univariate_density(**kde_kws)
+
+        else:
+
+            _assign_default_kwargs(kde_kws, p.plot_bivariate_density, kdeplot)
+            p.plot_bivariate_density(**kde_kws)
+
+    elif kind == "ecdf":
+
+        ecdf_kws = kwargs.copy()
+
+        # Extract the parameters that will go directly to the estimator
+        estimate_kws = {}
+        estimate_defaults = {}
+        _assign_default_kwargs(estimate_defaults, ECDF.__init__, ecdfplot)
+        for key, default_val in estimate_defaults.items():
+            estimate_kws[key] = ecdf_kws.pop(key, default_val)
+
+        ecdf_kws["estimate_kws"] = estimate_kws
+        ecdf_kws["color"] = color
+
+        if p.univariate:
+
+            _assign_default_kwargs(ecdf_kws, p.plot_univariate_ecdf, ecdfplot)
+            p.plot_univariate_ecdf(**ecdf_kws)
+
+        else:
+
+            raise NotImplementedError("Bivariate ECDF plots are not implemented")
+
+    # All plot kinds can include a rug
+    if rug:
+        # TODO with expand_margins=True, each facet expands margins... annoying!
+        if rug_kws is None:
+            rug_kws = {}
+        _assign_default_kwargs(rug_kws, p.plot_rug, rugplot)
+        rug_kws["legend"] = False
+        if color is not None:
+            rug_kws["color"] = color
+        p.plot_rug(**rug_kws)
+
+    # Call FacetGrid annotation methods
+    # Note that the legend is currently set inside the plotting method
+    g.set_axis_labels(
+        x_var=p.variables.get("x", g.axes.flat[0].get_xlabel()),
+        y_var=p.variables.get("y", g.axes.flat[0].get_ylabel()),
+    )
+    g.set_titles()
+    g.tight_layout()
+
+    if data is not None and (x is not None or y is not None):
+        if not isinstance(data, pd.DataFrame):
+            data = pd.DataFrame(data)
+        g.data = pd.merge(
+            data,
+            g.data[g.data.columns.difference(data.columns)],
+            left_index=True,
+            right_index=True,
+        )
+    else:
+        wide_cols = {
+            k: f"_{k}_" if v is None else v for k, v in p.variables.items()
+        }
+        g.data = p.plot_data.rename(columns=wide_cols)
+
+    return g
+
+
+displot.__doc__ = """\
+Figure-level interface for drawing distribution plots onto a FacetGrid.

 This function provides access to several approaches for visualizing the
 univariate or bivariate distribution of data, including subsets of data
@@ -530,20 +2360,37 @@ about the breadth of options available for each plot kind.

 .. include:: ../docstrings/displot.rst

-"""
-    .format(params=_param_docs, returns=_core_docs['returns'], seealso=
-    _core_docs['seealso']))
+""".format(
+    params=_param_docs,
+    returns=_core_docs["returns"],
+    seealso=_core_docs["seealso"],
+)
+
+
+# =========================================================================== #
+# DEPRECATED FUNCTIONS LIVE BELOW HERE
+# =========================================================================== #


 def _freedman_diaconis_bins(a):
     """Calculate number of hist bins using Freedman-Diaconis rule."""
-    pass
+    # From https://stats.stackexchange.com/questions/798/
+    a = np.asarray(a)
+    if len(a) < 2:
+        return 1
+    iqr = np.subtract.reduce(np.nanpercentile(a, [75, 25]))
+    h = 2 * iqr / (len(a) ** (1 / 3))
+    # fall back to sqrt(n) bins if the IQR is 0
+    if h == 0:
+        return int(np.sqrt(a.size))
+    else:
+        return int(np.ceil((a.max() - a.min()) / h))


 def distplot(a=None, bins=None, hist=True, kde=True, rug=False, fit=None,
-    hist_kws=None, kde_kws=None, rug_kws=None, fit_kws=None, color=None,
-    vertical=False, norm_hist=False, axlabel=None, label=None, ax=None, x=None
-    ):
+             hist_kws=None, kde_kws=None, rug_kws=None, fit_kws=None,
+             color=None, vertical=False, norm_hist=False, axlabel=None,
+             label=None, ax=None, x=None):
     """
     DEPRECATED

@@ -556,4 +2403,129 @@ def distplot(a=None, bins=None, hist=True, kde=True, rug=False, fit=None,
     https://gist.github.com/mwaskom/de44147ed2974457ad6372750bbe5751

     """
-    pass
+
+    if kde and not hist:
+        axes_level_suggestion = (
+            "`kdeplot` (an axes-level function for kernel density plots)"
+        )
+    else:
+        axes_level_suggestion = (
+            "`histplot` (an axes-level function for histograms)"
+        )
+
+    msg = textwrap.dedent(f"""
+
+    `distplot` is a deprecated function and will be removed in seaborn v0.14.0.
+
+    Please adapt your code to use either `displot` (a figure-level function with
+    similar flexibility) or {axes_level_suggestion}.
+
+    For a guide to updating your code to use the new functions, please see
+    https://gist.github.com/mwaskom/de44147ed2974457ad6372750bbe5751
+    """)
+    warnings.warn(msg, UserWarning, stacklevel=2)
+
+    if ax is None:
+        ax = plt.gca()
+
+    # Intelligently label the support axis
+    label_ax = bool(axlabel)
+    if axlabel is None and hasattr(a, "name"):
+        axlabel = a.name
+        if axlabel is not None:
+            label_ax = True
+
+    # Support new-style API
+    if x is not None:
+        a = x
+
+    # Make `a` a 1-d float array
+    a = np.asarray(a, float)
+    if a.ndim > 1:
+        a = a.squeeze()
+
+    # Drop null values from array
+    a = remove_na(a)
+
+    # Decide if the hist is normed
+    norm_hist = norm_hist or kde or (fit is not None)
+
+    # Handle dictionary defaults
+    hist_kws = {} if hist_kws is None else hist_kws.copy()
+    kde_kws = {} if kde_kws is None else kde_kws.copy()
+    rug_kws = {} if rug_kws is None else rug_kws.copy()
+    fit_kws = {} if fit_kws is None else fit_kws.copy()
+
+    # Get the color from the current color cycle
+    if color is None:
+        if vertical:
+            line, = ax.plot(0, a.mean())
+        else:
+            line, = ax.plot(a.mean(), 0)
+        color = line.get_color()
+        line.remove()
+
+    # Plug the label into the right kwarg dictionary
+    if label is not None:
+        if hist:
+            hist_kws["label"] = label
+        elif kde:
+            kde_kws["label"] = label
+        elif rug:
+            rug_kws["label"] = label
+        elif fit:
+            fit_kws["label"] = label
+
+    if hist:
+        if bins is None:
+            bins = min(_freedman_diaconis_bins(a), 50)
+        hist_kws.setdefault("alpha", 0.4)
+        hist_kws.setdefault("density", norm_hist)
+
+        orientation = "horizontal" if vertical else "vertical"
+        hist_color = hist_kws.pop("color", color)
+        ax.hist(a, bins, orientation=orientation,
+                color=hist_color, **hist_kws)
+        if hist_color != color:
+            hist_kws["color"] = hist_color
+
+    axis = "y" if vertical else "x"
+
+    if kde:
+        kde_color = kde_kws.pop("color", color)
+        kdeplot(**{axis: a}, ax=ax, color=kde_color, **kde_kws)
+        if kde_color != color:
+            kde_kws["color"] = kde_color
+
+    if rug:
+        rug_color = rug_kws.pop("color", color)
+        rugplot(**{axis: a}, ax=ax, color=rug_color, **rug_kws)
+        if rug_color != color:
+            rug_kws["color"] = rug_color
+
+    if fit is not None:
+
+        def pdf(x):
+            return fit.pdf(x, *params)
+
+        fit_color = fit_kws.pop("color", "#282828")
+        gridsize = fit_kws.pop("gridsize", 200)
+        cut = fit_kws.pop("cut", 3)
+        clip = fit_kws.pop("clip", (-np.inf, np.inf))
+        bw = gaussian_kde(a).scotts_factor() * a.std(ddof=1)
+        x = _kde_support(a, bw, gridsize, cut, clip)
+        params = fit.fit(a)
+        y = pdf(x)
+        if vertical:
+            x, y = y, x
+        ax.plot(x, y, color=fit_color, **fit_kws)
+        if fit_color != "#282828":
+            fit_kws["color"] = fit_color
+
+    if label_ax:
+        if vertical:
+            ax.set_ylabel(axlabel)
+        else:
+            ax.set_xlabel(axlabel)
+
+    return ax
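For orientation only, and not part of the patch itself: a minimal sketch of how the restored axes-level and figure-level entry points are typically called. It assumes the bundled "penguins" example dataset can be fetched with load_dataset; any DataFrame with a numeric column works the same way.

import matplotlib.pyplot as plt
import seaborn as sns

# Example data; load_dataset needs network access on first use (assumption).
penguins = sns.load_dataset("penguins")

# Axes-level functions draw onto plt.gca() (or an explicit ax=) and return the Axes.
ax = sns.histplot(data=penguins, x="flipper_length_mm", hue="species")
sns.rugplot(data=penguins, x="flipper_length_mm", hue="species", ax=ax)

# The figure-level entry point builds a FacetGrid and dispatches on kind=.
g = sns.displot(
    data=penguins, x="flipper_length_mm", col="species", kind="kde", rug=True
)
plt.show()

The return values mirror the `return ax` / `return g` statements above: the axes-level functions hand back the matplotlib Axes they drew on, while displot hands back the FacetGrid it created.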
diff --git a/seaborn/external/appdirs.py b/seaborn/external/appdirs.py
index dc520cd6..70c38296 100644
--- a/seaborn/external/appdirs.py
+++ b/seaborn/external/appdirs.py
@@ -1,3 +1,9 @@
+#!/usr/bin/env python3
+# Copyright (c) 2005-2010 ActiveState Software Inc.
+# Copyright (c) 2013 Eddy Petrișor
+
+# flake8: noqa
+
 """
 This file is directly from
 https://github.com/ActiveState/appdirs/blob/3fe6a83776843a46f20c2e5587afcffe05e03b39/appdirs.py
@@ -28,30 +34,44 @@ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 """
+
 """Utilities for determining application-specific dirs.

 See <https://github.com/ActiveState/appdirs> for details and usage.
 """
-__version__ = '1.4.4'
-__version_info__ = tuple(int(segment) for segment in __version__.split('.'))
+# Dev Notes:
+# - MSDN on where to store app data files:
+#   http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
+# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
+# - XDG spec for Un*x: https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
+
+__version__ = "1.4.4"
+__version_info__ = tuple(int(segment) for segment in __version__.split("."))
+
+
 import sys
 import os
+
 unicode = str
+
 if sys.platform.startswith('java'):
     import platform
     os_name = platform.java_ver()[3][0]
-    if os_name.startswith('Windows'):
+    if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
         system = 'win32'
-    elif os_name.startswith('Mac'):
+    elif os_name.startswith('Mac'): # "Mac OS X", etc.
         system = 'darwin'
-    else:
+    else: # "Linux", "SunOS", "FreeBSD", etc.
+        # Setting this to "linux2" is not ideal, but only Windows or Mac
+        # are actually checked for and the rest of the module expects
+        # *sys.platform* style strings.
         system = 'linux2'
 else:
     system = sys.platform


 def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
-    """Return full path to the user-specific cache dir for this application.
+    r"""Return full path to the user-specific cache dir for this application.

         "appname" is the name of application.
             If None, just the system directory is returned.
@@ -71,30 +91,145 @@ def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
     Typical user cache directories are:
         Mac OS X:   ~/Library/Caches/<AppName>
         Unix:       ~/.cache/<AppName> (XDG default)
-        Win XP:     C:\\Documents and Settings\\<username>\\Local Settings\\Application Data\\<AppAuthor>\\<AppName>\\Cache
-        Vista:      C:\\Users\\<username>\\AppData\\Local\\<AppAuthor>\\<AppName>\\Cache
+        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
+        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache

     On Windows the only suggestion in the MSDN docs is that local settings go in
     the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
     app data dir (the default returned by `user_data_dir` above). Apps typically
     put cache data somewhere *under* the given dir here. Some examples:
-        ...\\Mozilla\\Firefox\\Profiles\\<ProfileName>\\Cache
-        ...\\Acme\\SuperApp\\Cache\\1.0
+        ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
+        ...\Acme\SuperApp\Cache\1.0
     OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
     This can be disabled with the `opinion=False` option.
     """
-    pass
+    if system == "win32":
+        if appauthor is None:
+            appauthor = appname
+        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
+        if appname:
+            if appauthor is not False:
+                path = os.path.join(path, appauthor, appname)
+            else:
+                path = os.path.join(path, appname)
+            if opinion:
+                path = os.path.join(path, "Cache")
+    elif system == 'darwin':
+        path = os.path.expanduser('~/Library/Caches')
+        if appname:
+            path = os.path.join(path, appname)
+    else:
+        path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
+        if appname:
+            path = os.path.join(path, appname)
+    if appname and version:
+        path = os.path.join(path, version)
+    return path


+#---- internal support stuff
+
 def _get_win_folder_from_registry(csidl_name):
     """This is a fallback technique at best. I'm not sure if using the
     registry for this guarantees us the correct answer for all CSIDL_*
     names.
     """
-    pass
-
-
-if system == 'win32':
+    import winreg as _winreg
+
+    shell_folder_name = {
+        "CSIDL_APPDATA": "AppData",
+        "CSIDL_COMMON_APPDATA": "Common AppData",
+        "CSIDL_LOCAL_APPDATA": "Local AppData",
+    }[csidl_name]
+
+    key = _winreg.OpenKey(
+        _winreg.HKEY_CURRENT_USER,
+        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
+    )
+    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
+    return dir
+
+
+def _get_win_folder_with_pywin32(csidl_name):
+    from win32com.shell import shellcon, shell
+    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
+    # Try to make this a unicode path because SHGetFolderPath does
+    # not return unicode strings when there is unicode data in the
+    # path.
+    try:
+        dir = unicode(dir)
+
+        # Downgrade to short path name if have highbit chars. See
+        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+        has_high_char = False
+        for c in dir:
+            if ord(c) > 255:
+                has_high_char = True
+                break
+        if has_high_char:
+            try:
+                import win32api
+                dir = win32api.GetShortPathName(dir)
+            except ImportError:
+                pass
+    except UnicodeError:
+        pass
+    return dir
+
+
+def _get_win_folder_with_ctypes(csidl_name):
+    import ctypes
+
+    csidl_const = {
+        "CSIDL_APPDATA": 26,
+        "CSIDL_COMMON_APPDATA": 35,
+        "CSIDL_LOCAL_APPDATA": 28,
+    }[csidl_name]
+
+    buf = ctypes.create_unicode_buffer(1024)
+    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
+
+    # Downgrade to short path name if have highbit chars. See
+    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+    has_high_char = False
+    for c in buf:
+        if ord(c) > 255:
+            has_high_char = True
+            break
+    if has_high_char:
+        buf2 = ctypes.create_unicode_buffer(1024)
+        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
+            buf = buf2
+
+    return buf.value
+
+def _get_win_folder_with_jna(csidl_name):
+    import array
+    from com.sun import jna
+    from com.sun.jna.platform import win32
+
+    buf_size = win32.WinDef.MAX_PATH * 2
+    buf = array.zeros('c', buf_size)
+    shell = win32.Shell32.INSTANCE
+    shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
+    dir = jna.Native.toString(buf.tostring()).rstrip("\0")
+
+    # Downgrade to short path name if have highbit chars. See
+    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+    has_high_char = False
+    for c in dir:
+        if ord(c) > 255:
+            has_high_char = True
+            break
+    if has_high_char:
+        buf = array.zeros('c', buf_size)
+        kernel = win32.Kernel32.INSTANCE
+        if kernel.GetShortPathName(dir, buf, buf_size):
+            dir = jna.Native.toString(buf.tostring()).rstrip("\0")
+
+    return dir
+
+if system == "win32":
     try:
         import win32com.shell
         _get_win_folder = _get_win_folder_with_pywin32
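As a rough illustration, also not part of the patch: what the restored user_cache_dir resolves to on each platform. The application name and version below are arbitrary example values, not necessarily how seaborn itself calls this helper.

from seaborn.external.appdirs import user_cache_dir

# "seaborn" and "0.13" are placeholder arguments for illustration only.
path = user_cache_dir("seaborn", appauthor=False, version="0.13")
print(path)
# Linux:   ~/.cache/seaborn/0.13            (or under $XDG_CACHE_HOME if set)
# macOS:   ~/Library/Caches/seaborn/0.13
# Windows: %LOCALAPPDATA%\seaborn\Cache\0.13  (opinion=True appends "Cache")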
diff --git a/seaborn/external/docscrape.py b/seaborn/external/docscrape.py
index c3814e10..99dc3ff7 100644
--- a/seaborn/external/docscrape.py
+++ b/seaborn/external/docscrape.py
@@ -38,40 +38,95 @@ import sys


 def strip_blank_lines(l):
-    """Remove leading and trailing blank lines from a list of lines"""
-    pass
+    "Remove leading and trailing blank lines from a list of lines"
+    while l and not l[0].strip():
+        del l[0]
+    while l and not l[-1].strip():
+        del l[-1]
+    return l


 class Reader:
     """A line-based string reader.

     """
-
     def __init__(self, data):
         """
         Parameters
         ----------
         data : str
-           String with lines separated by '
-'.
+           String with lines separated by '\n'.

         """
         if isinstance(data, list):
             self._str = data
         else:
-            self._str = data.split('\n')
+            self._str = data.split('\n')  # store string as list of lines
+
         self.reset()

     def __getitem__(self, n):
         return self._str[n]

+    def reset(self):
+        self._l = 0  # current line nr

-class ParseError(Exception):
+    def read(self):
+        if not self.eof():
+            out = self[self._l]
+            self._l += 1
+            return out
+        else:
+            return ''
+
+    def seek_next_non_empty_line(self):
+        for l in self[self._l:]:
+            if l.strip():
+                break
+            else:
+                self._l += 1
+
+    def eof(self):
+        return self._l >= len(self._str)
+
+    def read_to_condition(self, condition_func):
+        start = self._l
+        for line in self[start:]:
+            if condition_func(line):
+                return self[start:self._l]
+            self._l += 1
+            if self.eof():
+                return self[start:self._l+1]
+        return []
+
+    def read_to_next_empty_line(self):
+        self.seek_next_non_empty_line()
+
+        def is_empty(line):
+            return not line.strip()
+
+        return self.read_to_condition(is_empty)
+
+    def read_to_next_unindented_line(self):
+        def is_unindented(line):
+            return (line.strip() and (len(line.lstrip()) == len(line)))
+        return self.read_to_condition(is_unindented)
+
+    def peek(self, n=0):
+        if self._l + n < len(self._str):
+            return self[self._l + n]
+        else:
+            return ''

+    def is_empty(self):
+        return not ''.join(self._str).strip()
+
+
+class ParseError(Exception):
     def __str__(self):
         message = self.args[0]
         if hasattr(self, 'docstring'):
-            message = f'{message} in {self.docstring!r}'
+            message = f"{message} in {self.docstring!r}"
         return message


@@ -84,17 +139,35 @@ class NumpyDocString(Mapping):
     Instances define a mapping from section title to structured data.

     """
-    sections = {'Signature': '', 'Summary': [''], 'Extended Summary': [],
-        'Parameters': [], 'Returns': [], 'Yields': [], 'Receives': [],
-        'Raises': [], 'Warns': [], 'Other Parameters': [], 'Attributes': [],
-        'Methods': [], 'See Also': [], 'Notes': [], 'Warnings': [],
-        'References': '', 'Examples': '', 'index': {}}
+
+    sections = {
+        'Signature': '',
+        'Summary': [''],
+        'Extended Summary': [],
+        'Parameters': [],
+        'Returns': [],
+        'Yields': [],
+        'Receives': [],
+        'Raises': [],
+        'Warns': [],
+        'Other Parameters': [],
+        'Attributes': [],
+        'Methods': [],
+        'See Also': [],
+        'Notes': [],
+        'Warnings': [],
+        'References': '',
+        'Examples': '',
+        'index': {}
+    }

     def __init__(self, docstring, config={}):
         orig_docstring = docstring
         docstring = textwrap.dedent(docstring).split('\n')
+
         self._doc = Reader(docstring)
         self._parsed_data = copy.deepcopy(self.sections)
+
         try:
             self._parse()
         except ParseError as e:
@@ -106,7 +179,7 @@ class NumpyDocString(Mapping):

     def __setitem__(self, key, val):
         if key not in self._parsed_data:
-            self._error_location(f'Unknown section {key}', error=False)
+            self._error_location(f"Unknown section {key}", error=False)
         else:
             self._parsed_data[key] = val

@@ -115,17 +188,112 @@ class NumpyDocString(Mapping):

     def __len__(self):
         return len(self._parsed_data)
-    _role = ':(?P<role>\\w+):'
-    _funcbacktick = '`(?P<name>(?:~\\w+\\.)?[a-zA-Z0-9_\\.-]+)`'
-    _funcplain = '(?P<name2>[a-zA-Z0-9_\\.-]+)'
-    _funcname = '(' + _role + _funcbacktick + '|' + _funcplain + ')'
+
+    def _is_at_section(self):
+        self._doc.seek_next_non_empty_line()
+
+        if self._doc.eof():
+            return False
+
+        l1 = self._doc.peek().strip()  # e.g. Parameters
+
+        if l1.startswith('.. index::'):
+            return True
+
+        l2 = self._doc.peek(1).strip()  # ---------- or ==========
+        return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
+
+    def _strip(self, doc):
+        i = 0
+        j = 0
+        for i, line in enumerate(doc):
+            if line.strip():
+                break
+
+        for j, line in enumerate(doc[::-1]):
+            if line.strip():
+                break
+
+        return doc[i:len(doc)-j]
+
+    def _read_to_next_section(self):
+        section = self._doc.read_to_next_empty_line()
+
+        while not self._is_at_section() and not self._doc.eof():
+            if not self._doc.peek(-1).strip():  # previous line was empty
+                section += ['']
+
+            section += self._doc.read_to_next_empty_line()
+
+        return section
+
+    def _read_sections(self):
+        while not self._doc.eof():
+            data = self._read_to_next_section()
+            name = data[0].strip()
+
+            if name.startswith('..'):  # index section
+                yield name, data[1:]
+            elif len(data) < 2:
+                yield StopIteration
+            else:
+                yield name, self._strip(data[2:])
+
+    def _parse_param_list(self, content, single_element_is_type=False):
+        r = Reader(content)
+        params = []
+        while not r.eof():
+            header = r.read().strip()
+            if ' : ' in header:
+                arg_name, arg_type = header.split(' : ')[:2]
+            else:
+                if single_element_is_type:
+                    arg_name, arg_type = '', header
+                else:
+                    arg_name, arg_type = header, ''
+
+            desc = r.read_to_next_unindented_line()
+            desc = dedent_lines(desc)
+            desc = strip_blank_lines(desc)
+
+            params.append(Parameter(arg_name, arg_type, desc))
+
+        return params
+
+    # See also supports the following formats.
+    #
+    # <FUNCNAME>
+    # <FUNCNAME> SPACE* COLON SPACE+ <DESC> SPACE*
+    # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)+ (COMMA | PERIOD)? SPACE*
+    # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)* SPACE* COLON SPACE+ <DESC> SPACE*
+
+    # <FUNCNAME> is one of
+    #   <PLAIN_FUNCNAME>
+    #   COLON <ROLE> COLON BACKTICK <PLAIN_FUNCNAME> BACKTICK
+    # where
+    #   <PLAIN_FUNCNAME> is a legal function name, and
+    #   <ROLE> is any nonempty sequence of word characters.
+    # Examples: func_f1  :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`
+    # <DESC> is a string describing the function.
+
+    _role = r":(?P<role>\w+):"
+    _funcbacktick = r"`(?P<name>(?:~\w+\.)?[a-zA-Z0-9_\.-]+)`"
+    _funcplain = r"(?P<name2>[a-zA-Z0-9_\.-]+)"
+    _funcname = r"(" + _role + _funcbacktick + r"|" + _funcplain + r")"
     _funcnamenext = _funcname.replace('role', 'rolenext')
     _funcnamenext = _funcnamenext.replace('name', 'namenext')
-    _description = '(?P<description>\\s*:(\\s+(?P<desc>\\S+.*))?)?\\s*$'
-    _func_rgx = re.compile('^\\s*' + _funcname + '\\s*')
-    _line_rgx = re.compile('^\\s*' + '(?P<allfuncs>' + _funcname +
-        '(?P<morefuncs>([,]\\s+' + _funcnamenext + ')*)' + ')' +
-        '(?P<trailing>[,\\.])?' + _description)
+    _description = r"(?P<description>\s*:(\s+(?P<desc>\S+.*))?)?\s*$"
+    _func_rgx = re.compile(r"^\s*" + _funcname + r"\s*")
+    _line_rgx = re.compile(
+        r"^\s*" +
+        r"(?P<allfuncs>" +        # group for all function names
+        _funcname +
+        r"(?P<morefuncs>([,]\s+" + _funcnamenext + r")*)" +
+        r")" +                     # end of "allfuncs"
+        r"(?P<trailing>[,\.])?" +   # Some function lists have a trailing comma (or period)  '\s*'
+        _description)
+
+    # Empty <DESC> elements are replaced with '..'
     empty_description = '..'

     def _parse_see_also(self, content):
@@ -136,7 +304,50 @@ class NumpyDocString(Mapping):
         func_name1, func_name2, :meth:`func_name`, func_name3

         """
-        pass
+
+        items = []
+
+        def parse_item_name(text):
+            """Match ':role:`name`' or 'name'."""
+            m = self._func_rgx.match(text)
+            if not m:
+                raise ParseError(f"{text} is not a item name")
+            role = m.group('role')
+            name = m.group('name') if role else m.group('name2')
+            return name, role, m.end()
+
+        rest = []
+        for line in content:
+            if not line.strip():
+                continue
+
+            line_match = self._line_rgx.match(line)
+            description = None
+            if line_match:
+                description = line_match.group('desc')
+                if line_match.group('trailing') and description:
+                    self._error_location(
+                        'Unexpected comma or period after function list at index %d of '
+                        'line "%s"' % (line_match.end('trailing'), line),
+                        error=False)
+            if not description and line.startswith(' '):
+                rest.append(line.strip())
+            elif line_match:
+                funcs = []
+                text = line_match.group('allfuncs')
+                while True:
+                    if not text.strip():
+                        break
+                    name, role, match_end = parse_item_name(text)
+                    funcs.append((name, role))
+                    text = text[match_end:].strip()
+                    if text and text[0] == ',':
+                        text = text[1:].strip()
+                rest = list(filter(None, [description]))
+                items.append((funcs, rest))
+            else:
+                raise ParseError(f"{line} is not a item name")
+        return items

     def _parse_index(self, section, content):
         """
@@ -144,11 +355,193 @@ class NumpyDocString(Mapping):
            :refguide: something, else, and more

         """
-        pass
+        def strip_each_in(lst):
+            return [s.strip() for s in lst]
+
+        out = {}
+        section = section.split('::')
+        if len(section) > 1:
+            out['default'] = strip_each_in(section[1].split(','))[0]
+        for line in content:
+            line = line.split(':')
+            if len(line) > 2:
+                out[line[1]] = strip_each_in(line[2].split(','))
+        return out

     def _parse_summary(self):
         """Grab signature (if given) and summary"""
-        pass
+        if self._is_at_section():
+            return
+
+        # If several signatures present, take the last one
+        while True:
+            summary = self._doc.read_to_next_empty_line()
+            summary_str = " ".join([s.strip() for s in summary]).strip()
+            compiled = re.compile(r'^([\w., ]+=)?\s*[\w\.]+\(.*\)$')
+            if compiled.match(summary_str):
+                self['Signature'] = summary_str
+                if not self._is_at_section():
+                    continue
+            break
+
+        if summary is not None:
+            self['Summary'] = summary
+
+        if not self._is_at_section():
+            self['Extended Summary'] = self._read_to_next_section()
+
+    def _parse(self):
+        self._doc.reset()
+        self._parse_summary()
+
+        sections = list(self._read_sections())
+        section_names = {section for section, content in sections}
+
+        has_returns = 'Returns' in section_names
+        has_yields = 'Yields' in section_names
+        # We could do more tests, but we are not. Arbitrarily.
+        if has_returns and has_yields:
+            msg = 'Docstring contains both a Returns and Yields section.'
+            raise ValueError(msg)
+        if not has_yields and 'Receives' in section_names:
+            msg = 'Docstring contains a Receives section but not Yields.'
+            raise ValueError(msg)
+
+        for (section, content) in sections:
+            if not section.startswith('..'):
+                section = (s.capitalize() for s in section.split(' '))
+                section = ' '.join(section)
+                if self.get(section):
+                    self._error_location(f"The section {section} appears twice")
+
+            if section in ('Parameters', 'Other Parameters', 'Attributes',
+                           'Methods'):
+                self[section] = self._parse_param_list(content)
+            elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):
+                self[section] = self._parse_param_list(
+                    content, single_element_is_type=True)
+            elif section.startswith('.. index::'):
+                self['index'] = self._parse_index(section, content)
+            elif section == 'See Also':
+                self['See Also'] = self._parse_see_also(content)
+            else:
+                self[section] = content
+
+    def _error_location(self, msg, error=True):
+        if hasattr(self, '_obj'):
+            # we know where the docs came from:
+            try:
+                filename = inspect.getsourcefile(self._obj)
+            except TypeError:
+                filename = None
+            msg = msg + f" in the docstring of {self._obj} in {filename}."
+        if error:
+            raise ValueError(msg)
+        else:
+            warn(msg)
+
+    # string conversion routines
+
+    def _str_header(self, name, symbol='-'):
+        return [name, len(name)*symbol]
+
+    def _str_indent(self, doc, indent=4):
+        out = []
+        for line in doc:
+            out += [' '*indent + line]
+        return out
+
+    def _str_signature(self):
+        if self['Signature']:
+            return [self['Signature'].replace('*', r'\*')] + ['']
+        else:
+            return ['']
+
+    def _str_summary(self):
+        if self['Summary']:
+            return self['Summary'] + ['']
+        else:
+            return []
+
+    def _str_extended_summary(self):
+        if self['Extended Summary']:
+            return self['Extended Summary'] + ['']
+        else:
+            return []
+
+    def _str_param_list(self, name):
+        out = []
+        if self[name]:
+            out += self._str_header(name)
+            for param in self[name]:
+                parts = []
+                if param.name:
+                    parts.append(param.name)
+                if param.type:
+                    parts.append(param.type)
+                out += [' : '.join(parts)]
+                if param.desc and ''.join(param.desc).strip():
+                    out += self._str_indent(param.desc)
+            out += ['']
+        return out
+
+    def _str_section(self, name):
+        out = []
+        if self[name]:
+            out += self._str_header(name)
+            out += self[name]
+            out += ['']
+        return out
+
+    def _str_see_also(self, func_role):
+        if not self['See Also']:
+            return []
+        out = []
+        out += self._str_header("See Also")
+        out += ['']
+        last_had_desc = True
+        for funcs, desc in self['See Also']:
+            assert isinstance(funcs, list)
+            links = []
+            for func, role in funcs:
+                if role:
+                    link = f':{role}:`{func}`'
+                elif func_role:
+                    link = f':{func_role}:`{func}`'
+                else:
+                    link = f"`{func}`_"
+                links.append(link)
+            link = ', '.join(links)
+            out += [link]
+            if desc:
+                out += self._str_indent([' '.join(desc)])
+                last_had_desc = True
+            else:
+                last_had_desc = False
+                out += self._str_indent([self.empty_description])
+
+        if last_had_desc:
+            out += ['']
+        out += ['']
+        return out
+
+    def _str_index(self):
+        idx = self['index']
+        out = []
+        output_index = False
+        default_index = idx.get('default', '')
+        if default_index:
+            output_index = True
+        out += [f'.. index:: {default_index}']
+        for section, references in idx.items():
+            if section == 'default':
+                continue
+            output_index = True
+            out += [f"   :{section}: {', '.join(references)}"]
+        if output_index:
+            return out
+        else:
+            return ''

     def __str__(self, func_role=''):
         out = []
@@ -156,7 +549,7 @@ class NumpyDocString(Mapping):
         out += self._str_summary()
         out += self._str_extended_summary()
         for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',
-            'Other Parameters', 'Raises', 'Warns'):
+                           'Other Parameters', 'Raises', 'Warns'):
             out += self._str_param_list(param_list)
         out += self._str_section('Warnings')
         out += self._str_see_also(func_role)
@@ -168,27 +561,41 @@ class NumpyDocString(Mapping):
         return '\n'.join(out)


+def indent(str, indent=4):
+    indent_str = ' '*indent
+    if str is None:
+        return indent_str
+    lines = str.split('\n')
+    return '\n'.join(indent_str + l for l in lines)
+
+
 def dedent_lines(lines):
     """Deindent a list of lines maximally"""
-    pass
+    return textwrap.dedent("\n".join(lines)).split("\n")


-class FunctionDoc(NumpyDocString):
+def header(text, style='-'):
+    return text + '\n' + style*len(text) + '\n'
+

+class FunctionDoc(NumpyDocString):
     def __init__(self, func, role='func', doc=None, config={}):
         self._f = func
-        self._role = role
+        self._role = role  # e.g. "func" or "meth"
+
         if doc is None:
             if func is None:
-                raise ValueError('No function or docstring given')
+                raise ValueError("No function or docstring given")
             doc = inspect.getdoc(func) or ''
         NumpyDocString.__init__(self, doc, config)
+
         if not self['Signature'] and func is not None:
             func, func_name = self.get_func()
             try:
                 try:
                     signature = str(inspect.signature(func))
                 except (AttributeError, ValueError):
+                    # try to read signature, backward compat for older Python
                     if sys.version_info[0] >= 3:
                         argspec = inspect.getfullargspec(func)
                     else:
@@ -199,63 +606,110 @@ class FunctionDoc(NumpyDocString):
                 signature = f'{func_name}()'
             self['Signature'] = signature

+    def get_func(self):
+        func_name = getattr(self._f, '__name__', self.__class__.__name__)
+        if inspect.isclass(self._f):
+            func = getattr(self._f, '__call__', self._f.__init__)
+        else:
+            func = self._f
+        return func, func_name
+
     def __str__(self):
         out = ''
+
         func, func_name = self.get_func()
-        roles = {'func': 'function', 'meth': 'method'}
+
+        roles = {'func': 'function',
+                 'meth': 'method'}
+
         if self._role:
             if self._role not in roles:
-                print(f'Warning: invalid role {self._role}')
+                print(f"Warning: invalid role {self._role}")
             out += f".. {roles.get(self._role, '')}:: {func_name}\n    \n\n"
+
         out += super().__str__(func_role=self._role)
         return out


 class ClassDoc(NumpyDocString):
+
     extra_public_methods = ['__call__']

     def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
-        config={}):
+                 config={}):
         if not inspect.isclass(cls) and cls is not None:
-            raise ValueError(f'Expected a class or None, but got {cls!r}')
+            raise ValueError(f"Expected a class or None, but got {cls!r}")
         self._cls = cls
+
         if 'sphinx' in sys.modules:
             from sphinx.ext.autodoc import ALL
         else:
             ALL = object()
-        self.show_inherited_members = config.get('show_inherited_class_members'
-            , True)
+
+        self.show_inherited_members = config.get(
+                    'show_inherited_class_members', True)
+
         if modulename and not modulename.endswith('.'):
             modulename += '.'
         self._mod = modulename
+
         if doc is None:
             if cls is None:
-                raise ValueError('No class or documentation string given')
+                raise ValueError("No class or documentation string given")
             doc = pydoc.getdoc(cls)
+
         NumpyDocString.__init__(self, doc)
+
         _members = config.get('members', [])
         if _members is ALL:
             _members = None
         _exclude = config.get('exclude-members', [])
-        if config.get('show_class_members', True) and _exclude is not ALL:

+        if config.get('show_class_members', True) and _exclude is not ALL:
             def splitlines_x(s):
                 if not s:
                     return []
                 else:
                     return s.splitlines()
-            for field, items in [('Methods', self.methods), ('Attributes',
-                self.properties)]:
+            for field, items in [('Methods', self.methods),
+                                 ('Attributes', self.properties)]:
                 if not self[field]:
                     doc_list = []
                     for name in sorted(items):
-                        if (name in _exclude or _members and name not in
-                            _members):
+                        if (name in _exclude or
+                                (_members and name not in _members)):
                             continue
                         try:
                             doc_item = pydoc.getdoc(getattr(self._cls, name))
-                            doc_list.append(Parameter(name, '',
-                                splitlines_x(doc_item)))
+                            doc_list.append(
+                                Parameter(name, '', splitlines_x(doc_item)))
                         except AttributeError:
-                            pass
+                            pass  # method doesn't exist
                     self[field] = doc_list
+
+    @property
+    def methods(self):
+        if self._cls is None:
+            return []
+        return [name for name, func in inspect.getmembers(self._cls)
+                if ((not name.startswith('_')
+                     or name in self.extra_public_methods)
+                    and isinstance(func, Callable)
+                    and self._is_show_member(name))]
+
+    @property
+    def properties(self):
+        if self._cls is None:
+            return []
+        return [name for name, func in inspect.getmembers(self._cls)
+                if (not name.startswith('_') and
+                    (func is None or isinstance(func, property) or
+                     inspect.isdatadescriptor(func))
+                    and self._is_show_member(name))]
+
+    def _is_show_member(self, name):
+        if self.show_inherited_members:
+            return True  # show all class members
+        if name not in self._cls.__dict__:
+            return False  # class member is inherited, we do not show it
+        return True
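
For reference, here is a minimal usage sketch of the docstring-introspection classes touched in the hunk above. The import path is an assumption (these vendored numpydoc classes live in seaborn's external docscrape module, whose file header is not shown in this hunk), and the `Example` class is purely hypothetical:

    # Hypothetical example -- module path assumed, not shown in this hunk.
    from seaborn.external.docscrape import ClassDoc

    class Example:
        """A small class used only to exercise ClassDoc."""

        def __init__(self, message):
            self.message = message

        def shout(self):
            """Return the message in upper case."""
            return self.message.upper()

    doc = ClassDoc(Example)
    print(doc.methods)      # ['shout'] -- public callables found via inspect
    print(doc.properties)   # [] -- no properties or data descriptors defined
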
diff --git a/seaborn/external/husl.py b/seaborn/external/husl.py
index f2d85a4d..63e98cbb 100644
--- a/seaborn/external/husl.py
+++ b/seaborn/external/husl.py
@@ -1,14 +1,313 @@
 import operator
 import math
-__version__ = '2.1.0'
-m = [[3.2406, -1.5372, -0.4986], [-0.9689, 1.8758, 0.0415], [0.0557, -0.204,
-    1.057]]
-m_inv = [[0.4124, 0.3576, 0.1805], [0.2126, 0.7152, 0.0722], [0.0193, 
-    0.1192, 0.9505]]
+
+__version__ = "2.1.0"
+
+
+m = [
+    [3.2406, -1.5372, -0.4986],
+    [-0.9689, 1.8758, 0.0415],
+    [0.0557, -0.2040, 1.0570]
+]
+
+m_inv = [
+    [0.4124, 0.3576, 0.1805],
+    [0.2126, 0.7152, 0.0722],
+    [0.0193, 0.1192, 0.9505]
+]
+
+# Hard-coded D65 illuminant
 refX = 0.95047
-refY = 1.0
+refY = 1.00000
 refZ = 1.08883
 refU = 0.19784
 refV = 0.46834
 lab_e = 0.008856
 lab_k = 903.3
+
+
+# Public API
+
+def husl_to_rgb(h, s, l):
+    return lch_to_rgb(*husl_to_lch([h, s, l]))
+
+
+def husl_to_hex(h, s, l):
+    return rgb_to_hex(husl_to_rgb(h, s, l))
+
+
+def rgb_to_husl(r, g, b):
+    return lch_to_husl(rgb_to_lch(r, g, b))
+
+
+def hex_to_husl(hex):
+    return rgb_to_husl(*hex_to_rgb(hex))
+
+
+def huslp_to_rgb(h, s, l):
+    return lch_to_rgb(*huslp_to_lch([h, s, l]))
+
+
+def huslp_to_hex(h, s, l):
+    return rgb_to_hex(huslp_to_rgb(h, s, l))
+
+
+def rgb_to_huslp(r, g, b):
+    return lch_to_huslp(rgb_to_lch(r, g, b))
+
+
+def hex_to_huslp(hex):
+    return rgb_to_huslp(*hex_to_rgb(hex))
+
+
+def lch_to_rgb(l, c, h):
+    return xyz_to_rgb(luv_to_xyz(lch_to_luv([l, c, h])))
+
+
+def rgb_to_lch(r, g, b):
+    return luv_to_lch(xyz_to_luv(rgb_to_xyz([r, g, b])))
+
+
+def max_chroma(L, H):
+    hrad = math.radians(H)
+    sinH = (math.sin(hrad))
+    cosH = (math.cos(hrad))
+    sub1 = (math.pow(L + 16, 3.0) / 1560896.0)
+    sub2 = sub1 if sub1 > 0.008856 else (L / 903.3)
+    result = float("inf")
+    for row in m:
+        m1 = row[0]
+        m2 = row[1]
+        m3 = row[2]
+        top = ((0.99915 * m1 + 1.05122 * m2 + 1.14460 * m3) * sub2)
+        rbottom = (0.86330 * m3 - 0.17266 * m2)
+        lbottom = (0.12949 * m3 - 0.38848 * m1)
+        bottom = (rbottom * sinH + lbottom * cosH) * sub2
+
+        for t in (0.0, 1.0):
+            C = (L * (top - 1.05122 * t) / (bottom + 0.17266 * sinH * t))
+            if C > 0.0 and C < result:
+                result = C
+    return result
+
+
+def _hrad_extremum(L):
+    lhs = (math.pow(L, 3.0) + 48.0 * math.pow(L, 2.0) + 768.0 * L + 4096.0) / 1560896.0
+    rhs = 1107.0 / 125000.0
+    sub = lhs if lhs > rhs else 10.0 * L / 9033.0
+    chroma = float("inf")
+    result = None
+    for row in m:
+        for limit in (0.0, 1.0):
+            [m1, m2, m3] = row
+            top = -3015466475.0 * m3 * sub + 603093295.0 * m2 * sub - 603093295.0 * limit
+            bottom = 1356959916.0 * m1 * sub - 452319972.0 * m3 * sub
+            hrad = math.atan2(top, bottom)
+            # Math hack to put the atan2 result into the right quadrant;
+            # a more principled derivation is left as a TODO
+            if limit == 0.0:
+                hrad += math.pi
+            test = max_chroma(L, math.degrees(hrad))
+            if test < chroma:
+                chroma = test
+                result = hrad
+    return result
+
+
+def max_chroma_pastel(L):
+    H = math.degrees(_hrad_extremum(L))
+    return max_chroma(L, H)
+
+
+def dot_product(a, b):
+    return sum(map(operator.mul, a, b))
+
+
+def f(t):
+    if t > lab_e:
+        return (math.pow(t, 1.0 / 3.0))
+    else:
+        return (7.787 * t + 16.0 / 116.0)
+
+
+def f_inv(t):
+    if math.pow(t, 3.0) > lab_e:
+        return (math.pow(t, 3.0))
+    else:
+        return (116.0 * t - 16.0) / lab_k
+
+
+def from_linear(c):
+    if c <= 0.0031308:
+        return 12.92 * c
+    else:
+        return (1.055 * math.pow(c, 1.0 / 2.4) - 0.055)
+
+
+def to_linear(c):
+    a = 0.055
+
+    if c > 0.04045:
+        return (math.pow((c + a) / (1.0 + a), 2.4))
+    else:
+        return (c / 12.92)
+
+
+def rgb_prepare(triple):
+    ret = []
+    for ch in triple:
+        ch = round(ch, 3)
+
+        if ch < -0.0001 or ch > 1.0001:
+            raise Exception(f"Illegal RGB value {ch:f}")
+
+        if ch < 0:
+            ch = 0
+        if ch > 1:
+            ch = 1
+
+        # Fix for Python 3, which by default rounds 4.5 down to 4.0
+        # (Python 2 rounded it up to 5.0), causing a couple of
+        # off-by-one errors in the tests. Tests now pass on both
+        # Python 2 and Python 3.
+        ret.append(int(round(ch * 255 + 0.001, 0)))
+
+    return ret
+
+
+def hex_to_rgb(hex):
+    if hex.startswith('#'):
+        hex = hex[1:]
+    r = int(hex[0:2], 16) / 255.0
+    g = int(hex[2:4], 16) / 255.0
+    b = int(hex[4:6], 16) / 255.0
+    return [r, g, b]
+
+
+def rgb_to_hex(triple):
+    [r, g, b] = triple
+    return '#%02x%02x%02x' % tuple(rgb_prepare([r, g, b]))
+
+
+def xyz_to_rgb(triple):
+    xyz = map(lambda row: dot_product(row, triple), m)
+    return list(map(from_linear, xyz))
+
+
+def rgb_to_xyz(triple):
+    rgbl = list(map(to_linear, triple))
+    return list(map(lambda row: dot_product(row, rgbl), m_inv))
+
+
+def xyz_to_luv(triple):
+    X, Y, Z = triple
+
+    if X == Y == Z == 0.0:
+        return [0.0, 0.0, 0.0]
+
+    varU = (4.0 * X) / (X + (15.0 * Y) + (3.0 * Z))
+    varV = (9.0 * Y) / (X + (15.0 * Y) + (3.0 * Z))
+    L = 116.0 * f(Y / refY) - 16.0
+
+    # Black will create a divide-by-zero error
+    if L == 0.0:
+        return [0.0, 0.0, 0.0]
+
+    U = 13.0 * L * (varU - refU)
+    V = 13.0 * L * (varV - refV)
+
+    return [L, U, V]
+
+
+def luv_to_xyz(triple):
+    L, U, V = triple
+
+    if L == 0:
+        return [0.0, 0.0, 0.0]
+
+    varY = f_inv((L + 16.0) / 116.0)
+    varU = U / (13.0 * L) + refU
+    varV = V / (13.0 * L) + refV
+    Y = varY * refY
+    X = 0.0 - (9.0 * Y * varU) / ((varU - 4.0) * varV - varU * varV)
+    Z = (9.0 * Y - (15.0 * varV * Y) - (varV * X)) / (3.0 * varV)
+
+    return [X, Y, Z]
+
+
+def luv_to_lch(triple):
+    L, U, V = triple
+
+    C = (math.pow(math.pow(U, 2) + math.pow(V, 2), (1.0 / 2.0)))
+    hrad = (math.atan2(V, U))
+    H = math.degrees(hrad)
+    if H < 0.0:
+        H = 360.0 + H
+
+    return [L, C, H]
+
+
+def lch_to_luv(triple):
+    L, C, H = triple
+
+    Hrad = math.radians(H)
+    U = (math.cos(Hrad) * C)
+    V = (math.sin(Hrad) * C)
+
+    return [L, U, V]
+
+
+def husl_to_lch(triple):
+    H, S, L = triple
+
+    if L > 99.9999999:
+        return [100, 0.0, H]
+    if L < 0.00000001:
+        return [0.0, 0.0, H]
+
+    mx = max_chroma(L, H)
+    C = mx / 100.0 * S
+
+    return [L, C, H]
+
+
+def lch_to_husl(triple):
+    L, C, H = triple
+
+    if L > 99.9999999:
+        return [H, 0.0, 100.0]
+    if L < 0.00000001:
+        return [H, 0.0, 0.0]
+
+    mx = max_chroma(L, H)
+    S = C / mx * 100.0
+
+    return [H, S, L]
+
+
+def huslp_to_lch(triple):
+    H, S, L = triple
+
+    if L > 99.9999999:
+        return [100, 0.0, H]
+    if L < 0.00000001:
+        return [0.0, 0.0, H]
+
+    mx = max_chroma_pastel(L)
+    C = mx / 100.0 * S
+
+    return [L, C, H]
+
+
+def lch_to_huslp(triple):
+    L, C, H = triple
+
+    if L > 99.9999999:
+        return [H, 0.0, 100.0]
+    if L < 0.00000001:
+        return [H, 0.0, 0.0]
+
+    mx = max_chroma_pastel(L)
+    S = C / mx * 100.0
+
+    return [H, S, L]
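
A quick round-trip sketch of the public helpers added above. seaborn/external/husl.py is a vendored internal module, so the import below is only illustrative:

    from seaborn.external import husl

    hex_color = husl.husl_to_hex(260, 80, 60)  # hue in [0, 360); S, L in [0, 100]
    r, g, b = husl.husl_to_rgb(260, 80, 60)    # floats in the 0-1 range
    h, s, l = husl.hex_to_husl(hex_color)      # approximate round trip
    print(hex_color, (round(h), round(s), round(l)))
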
diff --git a/seaborn/external/kde.py b/seaborn/external/kde.py
index 4765f446..6add4e19 100644
--- a/seaborn/external/kde.py
+++ b/seaborn/external/kde.py
@@ -49,9 +49,32 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

 """
+
+# -------------------------------------------------------------------------------
+#
+#  Define classes for (uni/multi)-variate kernel density estimation.
+#
+#  Currently, only Gaussian kernels are implemented.
+#
+#  Written by: Robert Kern
+#
+#  Date: 2004-08-09
+#
+#  Modified: 2005-02-10 by Robert Kern.
+#              Contributed to SciPy
+#            2005-10-07 by Robert Kern.
+#              Some fixes to match the new scipy_core
+#
+#  Copyright 2004-2005 by Enthought, Inc.
+#
+# -------------------------------------------------------------------------------
+
 import numpy as np
-from numpy import asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, power, atleast_1d, sum, ones, cov
+from numpy import (asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi,
+                   sqrt, power, atleast_1d, sum, ones, cov)
 from numpy import linalg
+
+
 __all__ = ['gaussian_kde']


@@ -168,20 +191,22 @@ class gaussian_kde:
            Series A (General), 132, 272

     """
-
     def __init__(self, dataset, bw_method=None, weights=None):
         self.dataset = atleast_2d(asarray(dataset))
         if not self.dataset.size > 1:
-            raise ValueError('`dataset` input should have multiple elements.')
+            raise ValueError("`dataset` input should have multiple elements.")
+
         self.d, self.n = self.dataset.shape
+
         if weights is not None:
             self._weights = atleast_1d(weights).astype(float)
             self._weights /= sum(self._weights)
             if self.weights.ndim != 1:
-                raise ValueError('`weights` input should be one-dimensional.')
+                raise ValueError("`weights` input should be one-dimensional.")
             if len(self._weights) != self.n:
-                raise ValueError('`weights` input should be of length n')
-            self._neff = 1 / sum(self._weights ** 2)
+                raise ValueError("`weights` input should be of length n")
+            self._neff = 1/sum(self._weights**2)
+
         self.set_bandwidth(bw_method=bw_method)

     def evaluate(self, points):
@@ -204,7 +229,42 @@ class gaussian_kde:
                      the dimensionality of the KDE.

         """
-        pass
+        points = atleast_2d(asarray(points))
+
+        d, m = points.shape
+        if d != self.d:
+            if d == 1 and m == self.d:
+                # points was passed in as a row vector
+                points = reshape(points, (self.d, 1))
+                m = 1
+            else:
+                msg = f"points have dimension {d}, dataset has dimension {self.d}"
+                raise ValueError(msg)
+
+        output_dtype = np.common_type(self.covariance, points)
+        result = zeros((m,), dtype=output_dtype)
+
+        whitening = linalg.cholesky(self.inv_cov)
+        scaled_dataset = dot(whitening, self.dataset)
+        scaled_points = dot(whitening, points)
+
+        if m >= self.n:
+            # there are more points than data, so loop over data
+            for i in range(self.n):
+                diff = scaled_dataset[:, i, newaxis] - scaled_points
+                energy = sum(diff * diff, axis=0) / 2.0
+                result += self.weights[i]*exp(-energy)
+        else:
+            # loop over points
+            for i in range(m):
+                diff = scaled_dataset - scaled_points[:, i, newaxis]
+                energy = sum(diff * diff, axis=0) / 2.0
+                result[i] = sum(exp(-energy)*self.weights, axis=0)
+
+        result = result / self._norm_factor
+
+        return result
+
     __call__ = evaluate

     def scotts_factor(self):
@@ -215,7 +275,7 @@ class gaussian_kde:
         s : float
             Scott's factor.
         """
-        pass
+        return power(self.neff, -1./(self.d+4))

     def silverman_factor(self):
         """Compute the Silverman factor.
@@ -225,7 +285,9 @@ class gaussian_kde:
         s : float
             The silverman factor.
         """
-        pass
+        return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))
+
+    #  Default method to calculate bandwidth, can be overwritten by subclass
     covariance_factor = scotts_factor
     covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
         multiplies the data covariance matrix to obtain the kernel covariance
@@ -254,13 +316,40 @@ class gaussian_kde:
         .. versionadded:: 0.11

         """
-        pass
+        if bw_method is None:
+            pass
+        elif bw_method == 'scott':
+            self.covariance_factor = self.scotts_factor
+        elif bw_method == 'silverman':
+            self.covariance_factor = self.silverman_factor
+        elif np.isscalar(bw_method) and not isinstance(bw_method, str):
+            self._bw_method = 'use constant'
+            self.covariance_factor = lambda: bw_method
+        elif callable(bw_method):
+            self._bw_method = bw_method
+            self.covariance_factor = lambda: self._bw_method(self)
+        else:
+            msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
+                  "or a callable."
+            raise ValueError(msg)
+
+        self._compute_covariance()

     def _compute_covariance(self):
         """Computes the covariance matrix for each Gaussian kernel using
         covariance_factor().
         """
-        pass
+        self.factor = self.covariance_factor()
+        # Cache covariance and inverse covariance of the data
+        if not hasattr(self, '_data_inv_cov'):
+            self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
+                                               bias=False,
+                                               aweights=self.weights))
+            self._data_inv_cov = linalg.inv(self._data_covariance)
+
+        self.covariance = self._data_covariance * self.factor**2
+        self.inv_cov = self._data_inv_cov / self.factor**2
+        self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))

     def pdf(self, x):
         """
@@ -272,4 +361,20 @@ class gaussian_kde:
         docstring for more details.

         """
-        pass
+        return self.evaluate(x)
+
+    @property
+    def weights(self):
+        try:
+            return self._weights
+        except AttributeError:
+            self._weights = ones(self.n)/self.n
+            return self._weights
+
+    @property
+    def neff(self):
+        try:
+            return self._neff
+        except AttributeError:
+            self._neff = 1/sum(self.weights**2)
+            return self._neff
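
A minimal sketch of the vendored `gaussian_kde` filled in above (module path seaborn/external/kde.py, as shown in the diff header). The data and weights are made up for illustration:

    import numpy as np
    from seaborn.external.kde import gaussian_kde

    rng = np.random.default_rng(0)
    data = rng.normal(size=200)
    weights = rng.uniform(0.5, 1.5, size=200)

    # Weighted KDE with the Silverman rule for the bandwidth factor
    kde = gaussian_kde(data, bw_method="silverman", weights=weights)
    grid = np.linspace(-4, 4, 101)
    density = kde(grid)                         # __call__ is an alias for evaluate()
    print(kde.factor)                           # bandwidth (covariance) factor
    print(density.sum() * (grid[1] - grid[0]))  # integrates to roughly 1.0
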
diff --git a/seaborn/external/version.py b/seaborn/external/version.py
index 1dfe1611..7eb57d32 100644
--- a/seaborn/external/version.py
+++ b/seaborn/external/version.py
@@ -14,40 +14,49 @@ Vendored from:
 - https://github.com/pypa/packaging/
 - commit ba07d8287b4554754ac7178d177033ea3f75d489 (09/09/2021)
 """
+
+
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+
 import collections
 import itertools
 import re
 from typing import Callable, Optional, SupportsInt, Tuple, Union
-__all__ = ['Version', 'InvalidVersion', 'VERSION_PATTERN']

+__all__ = ["Version", "InvalidVersion", "VERSION_PATTERN"]

-class InfinityType:

-    def __repr__(self) ->str:
-        return 'Infinity'
+# Vendored from https://github.com/pypa/packaging/blob/main/packaging/_structures.py
+
+class InfinityType:
+    def __repr__(self) -> str:
+        return "Infinity"

-    def __hash__(self) ->int:
+    def __hash__(self) -> int:
         return hash(repr(self))

-    def __lt__(self, other: object) ->bool:
+    def __lt__(self, other: object) -> bool:
         return False

-    def __le__(self, other: object) ->bool:
+    def __le__(self, other: object) -> bool:
         return False

-    def __eq__(self, other: object) ->bool:
+    def __eq__(self, other: object) -> bool:
         return isinstance(other, self.__class__)

-    def __ne__(self, other: object) ->bool:
+    def __ne__(self, other: object) -> bool:
         return not isinstance(other, self.__class__)

-    def __gt__(self, other: object) ->bool:
+    def __gt__(self, other: object) -> bool:
         return True

-    def __ge__(self, other: object) ->bool:
+    def __ge__(self, other: object) -> bool:
         return True

-    def __neg__(self: object) ->'NegativeInfinityType':
+    def __neg__(self: object) -> "NegativeInfinityType":
         return NegativeInfinity


@@ -55,48 +64,65 @@ Infinity = InfinityType()


 class NegativeInfinityType:
+    def __repr__(self) -> str:
+        return "-Infinity"

-    def __repr__(self) ->str:
-        return '-Infinity'
-
-    def __hash__(self) ->int:
+    def __hash__(self) -> int:
         return hash(repr(self))

-    def __lt__(self, other: object) ->bool:
+    def __lt__(self, other: object) -> bool:
         return True

-    def __le__(self, other: object) ->bool:
+    def __le__(self, other: object) -> bool:
         return True

-    def __eq__(self, other: object) ->bool:
+    def __eq__(self, other: object) -> bool:
         return isinstance(other, self.__class__)

-    def __ne__(self, other: object) ->bool:
+    def __ne__(self, other: object) -> bool:
         return not isinstance(other, self.__class__)

-    def __gt__(self, other: object) ->bool:
+    def __gt__(self, other: object) -> bool:
         return False

-    def __ge__(self, other: object) ->bool:
+    def __ge__(self, other: object) -> bool:
         return False

-    def __neg__(self: object) ->InfinityType:
+    def __neg__(self: object) -> InfinityType:
         return Infinity


 NegativeInfinity = NegativeInfinityType()
+
+
+# Vendored from https://github.com/pypa/packaging/blob/main/packaging/version.py
+
 InfiniteTypes = Union[InfinityType, NegativeInfinityType]
 PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
 SubLocalType = Union[InfiniteTypes, int, str]
-LocalType = Union[NegativeInfinityType, Tuple[Union[SubLocalType, Tuple[
-    SubLocalType, str], Tuple[NegativeInfinityType, SubLocalType]], ...]]
-CmpKey = Tuple[int, Tuple[int, ...], PrePostDevType, PrePostDevType,
-    PrePostDevType, LocalType]
+LocalType = Union[
+    NegativeInfinityType,
+    Tuple[
+        Union[
+            SubLocalType,
+            Tuple[SubLocalType, str],
+            Tuple[NegativeInfinityType, SubLocalType],
+        ],
+        ...,
+    ],
+]
+CmpKey = Tuple[
+    int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
+]
 LegacyCmpKey = Tuple[int, Tuple[str, ...]]
-VersionComparisonMethod = Callable[[Union[CmpKey, LegacyCmpKey], Union[
-    CmpKey, LegacyCmpKey]], bool]
-_Version = collections.namedtuple('_Version', ['epoch', 'release', 'dev',
-    'pre', 'post', 'local'])
+VersionComparisonMethod = Callable[
+    [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
+]
+
+_Version = collections.namedtuple(
+    "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
+)
+


 class InvalidVersion(ValueError):
@@ -108,116 +134,328 @@ class InvalidVersion(ValueError):
 class _BaseVersion:
     _key: Union[CmpKey, LegacyCmpKey]

-    def __hash__(self) ->int:
+    def __hash__(self) -> int:
         return hash(self._key)

-    def __lt__(self, other: '_BaseVersion') ->bool:
+    # Please keep the duplicated `isinstance` check
+    # in the six comparisons hereunder
+    # unless you find a way to avoid adding overhead function calls.
+    def __lt__(self, other: "_BaseVersion") -> bool:
         if not isinstance(other, _BaseVersion):
             return NotImplemented
+
         return self._key < other._key

-    def __le__(self, other: '_BaseVersion') ->bool:
+    def __le__(self, other: "_BaseVersion") -> bool:
         if not isinstance(other, _BaseVersion):
             return NotImplemented
+
         return self._key <= other._key

-    def __eq__(self, other: object) ->bool:
+    def __eq__(self, other: object) -> bool:
         if not isinstance(other, _BaseVersion):
             return NotImplemented
+
         return self._key == other._key

-    def __ge__(self, other: '_BaseVersion') ->bool:
+    def __ge__(self, other: "_BaseVersion") -> bool:
         if not isinstance(other, _BaseVersion):
             return NotImplemented
+
         return self._key >= other._key

-    def __gt__(self, other: '_BaseVersion') ->bool:
+    def __gt__(self, other: "_BaseVersion") -> bool:
         if not isinstance(other, _BaseVersion):
             return NotImplemented
+
         return self._key > other._key

-    def __ne__(self, other: object) ->bool:
+    def __ne__(self, other: object) -> bool:
         if not isinstance(other, _BaseVersion):
             return NotImplemented
+
         return self._key != other._key


-VERSION_PATTERN = """
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
     v?
     (?:
         (?:(?P<epoch>[0-9]+)!)?                           # epoch
-        (?P<release>[0-9]+(?:\\.[0-9]+)*)                  # release segment
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
         (?P<pre>                                          # pre-release
-            [-_\\.]?
+            [-_\.]?
             (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
-            [-_\\.]?
+            [-_\.]?
             (?P<pre_n>[0-9]+)?
         )?
         (?P<post>                                         # post release
             (?:-(?P<post_n1>[0-9]+))
             |
             (?:
-                [-_\\.]?
+                [-_\.]?
                 (?P<post_l>post|rev|r)
-                [-_\\.]?
+                [-_\.]?
                 (?P<post_n2>[0-9]+)?
             )
         )?
         (?P<dev>                                          # dev release
-            [-_\\.]?
+            [-_\.]?
             (?P<dev_l>dev)
-            [-_\\.]?
+            [-_\.]?
             (?P<dev_n>[0-9]+)?
         )?
     )
-    (?:\\+(?P<local>[a-z0-9]+(?:[-_\\.][a-z0-9]+)*))?       # local version
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
 """


 class Version(_BaseVersion):
-    _regex = re.compile('^\\s*' + VERSION_PATTERN + '\\s*$', re.VERBOSE |
-        re.IGNORECASE)

-    def __init__(self, version: str) ->None:
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    def __init__(self, version: str) -> None:
+
+        # Validate the version and parse it into pieces
         match = self._regex.search(version)
         if not match:
             raise InvalidVersion(f"Invalid version: '{version}'")
-        self._version = _Version(epoch=int(match.group('epoch')) if match.
-            group('epoch') else 0, release=tuple(int(i) for i in match.
-            group('release').split('.')), pre=_parse_letter_version(match.
-            group('pre_l'), match.group('pre_n')), post=
-            _parse_letter_version(match.group('post_l'), match.group(
-            'post_n1') or match.group('post_n2')), dev=
-            _parse_letter_version(match.group('dev_l'), match.group('dev_n'
-            )), local=_parse_local_version(match.group('local')))
-        self._key = _cmpkey(self._version.epoch, self._version.release,
-            self._version.pre, self._version.post, self._version.dev, self.
-            _version.local)
-
-    def __repr__(self) ->str:
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self) -> str:
         return f"<Version('{self}')>"

-    def __str__(self) ->str:
+    def __str__(self) -> str:
         parts = []
+
+        # Epoch
         if self.epoch != 0:
-            parts.append(f'{self.epoch}!')
-        parts.append('.'.join(str(x) for x in self.release))
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        # Pre-release
         if self.pre is not None:
-            parts.append(''.join(str(x) for x in self.pre))
+            parts.append("".join(str(x) for x in self.pre))
+
+        # Post-release
         if self.post is not None:
-            parts.append(f'.post{self.post}')
+            parts.append(f".post{self.post}")
+
+        # Development release
         if self.dev is not None:
-            parts.append(f'.dev{self.dev}')
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
         if self.local is not None:
-            parts.append(f'+{self.local}')
-        return ''.join(parts)
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
+
+    @property
+    def epoch(self) -> int:
+        _epoch: int = self._version.epoch
+        return _epoch
+
+    @property
+    def release(self) -> Tuple[int, ...]:
+        _release: Tuple[int, ...] = self._version.release
+        return _release
+
+    @property
+    def pre(self) -> Optional[Tuple[str, int]]:
+        _pre: Optional[Tuple[str, int]] = self._version.pre
+        return _pre
+
+    @property
+    def post(self) -> Optional[int]:
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self) -> Optional[int]:
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self) -> Optional[str]:
+        if self._version.local:
+            return ".".join(str(x) for x in self._version.local)
+        else:
+            return None
+
+    @property
+    def public(self) -> str:
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self) -> str:
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self) -> bool:
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self) -> bool:
+        return self.post is not None
+
+    @property
+    def is_devrelease(self) -> bool:
+        return self.dev is not None
+
+    @property
+    def major(self) -> int:
+        return self.release[0] if len(self.release) >= 1 else 0

+    @property
+    def minor(self) -> int:
+        return self.release[1] if len(self.release) >= 2 else 0

-_local_version_separators = re.compile('[\\._-]')
+    @property
+    def micro(self) -> int:
+        return self.release[2] if len(self.release) >= 3 else 0


-def _parse_local_version(local: str) ->Optional[LocalType]:
+def _parse_letter_version(
+    letter: str, number: Union[str, bytes, SupportsInt]
+) -> Optional[Tuple[str, int]]:
+
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: str) -> Optional[LocalType]:
     """
     Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
     """
-    pass
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
+
+
+def _cmpkey(
+    epoch: int,
+    release: Tuple[int, ...],
+    pre: Optional[Tuple[str, int]],
+    post: Optional[Tuple[str, int]],
+    dev: Optional[Tuple[str, int]],
+    local: Optional[Tuple[SubLocalType]],
+) -> CmpKey:
+
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all the now
+    # leading zeros until we come to something non-zero, re-reverse it back
+    # into the correct order, and make it a tuple to use as our sorting key.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre: PrePostDevType = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post: PrePostDevType = NegativeInfinity
+
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev: PrePostDevType = Infinity
+
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local: LocalType = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
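
A short sketch of the vendored `Version` class completed above (seaborn/external/version.py), showing the PEP 440 ordering and the convenience properties; the version strings are arbitrary examples:

    from seaborn.external.version import Version

    assert Version("1.2.0") > Version("1.2.0rc1") > Version("1.2.0.dev0")

    v = Version("1.2.1.post1+local.3")
    print(v.release, v.post, v.local, v.base_version, v.is_prerelease)
    # (1, 2, 1) 1 local.3 1.2.1 False
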
diff --git a/seaborn/matrix.py b/seaborn/matrix.py
index c8e407f8..6b99c118 100644
--- a/seaborn/matrix.py
+++ b/seaborn/matrix.py
@@ -1,5 +1,6 @@
 """Functions to visualize matrices of data."""
 import warnings
+
 import matplotlib as mpl
 from matplotlib.collections import LineCollection
 import matplotlib.pyplot as plt
@@ -11,26 +12,49 @@ try:
     _no_scipy = False
 except ImportError:
     _no_scipy = True
+
 from . import cm
 from .axisgrid import Grid
 from ._compat import get_colormap
-from .utils import despine, axis_ticklabels_overlap, relative_luminance, to_utf8, _draw_figure
-__all__ = ['heatmap', 'clustermap']
+from .utils import (
+    despine,
+    axis_ticklabels_overlap,
+    relative_luminance,
+    to_utf8,
+    _draw_figure,
+)
+
+
+__all__ = ["heatmap", "clustermap"]


 def _index_to_label(index):
     """Convert a pandas index or multiindex to an axis label."""
-    pass
+    if isinstance(index, pd.MultiIndex):
+        return "-".join(map(to_utf8, index.names))
+    else:
+        return index.name


 def _index_to_ticklabels(index):
     """Convert a pandas index or multiindex into ticklabels."""
-    pass
+    if isinstance(index, pd.MultiIndex):
+        return ["-".join(map(to_utf8, i)) for i in index.values]
+    else:
+        return index.values


 def _convert_colors(colors):
     """Convert either a list of colors or nested lists of colors to RGB."""
-    pass
+    to_rgb = mpl.colors.to_rgb
+
+    try:
+        to_rgb(colors[0])
+        # If this works, there is only one level of colors
+        return list(map(to_rgb, colors))
+    except ValueError:
+        # If we get here, we have nested lists
+        return [list(map(to_rgb, color_list)) for color_list in colors]


 def _matrix_mask(data, mask):
@@ -42,23 +66,55 @@ def _matrix_mask(data, mask):
     a DataFrame.

     """
-    pass
+    if mask is None:
+        mask = np.zeros(data.shape, bool)
+
+    if isinstance(mask, np.ndarray):
+        # For array masks, ensure that shape matches data then convert
+        if mask.shape != data.shape:
+            raise ValueError("Mask must have the same shape as data.")
+
+        mask = pd.DataFrame(mask,
+                            index=data.index,
+                            columns=data.columns,
+                            dtype=bool)
+
+    elif isinstance(mask, pd.DataFrame):
+        # For DataFrame masks, ensure that semantic labels match data
+        if not mask.index.equals(data.index) \
+           and mask.columns.equals(data.columns):
+            err = "Mask must have the same index and columns as data."
+            raise ValueError(err)
+
+    # Add any cells with missing data to the mask
+    # This works around an issue where `plt.pcolormesh` doesn't represent
+    # missing data properly
+    mask = mask | pd.isnull(data)
+
+    return mask


 class _HeatMapper:
     """Draw a heatmap plot of a matrix with nice labels and colormaps."""

     def __init__(self, data, vmin, vmax, cmap, center, robust, annot, fmt,
-        annot_kws, cbar, cbar_kws, xticklabels=True, yticklabels=True, mask
-        =None):
+                 annot_kws, cbar, cbar_kws,
+                 xticklabels=True, yticklabels=True, mask=None):
         """Initialize the plotting object."""
+        # We always want to have a DataFrame with semantic information
+        # and an ndarray to pass to matplotlib
         if isinstance(data, pd.DataFrame):
             plot_data = data.values
         else:
             plot_data = np.asarray(data)
             data = pd.DataFrame(plot_data)
+
+        # Validate the mask and convert to DataFrame
         mask = _matrix_mask(data, mask)
+
         plot_data = np.ma.masked_where(np.asarray(mask), plot_data)
+
+        # Get good names for the rows and columns
         xtickevery = 1
         if isinstance(xticklabels, int):
             xtickevery = xticklabels
@@ -67,6 +123,7 @@ class _HeatMapper:
             xticklabels = _index_to_ticklabels(data.columns)
         elif xticklabels is False:
             xticklabels = []
+
         ytickevery = 1
         if isinstance(yticklabels, int):
             ytickevery = yticklabels
@@ -75,30 +132,38 @@ class _HeatMapper:
             yticklabels = _index_to_ticklabels(data.index)
         elif yticklabels is False:
             yticklabels = []
+
         if not len(xticklabels):
             self.xticks = []
             self.xticklabels = []
-        elif isinstance(xticklabels, str) and xticklabels == 'auto':
-            self.xticks = 'auto'
+        elif isinstance(xticklabels, str) and xticklabels == "auto":
+            self.xticks = "auto"
             self.xticklabels = _index_to_ticklabels(data.columns)
         else:
             self.xticks, self.xticklabels = self._skip_ticks(xticklabels,
-                xtickevery)
+                                                             xtickevery)
+
         if not len(yticklabels):
             self.yticks = []
             self.yticklabels = []
-        elif isinstance(yticklabels, str) and yticklabels == 'auto':
-            self.yticks = 'auto'
+        elif isinstance(yticklabels, str) and yticklabels == "auto":
+            self.yticks = "auto"
             self.yticklabels = _index_to_ticklabels(data.index)
         else:
             self.yticks, self.yticklabels = self._skip_ticks(yticklabels,
-                ytickevery)
+                                                             ytickevery)
+
+        # Get good names for the axis labels
         xlabel = _index_to_label(data.columns)
         ylabel = _index_to_label(data.index)
-        self.xlabel = xlabel if xlabel is not None else ''
-        self.ylabel = ylabel if ylabel is not None else ''
-        self._determine_cmap_params(plot_data, vmin, vmax, cmap, center, robust
-            )
+        self.xlabel = xlabel if xlabel is not None else ""
+        self.ylabel = ylabel if ylabel is not None else ""
+
+        # Determine good default values for the colormapping
+        self._determine_cmap_params(plot_data, vmin, vmax,
+                                    cmap, center, robust)
+
+        # Sort out the annotations
         if annot is None or annot is False:
             annot = False
             annot_data = None
@@ -108,44 +173,195 @@ class _HeatMapper:
             else:
                 annot_data = np.asarray(annot)
                 if annot_data.shape != plot_data.shape:
-                    err = '`data` and `annot` must have same shape.'
+                    err = "`data` and `annot` must have same shape."
                     raise ValueError(err)
             annot = True
+
+        # Save other attributes to the object
         self.data = data
         self.plot_data = plot_data
+
         self.annot = annot
         self.annot_data = annot_data
+
         self.fmt = fmt
         self.annot_kws = {} if annot_kws is None else annot_kws.copy()
         self.cbar = cbar
         self.cbar_kws = {} if cbar_kws is None else cbar_kws.copy()

-    def _determine_cmap_params(self, plot_data, vmin, vmax, cmap, center,
-        robust):
+    def _determine_cmap_params(self, plot_data, vmin, vmax,
+                               cmap, center, robust):
         """Use some heuristics to set good defaults for colorbar and range."""
-        pass
+
+        # plot_data is a np.ma.array instance
+        calc_data = plot_data.astype(float).filled(np.nan)
+        if vmin is None:
+            if robust:
+                vmin = np.nanpercentile(calc_data, 2)
+            else:
+                vmin = np.nanmin(calc_data)
+        if vmax is None:
+            if robust:
+                vmax = np.nanpercentile(calc_data, 98)
+            else:
+                vmax = np.nanmax(calc_data)
+        self.vmin, self.vmax = vmin, vmax
+
+        # Choose default colormaps if not provided
+        if cmap is None:
+            if center is None:
+                self.cmap = cm.rocket
+            else:
+                self.cmap = cm.icefire
+        elif isinstance(cmap, str):
+            self.cmap = get_colormap(cmap)
+        elif isinstance(cmap, list):
+            self.cmap = mpl.colors.ListedColormap(cmap)
+        else:
+            self.cmap = cmap
+
+        # Recenter a divergent colormap
+        if center is not None:
+
+            # Copy bad values
+            # in mpl<3.2 only masked values are honored with "bad" color spec
+            # (see https://github.com/matplotlib/matplotlib/pull/14257)
+            bad = self.cmap(np.ma.masked_invalid([np.nan]))[0]
+
+            # under/over values are set for sure when cmap extremes
+            # do not map to the same color as +-inf
+            under = self.cmap(-np.inf)
+            over = self.cmap(np.inf)
+            under_set = under != self.cmap(0)
+            over_set = over != self.cmap(self.cmap.N - 1)
+
+            vrange = max(vmax - center, center - vmin)
+            normlize = mpl.colors.Normalize(center - vrange, center + vrange)
+            cmin, cmax = normlize([vmin, vmax])
+            cc = np.linspace(cmin, cmax, 256)
+            self.cmap = mpl.colors.ListedColormap(self.cmap(cc))
+            self.cmap.set_bad(bad)
+            if under_set:
+                self.cmap.set_under(under)
+            if over_set:
+                self.cmap.set_over(over)

     def _annotate_heatmap(self, ax, mesh):
         """Add textual labels with the value in each cell."""
-        pass
+        mesh.update_scalarmappable()
+        height, width = self.annot_data.shape
+        xpos, ypos = np.meshgrid(np.arange(width) + .5, np.arange(height) + .5)
+        for x, y, m, color, val in zip(xpos.flat, ypos.flat,
+                                       mesh.get_array().flat, mesh.get_facecolors(),
+                                       self.annot_data.flat):
+            if m is not np.ma.masked:
+                lum = relative_luminance(color)
+                text_color = ".15" if lum > .408 else "w"
+                annotation = ("{:" + self.fmt + "}").format(val)
+                text_kwargs = dict(color=text_color, ha="center", va="center")
+                text_kwargs.update(self.annot_kws)
+                ax.text(x, y, annotation, **text_kwargs)

     def _skip_ticks(self, labels, tickevery):
         """Return ticks and labels at evenly spaced intervals."""
-        pass
+        n = len(labels)
+        if tickevery == 0:
+            ticks, labels = [], []
+        elif tickevery == 1:
+            ticks, labels = np.arange(n) + .5, labels
+        else:
+            start, end, step = 0, n, tickevery
+            ticks = np.arange(start, end, step) + .5
+            labels = labels[start:end:step]
+        return ticks, labels

     def _auto_ticks(self, ax, labels, axis):
         """Determine ticks and ticklabels that minimize overlap."""
-        pass
+        transform = ax.figure.dpi_scale_trans.inverted()
+        bbox = ax.get_window_extent().transformed(transform)
+        size = [bbox.width, bbox.height][axis]
+        axis = [ax.xaxis, ax.yaxis][axis]
+        tick, = axis.set_ticks([0])
+        fontsize = tick.label1.get_size()
+        max_ticks = int(size // (fontsize / 72))
+        if max_ticks < 1:
+            return [], []
+        tick_every = len(labels) // max_ticks + 1
+        tick_every = 1 if tick_every == 0 else tick_every
+        ticks, labels = self._skip_ticks(labels, tick_every)
+        return ticks, labels

     def plot(self, ax, cax, kws):
         """Draw the heatmap on the provided Axes."""
-        pass
-
+        # Remove all the Axes spines
+        despine(ax=ax, left=True, bottom=True)
+
+        # setting vmin/vmax in addition to norm is deprecated
+        # so avoid setting if norm is set
+        if kws.get("norm") is None:
+            kws.setdefault("vmin", self.vmin)
+            kws.setdefault("vmax", self.vmax)
+
+        # Draw the heatmap
+        mesh = ax.pcolormesh(self.plot_data, cmap=self.cmap, **kws)
+
+        # Set the axis limits
+        ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))
+
+        # Invert the y axis to show the plot in matrix form
+        ax.invert_yaxis()
+
+        # Possibly add a colorbar
+        if self.cbar:
+            cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)
+            cb.outline.set_linewidth(0)
+            # If rasterized is passed to pcolormesh, also rasterize the
+            # colorbar to avoid white lines on the PDF rendering
+            if kws.get('rasterized', False):
+                cb.solids.set_rasterized(True)
+
+        # Add row and column labels
+        if isinstance(self.xticks, str) and self.xticks == "auto":
+            xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)
+        else:
+            xticks, xticklabels = self.xticks, self.xticklabels

-def heatmap(data, *, vmin=None, vmax=None, cmap=None, center=None, robust=
-    False, annot=None, fmt='.2g', annot_kws=None, linewidths=0, linecolor=
-    'white', cbar=True, cbar_kws=None, cbar_ax=None, square=False,
-    xticklabels='auto', yticklabels='auto', mask=None, ax=None, **kwargs):
+        if isinstance(self.yticks, str) and self.yticks == "auto":
+            yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)
+        else:
+            yticks, yticklabels = self.yticks, self.yticklabels
+
+        ax.set(xticks=xticks, yticks=yticks)
+        xtl = ax.set_xticklabels(xticklabels)
+        ytl = ax.set_yticklabels(yticklabels, rotation="vertical")
+        plt.setp(ytl, va="center")  # GH2484
+
+        # Possibly rotate them if they overlap
+        _draw_figure(ax.figure)
+
+        if axis_ticklabels_overlap(xtl):
+            plt.setp(xtl, rotation="vertical")
+        if axis_ticklabels_overlap(ytl):
+            plt.setp(ytl, rotation="horizontal")
+
+        # Add the axis labels
+        ax.set(xlabel=self.xlabel, ylabel=self.ylabel)
+
+        # Annotate the cells with the formatted values
+        if self.annot:
+            self._annotate_heatmap(ax, mesh)
+
+
+def heatmap(
+    data, *,
+    vmin=None, vmax=None, cmap=None, center=None, robust=False,
+    annot=None, fmt=".2g", annot_kws=None,
+    linewidths=0, linecolor="white",
+    cbar=True, cbar_kws=None, cbar_ax=None,
+    square=False, xticklabels="auto", yticklabels="auto",
+    mask=None, ax=None,
+    **kwargs
+):
     """Plot rectangular data as a color-encoded matrix.

     This is an Axes-level function and will draw the heatmap into the
@@ -226,7 +442,22 @@ def heatmap(data, *, vmin=None, vmax=None, cmap=None, center=None, robust=
     .. include:: ../docstrings/heatmap.rst

     """
-    pass
+    # Initialize the plotter object
+    plotter = _HeatMapper(data, vmin, vmax, cmap, center, robust, annot, fmt,
+                          annot_kws, cbar, cbar_kws, xticklabels,
+                          yticklabels, mask)
+
+    # Add the pcolormesh kwargs here
+    kwargs["linewidths"] = linewidths
+    kwargs["edgecolor"] = linecolor
+
+    # Draw the plot and return the Axes
+    if ax is None:
+        ax = plt.gca()
+    if square:
+        ax.set_aspect("equal")
+    plotter.plot(ax, cbar_ax, kwargs)
+    return ax


 class _DendrogramPlotter:
@@ -243,25 +474,32 @@ class _DendrogramPlotter:
         self.axis = axis
         if self.axis == 1:
             data = data.T
+
         if isinstance(data, pd.DataFrame):
             array = data.values
         else:
             array = np.asarray(data)
             data = pd.DataFrame(array)
+
         self.array = array
         self.data = data
+
         self.shape = self.data.shape
         self.metric = metric
         self.method = method
         self.axis = axis
         self.label = label
         self.rotate = rotate
+
         if linkage is None:
             self.linkage = self.calculated_linkage
         else:
             self.linkage = linkage
         self.dendrogram = self.calculate_dendrogram()
+
+        # Dendrogram leaf positions always fall at multiples of 5
+        # (scipy places leaves at 10 * i + 5)
         ticks = 10 * np.arange(self.data.shape[0]) + 5
+
         if self.label:
             ticklabels = _index_to_ticklabels(self.data.index)
             ticklabels = [ticklabels[i] for i in self.reordered_ind]
@@ -269,6 +507,7 @@ class _DendrogramPlotter:
                 self.xticks = []
                 self.yticks = ticks
                 self.xticklabels = []
+
                 self.yticklabels = ticklabels
                 self.ylabel = _index_to_label(self.data.index)
                 self.xlabel = ''
@@ -283,9 +522,45 @@ class _DendrogramPlotter:
             self.xticks, self.yticks = [], []
             self.yticklabels, self.xticklabels = [], []
             self.xlabel, self.ylabel = '', ''
+
         self.dependent_coord = self.dendrogram['dcoord']
         self.independent_coord = self.dendrogram['icoord']

+    def _calculate_linkage_scipy(self):
+        linkage = hierarchy.linkage(self.array, method=self.method,
+                                    metric=self.metric)
+        return linkage
+
+    def _calculate_linkage_fastcluster(self):
+        import fastcluster
+        # Fastcluster has a memory-saving vectorized version, but only
+        # with certain linkage methods, and mostly with euclidean metric
+        # vector_methods = ('single', 'centroid', 'median', 'ward')
+        euclidean_methods = ('centroid', 'median', 'ward')
+        euclidean = self.metric == 'euclidean' and self.method in \
+            euclidean_methods
+        if euclidean or self.method == 'single':
+            return fastcluster.linkage_vector(self.array,
+                                              method=self.method,
+                                              metric=self.metric)
+        else:
+            linkage = fastcluster.linkage(self.array, method=self.method,
+                                          metric=self.metric)
+            return linkage
+
+    @property
+    def calculated_linkage(self):
+
+        try:
+            return self._calculate_linkage_fastcluster()
+        except ImportError:
+            if np.prod(self.shape) >= 10000:
+                msg = ("Clustering large matrix with scipy. Installing "
+                       "`fastcluster` may give better performance.")
+                warnings.warn(msg)
+
+        return self._calculate_linkage_scipy()
+
     def calculate_dendrogram(self):
         """Calculates a dendrogram based on the linkage matrix

@@ -299,12 +574,13 @@ class _DendrogramPlotter:
             .dendrogram. The important key-value pairing is
             "reordered_ind" which indicates the re-ordering of the matrix
         """
-        pass
+        return hierarchy.dendrogram(self.linkage, no_plot=True,
+                                    color_threshold=-np.inf)

     @property
     def reordered_ind(self):
         """Indices of the matrix, reordered by the dendrogram"""
-        pass
+        return self.dendrogram['leaves']

     def plot(self, ax, tree_kws):
         """Plots a dendrogram of the similarities between data on the axes
@@ -315,11 +591,59 @@ class _DendrogramPlotter:
             Axes object upon which the dendrogram is plotted

         """
-        pass
+        tree_kws = {} if tree_kws is None else tree_kws.copy()
+        tree_kws.setdefault("linewidths", .5)
+        tree_kws.setdefault("colors", tree_kws.pop("color", (.2, .2, .2)))
+
+        if self.rotate and self.axis == 0:
+            coords = zip(self.dependent_coord, self.independent_coord)
+        else:
+            coords = zip(self.independent_coord, self.dependent_coord)
+        lines = LineCollection([list(zip(x, y)) for x, y in coords],
+                               **tree_kws)
+
+        ax.add_collection(lines)
+        number_of_leaves = len(self.reordered_ind)
+        max_dependent_coord = max(map(max, self.dependent_coord))
+
+        if self.rotate:
+            ax.yaxis.set_ticks_position('right')
+
+            # Constants 10 and 1.05 come from
+            # `scipy.cluster.hierarchy._plot_dendrogram`
+            ax.set_ylim(0, number_of_leaves * 10)
+            ax.set_xlim(0, max_dependent_coord * 1.05)
+
+            ax.invert_xaxis()
+            ax.invert_yaxis()
+        else:
+            # Constants 10 and 1.05 come from
+            # `scipy.cluster.hierarchy._plot_dendrogram`
+            ax.set_xlim(0, number_of_leaves * 10)
+            ax.set_ylim(0, max_dependent_coord * 1.05)
+
+        despine(ax=ax, bottom=True, left=True)
+
+        ax.set(xticks=self.xticks, yticks=self.yticks,
+               xlabel=self.xlabel, ylabel=self.ylabel)
+        xtl = ax.set_xticklabels(self.xticklabels)
+        ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')
+
+        # Force a draw of the plot to avoid matplotlib window error
+        _draw_figure(ax.figure)

+        if len(ytl) > 0 and axis_ticklabels_overlap(ytl):
+            plt.setp(ytl, rotation="horizontal")
+        if len(xtl) > 0 and axis_ticklabels_overlap(xtl):
+            plt.setp(xtl, rotation="vertical")
+        return self

-def dendrogram(data, *, linkage=None, axis=1, label=True, metric=
-    'euclidean', method='average', rotate=False, tree_kws=None, ax=None):
+
+def dendrogram(
+    data, *,
+    linkage=None, axis=1, label=True, metric='euclidean',
+    method='average', rotate=False, tree_kws=None, ax=None
+):
     """Draw a tree diagram of relationships within a matrix

     Parameters
@@ -357,72 +681,157 @@ def dendrogram(data, *, linkage=None, axis=1, label=True, metric=
     dendrogramplotter.reordered_ind

     """
-    pass
+    if _no_scipy:
+        raise RuntimeError("dendrogram requires scipy to be installed")
+
+    plotter = _DendrogramPlotter(data, linkage=linkage, axis=axis,
+                                 metric=metric, method=method,
+                                 label=label, rotate=rotate)
+    if ax is None:
+        ax = plt.gca()
+
+    return plotter.plot(ax=ax, tree_kws=tree_kws)


 class ClusterGrid(Grid):

-    def __init__(self, data, pivot_kws=None, z_score=None, standard_scale=
-        None, figsize=None, row_colors=None, col_colors=None, mask=None,
-        dendrogram_ratio=None, colors_ratio=None, cbar_pos=None):
+    def __init__(self, data, pivot_kws=None, z_score=None, standard_scale=None,
+                 figsize=None, row_colors=None, col_colors=None, mask=None,
+                 dendrogram_ratio=None, colors_ratio=None, cbar_pos=None):
         """Grid object for organizing clustered heatmap input on to axes"""
         if _no_scipy:
-            raise RuntimeError('ClusterGrid requires scipy to be available')
+            raise RuntimeError("ClusterGrid requires scipy to be available")
+
         if isinstance(data, pd.DataFrame):
             self.data = data
         else:
             self.data = pd.DataFrame(data)
+
         self.data2d = self.format_data(self.data, pivot_kws, z_score,
-            standard_scale)
+                                       standard_scale)
+
         self.mask = _matrix_mask(self.data2d, mask)
+
         self._figure = plt.figure(figsize=figsize)
-        self.row_colors, self.row_color_labels = self._preprocess_colors(data,
-            row_colors, axis=0)
-        self.col_colors, self.col_color_labels = self._preprocess_colors(data,
-            col_colors, axis=1)
+
+        self.row_colors, self.row_color_labels = \
+            self._preprocess_colors(data, row_colors, axis=0)
+        self.col_colors, self.col_color_labels = \
+            self._preprocess_colors(data, col_colors, axis=1)
+
         try:
             row_dendrogram_ratio, col_dendrogram_ratio = dendrogram_ratio
         except TypeError:
             row_dendrogram_ratio = col_dendrogram_ratio = dendrogram_ratio
+
         try:
             row_colors_ratio, col_colors_ratio = colors_ratio
         except TypeError:
             row_colors_ratio = col_colors_ratio = colors_ratio
+
         width_ratios = self.dim_ratios(self.row_colors,
-            row_dendrogram_ratio, row_colors_ratio)
+                                       row_dendrogram_ratio,
+                                       row_colors_ratio)
         height_ratios = self.dim_ratios(self.col_colors,
-            col_dendrogram_ratio, col_colors_ratio)
+                                        col_dendrogram_ratio,
+                                        col_colors_ratio)
+
         nrows = 2 if self.col_colors is None else 3
         ncols = 2 if self.row_colors is None else 3
-        self.gs = gridspec.GridSpec(nrows, ncols, width_ratios=width_ratios,
-            height_ratios=height_ratios)
+
+        self.gs = gridspec.GridSpec(nrows, ncols,
+                                    width_ratios=width_ratios,
+                                    height_ratios=height_ratios)
+
         self.ax_row_dendrogram = self._figure.add_subplot(self.gs[-1, 0])
         self.ax_col_dendrogram = self._figure.add_subplot(self.gs[0, -1])
         self.ax_row_dendrogram.set_axis_off()
         self.ax_col_dendrogram.set_axis_off()
+
         self.ax_row_colors = None
         self.ax_col_colors = None
+
         if self.row_colors is not None:
-            self.ax_row_colors = self._figure.add_subplot(self.gs[-1, 1])
+            self.ax_row_colors = self._figure.add_subplot(
+                self.gs[-1, 1])
         if self.col_colors is not None:
-            self.ax_col_colors = self._figure.add_subplot(self.gs[1, -1])
+            self.ax_col_colors = self._figure.add_subplot(
+                self.gs[1, -1])
+
         self.ax_heatmap = self._figure.add_subplot(self.gs[-1, -1])
         if cbar_pos is None:
             self.ax_cbar = self.cax = None
         else:
+            # Initialize the colorbar axes in the gridspec so that tight_layout
+            # works. We will move it where it belongs later. This is a hack.
             self.ax_cbar = self._figure.add_subplot(self.gs[0, 0])
-            self.cax = self.ax_cbar
+            self.cax = self.ax_cbar  # Backwards compatibility
         self.cbar_pos = cbar_pos
+
         self.dendrogram_row = None
         self.dendrogram_col = None

     def _preprocess_colors(self, data, colors, axis):
         """Preprocess {row/col}_colors to extract labels and convert colors."""
-        pass
-
-    def format_data(self, data, pivot_kws, z_score=None, standard_scale=None):
+        labels = None
+
+        if colors is not None:
+            if isinstance(colors, (pd.DataFrame, pd.Series)):
+
+                # If data is unindexed, raise
+                if (not hasattr(data, "index") and axis == 0) or (
+                    not hasattr(data, "columns") and axis == 1
+                ):
+                    axis_name = "col" if axis else "row"
+                    msg = (f"{axis_name}_colors indices can't be matched with data "
+                           f"indices. Provide {axis_name}_colors as a non-indexed "
+                           "datatype, e.g. by using `.to_numpy()`")
+                    raise TypeError(msg)
+
+                # Ensure colors match data indices
+                if axis == 0:
+                    colors = colors.reindex(data.index)
+                else:
+                    colors = colors.reindex(data.columns)
+
+                # Replace na's with white color
+                # TODO We should set these to transparent instead
+                colors = colors.astype(object).fillna('white')
+
+                # Extract color values and labels from frame/series
+                if isinstance(colors, pd.DataFrame):
+                    labels = list(colors.columns)
+                    colors = colors.T.values
+                else:
+                    if colors.name is None:
+                        labels = [""]
+                    else:
+                        labels = [colors.name]
+                    colors = colors.values
+
+            colors = _convert_colors(colors)
+
+        return colors, labels
+
+    def format_data(self, data, pivot_kws, z_score=None,
+                    standard_scale=None):
         """Extract variables from data or use directly."""
-        pass
+
+        # Either the data is already in 2d matrix format, or need to do a pivot
+        if pivot_kws is not None:
+            data2d = data.pivot(**pivot_kws)
+        else:
+            data2d = data
+
+        if z_score is not None and standard_scale is not None:
+            raise ValueError(
+                'Cannot perform both z-scoring and standard-scaling on data')
+
+        if z_score is not None:
+            data2d = self.z_score(data2d, z_score)
+        if standard_scale is not None:
+            data2d = self.standard_scale(data2d, standard_scale)
+        return data2d

     @staticmethod
     def z_score(data2d, axis=1):
@@ -442,7 +851,17 @@ class ClusterGrid(Grid):
             Normalized data with a mean of 0 and variance of 1 across the
             specified axis.
         """
-        pass
+        if axis == 1:
+            z_scored = data2d
+        else:
+            z_scored = data2d.T
+
+        z_scored = (z_scored - z_scored.mean()) / z_scored.std()
+
+        if axis == 1:
+            return z_scored
+        else:
+            return z_scored.T
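+        # Worked example (illustrative): with the default axis=1 each column
+        # is centered and scaled, e.g. z_score(pd.DataFrame([[1., 2.], [3., 4.]]))
+        # yields +/-0.707 everywhere (i.e. +/-1/sqrt(2), sample std with ddof=1).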

     @staticmethod
     def standard_scale(data2d, axis=1):
@@ -463,11 +882,38 @@ class ClusterGrid(Grid):
             specified axis.

         """
-        pass
+        # Normalize these values to range from 0 to 1
+        if axis == 1:
+            standardized = data2d
+        else:
+            standardized = data2d.T
+
+        subtract = standardized.min()
+        standardized = (standardized - subtract) / (
+            standardized.max() - standardized.min())
+
+        if axis == 1:
+            return standardized
+        else:
+            return standardized.T
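+        # Worked example (illustrative): with the default axis=1 each column is
+        # rescaled to [0, 1], e.g. [[1., 2.], [3., 4.]] becomes [[0., 0.], [1., 1.]].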

     def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):
         """Get the proportions of the figure taken up by each axes."""
-        pass
+        ratios = [dendrogram_ratio]
+
+        if colors is not None:
+            # Colors are encoded as rgb, so there is an extra dimension
+            if np.ndim(colors) > 2:
+                n_colors = len(colors)
+            else:
+                n_colors = 1
+
+            ratios += [n_colors * colors_ratio]
+
+        # Add the ratio for the heatmap itself
+        ratios.append(1 - sum(ratios))
+
+        return ratios
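+        # Example (illustrative): with dendrogram_ratio=.2, colors_ratio=.03 and
+        # a single color band, this returns [0.2, 0.03, 0.77]; the final entry is
+        # whatever fraction of the figure remains for the heatmap itself.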

     @staticmethod
     def color_list_to_matrix_and_cmap(colors, ind, axis=0):
@@ -493,7 +939,58 @@ class ClusterGrid(Grid):
         cmap : matplotlib.colors.ListedColormap

         """
-        pass
+        try:
+            mpl.colors.to_rgb(colors[0])
+        except ValueError:
+            # We have a 2D color structure
+            m, n = len(colors), len(colors[0])
+            if not all(len(c) == n for c in colors[1:]):
+                raise ValueError("Multiple side color vectors must have same size")
+        else:
+            # We have one vector of colors
+            m, n = 1, len(colors)
+            colors = [colors]
+
+        # Map from unique colors to colormap index value
+        unique_colors = {}
+        matrix = np.zeros((m, n), int)
+        for i, inner in enumerate(colors):
+            for j, color in enumerate(inner):
+                idx = unique_colors.setdefault(color, len(unique_colors))
+                matrix[i, j] = idx
+
+        # Reorder for clustering and transpose for axis
+        matrix = matrix[:, ind]
+        if axis == 0:
+            matrix = matrix.T
+
+        cmap = mpl.colors.ListedColormap(list(unique_colors))
+        return matrix, cmap
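+        # Example (illustrative):
+        #     color_list_to_matrix_and_cmap(["r", "g", "r"], ind=[2, 0, 1], axis=1)
+        # returns (array([[0, 0, 1]]), ListedColormap(["r", "g"])).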
+
+    def plot_dendrograms(self, row_cluster, col_cluster, metric, method,
+                         row_linkage, col_linkage, tree_kws):
+        # Plot the row dendrogram
+        if row_cluster:
+            self.dendrogram_row = dendrogram(
+                self.data2d, metric=metric, method=method, label=False, axis=0,
+                ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,
+                tree_kws=tree_kws
+            )
+        else:
+            self.ax_row_dendrogram.set_xticks([])
+            self.ax_row_dendrogram.set_yticks([])
+        # Plot the column dendrogram
+        if col_cluster:
+            self.dendrogram_col = dendrogram(
+                self.data2d, metric=metric, method=method, label=False,
+                axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,
+                tree_kws=tree_kws
+            )
+        else:
+            self.ax_col_dendrogram.set_xticks([])
+            self.ax_col_dendrogram.set_yticks([])
+        despine(ax=self.ax_row_dendrogram, bottom=True, left=True)
+        despine(ax=self.ax_col_dendrogram, bottom=True, left=True)

     def plot_colors(self, xind, yind, **kws):
         """Plots color labels between the dendrogram and the heatmap
@@ -504,15 +1001,159 @@ class ClusterGrid(Grid):
             Keyword arguments heatmap

         """
-        pass
+        # Remove any custom colormap and centering
+        # TODO this code has consistently caused problems where we have
+        # missed kwargs that need to be excluded, so it might be better
+        # to rewrite it *in*clusively.
+        kws = kws.copy()
+        kws.pop('cmap', None)
+        kws.pop('norm', None)
+        kws.pop('center', None)
+        kws.pop('annot', None)
+        kws.pop('vmin', None)
+        kws.pop('vmax', None)
+        kws.pop('robust', None)
+        kws.pop('xticklabels', None)
+        kws.pop('yticklabels', None)
+
+        # Plot the row colors
+        if self.row_colors is not None:
+            matrix, cmap = self.color_list_to_matrix_and_cmap(
+                self.row_colors, yind, axis=0)
+
+            # Get row_color labels
+            if self.row_color_labels is not None:
+                row_color_labels = self.row_color_labels
+            else:
+                row_color_labels = False
+
+            heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,
+                    xticklabels=row_color_labels, yticklabels=False, **kws)
+
+            # Adjust rotation of labels
+            if row_color_labels is not False:
+                plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)
+        else:
+            despine(self.ax_row_colors, left=True, bottom=True)
+
+        # Plot the column colors
+        if self.col_colors is not None:
+            matrix, cmap = self.color_list_to_matrix_and_cmap(
+                self.col_colors, xind, axis=1)
+
+            # Get col_color labels
+            if self.col_color_labels is not None:
+                col_color_labels = self.col_color_labels
+            else:
+                col_color_labels = False

+            heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,
+                    xticklabels=False, yticklabels=col_color_labels, **kws)

-def clustermap(data, *, pivot_kws=None, method='average', metric=
-    'euclidean', z_score=None, standard_scale=None, figsize=(10, 10),
-    cbar_kws=None, row_cluster=True, col_cluster=True, row_linkage=None,
-    col_linkage=None, row_colors=None, col_colors=None, mask=None,
-    dendrogram_ratio=0.2, colors_ratio=0.03, cbar_pos=(0.02, 0.8, 0.05, 
-    0.18), tree_kws=None, **kwargs):
+            # Adjust rotation of labels, place on right side
+            if col_color_labels is not False:
+                self.ax_col_colors.yaxis.tick_right()
+                plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)
+        else:
+            despine(self.ax_col_colors, left=True, bottom=True)
+
+    def plot_matrix(self, colorbar_kws, xind, yind, **kws):
+        self.data2d = self.data2d.iloc[yind, xind]
+        self.mask = self.mask.iloc[yind, xind]
+
+        # Try to reorganize specified tick labels, if provided
+        xtl = kws.pop("xticklabels", "auto")
+        try:
+            xtl = np.asarray(xtl)[xind]
+        except (TypeError, IndexError):
+            pass
+        ytl = kws.pop("yticklabels", "auto")
+        try:
+            ytl = np.asarray(ytl)[yind]
+        except (TypeError, IndexError):
+            pass
+
+        # Reorganize the annotations to match the heatmap
+        annot = kws.pop("annot", None)
+        if annot is None or annot is False:
+            pass
+        else:
+            if isinstance(annot, bool):
+                annot_data = self.data2d
+            else:
+                annot_data = np.asarray(annot)
+                if annot_data.shape != self.data2d.shape:
+                    err = "`data` and `annot` must have same shape."
+                    raise ValueError(err)
+                annot_data = annot_data[yind][:, xind]
+            annot = annot_data
+
+        # Setting ax_cbar=None in clustermap call implies no colorbar
+        kws.setdefault("cbar", self.ax_cbar is not None)
+        heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,
+                cbar_kws=colorbar_kws, mask=self.mask,
+                xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)
+
+        ytl = self.ax_heatmap.get_yticklabels()
+        ytl_rot = None if not ytl else ytl[0].get_rotation()
+        self.ax_heatmap.yaxis.set_ticks_position('right')
+        self.ax_heatmap.yaxis.set_label_position('right')
+        if ytl_rot is not None:
+            ytl = self.ax_heatmap.get_yticklabels()
+            plt.setp(ytl, rotation=ytl_rot)
+
+        tight_params = dict(h_pad=.02, w_pad=.02)
+        if self.ax_cbar is None:
+            self._figure.tight_layout(**tight_params)
+        else:
+            # Turn the colorbar axes off for tight layout so that its
+            # ticks don't interfere with the rest of the plot layout.
+            # Then move it.
+            self.ax_cbar.set_axis_off()
+            self._figure.tight_layout(**tight_params)
+            self.ax_cbar.set_axis_on()
+            self.ax_cbar.set_position(self.cbar_pos)
+
+    def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,
+             row_linkage, col_linkage, tree_kws, **kws):
+
+        # heatmap square=True sets the aspect ratio on the axes, but that is
+        # not compatible with the multi-axes layout of clustergrid
+        if kws.get("square", False):
+            msg = "``square=True`` ignored in clustermap"
+            warnings.warn(msg)
+            kws.pop("square")
+
+        colorbar_kws = {} if colorbar_kws is None else colorbar_kws
+
+        self.plot_dendrograms(row_cluster, col_cluster, metric, method,
+                              row_linkage=row_linkage, col_linkage=col_linkage,
+                              tree_kws=tree_kws)
+        try:
+            xind = self.dendrogram_col.reordered_ind
+        except AttributeError:
+            xind = np.arange(self.data2d.shape[1])
+        try:
+            yind = self.dendrogram_row.reordered_ind
+        except AttributeError:
+            yind = np.arange(self.data2d.shape[0])
+
+        self.plot_colors(xind, yind, **kws)
+        self.plot_matrix(colorbar_kws, xind, yind, **kws)
+        return self
+
+
+def clustermap(
+    data, *,
+    pivot_kws=None, method='average', metric='euclidean',
+    z_score=None, standard_scale=None, figsize=(10, 10),
+    cbar_kws=None, row_cluster=True, col_cluster=True,
+    row_linkage=None, col_linkage=None,
+    row_colors=None, col_colors=None, mask=None,
+    dendrogram_ratio=.2, colors_ratio=0.03,
+    cbar_pos=(.02, .8, .05, .18), tree_kws=None,
+    **kwargs
+):
     """
     Plot a matrix dataset as a hierarchically-clustered heatmap.

@@ -605,4 +1246,17 @@ def clustermap(data, *, pivot_kws=None, method='average', metric=
     .. include:: ../docstrings/clustermap.rst

     """
-    pass
+    if _no_scipy:
+        raise RuntimeError("clustermap requires scipy to be available")
+
+    plotter = ClusterGrid(data, pivot_kws=pivot_kws, figsize=figsize,
+                          row_colors=row_colors, col_colors=col_colors,
+                          z_score=z_score, standard_scale=standard_scale,
+                          mask=mask, dendrogram_ratio=dendrogram_ratio,
+                          colors_ratio=colors_ratio, cbar_pos=cbar_pos)
+
+    return plotter.plot(metric=metric, method=method,
+                        colorbar_kws=cbar_kws,
+                        row_cluster=row_cluster, col_cluster=col_cluster,
+                        row_linkage=row_linkage, col_linkage=col_linkage,
+                        tree_kws=tree_kws, **kwargs)
diff --git a/seaborn/miscplot.py b/seaborn/miscplot.py
index 3bbc63a0..3fb290c8 100644
--- a/seaborn/miscplot.py
+++ b/seaborn/miscplot.py
@@ -2,7 +2,8 @@ import numpy as np
 import matplotlib as mpl
 import matplotlib.pyplot as plt
 import matplotlib.ticker as ticker
-__all__ = ['palplot', 'dogplot']
+
+__all__ = ["palplot", "dogplot"]


 def palplot(pal, size=1):
@@ -16,9 +17,29 @@ def palplot(pal, size=1):
         scaling factor for size of plot

     """
-    pass
+    n = len(pal)
+    _, ax = plt.subplots(1, 1, figsize=(n * size, size))
+    ax.imshow(np.arange(n).reshape(1, n),
+              cmap=mpl.colors.ListedColormap(list(pal)),
+              interpolation="nearest", aspect="auto")
+    ax.set_xticks(np.arange(n) - .5)
+    ax.set_yticks([-.5, .5])
+    # Ensure nice border between colors
+    ax.set_xticklabels(["" for _ in range(n)])
+    # The proper way to set no ticks
+    ax.yaxis.set_major_locator(ticker.NullLocator())
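+    # Example (illustrative): palplot(color_palette("husl", 8)) draws the eight
+    # palette colors as a single row of square swatches.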


 def dogplot(*_, **__):
     """Who's a good boy?"""
-    pass
+    from urllib.request import urlopen
+    from io import BytesIO
+
+    url = "https://github.com/mwaskom/seaborn-data/raw/master/png/img{}.png"
+    pic = np.random.randint(2, 7)
+    data = BytesIO(urlopen(url.format(pic)).read())
+    img = plt.imread(data)
+    f, ax = plt.subplots(figsize=(5, 5), dpi=100)
+    f.subplots_adjust(0, 0, 1, 1)
+    ax.imshow(img)
+    ax.set_axis_off()
diff --git a/seaborn/objects.py b/seaborn/objects.py
index 45349c24..123e57f0 100644
--- a/seaborn/objects.py
+++ b/seaborn/objects.py
@@ -26,18 +26,24 @@ See the documentation for other :class:`Plot` methods to learn about the many
 ways that a plot can be enhanced and customized.

 """
-from seaborn._core.plot import Plot
-from seaborn._marks.base import Mark
-from seaborn._marks.area import Area, Band
-from seaborn._marks.bar import Bar, Bars
-from seaborn._marks.dot import Dot, Dots
-from seaborn._marks.line import Dash, Line, Lines, Path, Paths, Range
-from seaborn._marks.text import Text
-from seaborn._stats.base import Stat
-from seaborn._stats.aggregation import Agg, Est
-from seaborn._stats.counting import Count, Hist
-from seaborn._stats.density import KDE
-from seaborn._stats.order import Perc
-from seaborn._stats.regression import PolyFit
-from seaborn._core.moves import Dodge, Jitter, Norm, Shift, Stack, Move
-from seaborn._core.scales import Boolean, Continuous, Nominal, Temporal, Scale
+from seaborn._core.plot import Plot  # noqa: F401
+
+from seaborn._marks.base import Mark  # noqa: F401
+from seaborn._marks.area import Area, Band  # noqa: F401
+from seaborn._marks.bar import Bar, Bars  # noqa: F401
+from seaborn._marks.dot import Dot, Dots  # noqa: F401
+from seaborn._marks.line import Dash, Line, Lines, Path, Paths, Range  # noqa: F401
+from seaborn._marks.text import Text  # noqa: F401
+
+from seaborn._stats.base import Stat  # noqa: F401
+from seaborn._stats.aggregation import Agg, Est  # noqa: F401
+from seaborn._stats.counting import Count, Hist  # noqa: F401
+from seaborn._stats.density import KDE  # noqa: F401
+from seaborn._stats.order import Perc  # noqa: F401
+from seaborn._stats.regression import PolyFit  # noqa: F401
+
+from seaborn._core.moves import Dodge, Jitter, Norm, Shift, Stack, Move  # noqa: F401
+
+from seaborn._core.scales import (  # noqa: F401
+    Boolean, Continuous, Nominal, Temporal, Scale
+)
diff --git a/seaborn/palettes.py b/seaborn/palettes.py
index 6447384e..f7f42984 100644
--- a/seaborn/palettes.py
+++ b/seaborn/palettes.py
@@ -1,35 +1,58 @@
 import colorsys
 from itertools import cycle
+
 import numpy as np
 import matplotlib as mpl
+
 from .external import husl
+
 from .utils import desaturate, get_color_cycle
 from .colors import xkcd_rgb, crayons
 from ._compat import get_colormap
-__all__ = ['color_palette', 'hls_palette', 'husl_palette', 'mpl_palette',
-    'dark_palette', 'light_palette', 'diverging_palette', 'blend_palette',
-    'xkcd_palette', 'crayon_palette', 'cubehelix_palette', 'set_color_codes']
-SEABORN_PALETTES = dict(deep=['#4C72B0', '#DD8452', '#55A868', '#C44E52',
-    '#8172B3', '#937860', '#DA8BC3', '#8C8C8C', '#CCB974', '#64B5CD'],
-    deep6=['#4C72B0', '#55A868', '#C44E52', '#8172B3', '#CCB974', '#64B5CD'
-    ], muted=['#4878D0', '#EE854A', '#6ACC64', '#D65F5F', '#956CB4',
-    '#8C613C', '#DC7EC0', '#797979', '#D5BB67', '#82C6E2'], muted6=[
-    '#4878D0', '#6ACC64', '#D65F5F', '#956CB4', '#D5BB67', '#82C6E2'],
-    pastel=['#A1C9F4', '#FFB482', '#8DE5A1', '#FF9F9B', '#D0BBFF',
-    '#DEBB9B', '#FAB0E4', '#CFCFCF', '#FFFEA3', '#B9F2F0'], pastel6=[
-    '#A1C9F4', '#8DE5A1', '#FF9F9B', '#D0BBFF', '#FFFEA3', '#B9F2F0'],
-    bright=['#023EFF', '#FF7C00', '#1AC938', '#E8000B', '#8B2BE2',
-    '#9F4800', '#F14CC1', '#A3A3A3', '#FFC400', '#00D7FF'], bright6=[
-    '#023EFF', '#1AC938', '#E8000B', '#8B2BE2', '#FFC400', '#00D7FF'], dark
-    =['#001C7F', '#B1400D', '#12711C', '#8C0800', '#591E71', '#592F0D',
-    '#A23582', '#3C3C3C', '#B8850A', '#006374'], dark6=['#001C7F',
-    '#12711C', '#8C0800', '#591E71', '#B8850A', '#006374'], colorblind=[
-    '#0173B2', '#DE8F05', '#029E73', '#D55E00', '#CC78BC', '#CA9161',
-    '#FBAFE4', '#949494', '#ECE133', '#56B4E9'], colorblind6=['#0173B2',
-    '#029E73', '#D55E00', '#CC78BC', '#ECE133', '#56B4E9'])
-MPL_QUAL_PALS = {'tab10': 10, 'tab20': 20, 'tab20b': 20, 'tab20c': 20,
-    'Set1': 9, 'Set2': 8, 'Set3': 12, 'Accent': 8, 'Paired': 12, 'Pastel1':
-    9, 'Pastel2': 8, 'Dark2': 8}
+
+
+__all__ = ["color_palette", "hls_palette", "husl_palette", "mpl_palette",
+           "dark_palette", "light_palette", "diverging_palette",
+           "blend_palette", "xkcd_palette", "crayon_palette",
+           "cubehelix_palette", "set_color_codes"]
+
+
+SEABORN_PALETTES = dict(
+    deep=["#4C72B0", "#DD8452", "#55A868", "#C44E52", "#8172B3",
+          "#937860", "#DA8BC3", "#8C8C8C", "#CCB974", "#64B5CD"],
+    deep6=["#4C72B0", "#55A868", "#C44E52",
+           "#8172B3", "#CCB974", "#64B5CD"],
+    muted=["#4878D0", "#EE854A", "#6ACC64", "#D65F5F", "#956CB4",
+           "#8C613C", "#DC7EC0", "#797979", "#D5BB67", "#82C6E2"],
+    muted6=["#4878D0", "#6ACC64", "#D65F5F",
+            "#956CB4", "#D5BB67", "#82C6E2"],
+    pastel=["#A1C9F4", "#FFB482", "#8DE5A1", "#FF9F9B", "#D0BBFF",
+            "#DEBB9B", "#FAB0E4", "#CFCFCF", "#FFFEA3", "#B9F2F0"],
+    pastel6=["#A1C9F4", "#8DE5A1", "#FF9F9B",
+             "#D0BBFF", "#FFFEA3", "#B9F2F0"],
+    bright=["#023EFF", "#FF7C00", "#1AC938", "#E8000B", "#8B2BE2",
+            "#9F4800", "#F14CC1", "#A3A3A3", "#FFC400", "#00D7FF"],
+    bright6=["#023EFF", "#1AC938", "#E8000B",
+             "#8B2BE2", "#FFC400", "#00D7FF"],
+    dark=["#001C7F", "#B1400D", "#12711C", "#8C0800", "#591E71",
+          "#592F0D", "#A23582", "#3C3C3C", "#B8850A", "#006374"],
+    dark6=["#001C7F", "#12711C", "#8C0800",
+           "#591E71", "#B8850A", "#006374"],
+    colorblind=["#0173B2", "#DE8F05", "#029E73", "#D55E00", "#CC78BC",
+                "#CA9161", "#FBAFE4", "#949494", "#ECE133", "#56B4E9"],
+    colorblind6=["#0173B2", "#029E73", "#D55E00",
+                 "#CC78BC", "#ECE133", "#56B4E9"]
+)
+
+
+MPL_QUAL_PALS = {
+    "tab10": 10, "tab20": 20, "tab20b": 20, "tab20c": 20,
+    "Set1": 9, "Set2": 8, "Set3": 12,
+    "Accent": 8, "Paired": 12,
+    "Pastel1": 9, "Pastel2": 8, "Dark2": 8,
+}
+
+
 QUAL_PALETTE_SIZES = MPL_QUAL_PALS.copy()
 QUAL_PALETTE_SIZES.update({k: len(v) for k, v in SEABORN_PALETTES.items()})
 QUAL_PALETTES = list(QUAL_PALETTE_SIZES.keys())
@@ -37,7 +60,6 @@ QUAL_PALETTES = list(QUAL_PALETTE_SIZES.keys())

 class _ColorPalette(list):
     """Set the color palette in a with statement, otherwise be a list."""
-
     def __enter__(self):
         """Open the context."""
         from .rcmod import set_palette
@@ -52,16 +74,49 @@ class _ColorPalette(list):

     def as_hex(self):
         """Return a color palette with hex codes instead of RGB values."""
-        pass
+        hex = [mpl.colors.rgb2hex(rgb) for rgb in self]
+        return _ColorPalette(hex)

     def _repr_html_(self):
         """Rich display of the color palette in an HTML frontend."""
-        pass
+        s = 55
+        n = len(self)
+        html = f'<svg  width="{n * s}" height="{s}">'
+        for i, c in enumerate(self.as_hex()):
+            html += (
+                f'<rect x="{i * s}" y="0" width="{s}" height="{s}" style="fill:{c};'
+                'stroke-width:2;stroke:rgb(255,255,255)"/>'
+            )
+        html += '</svg>'
+        return html


 def _patch_colormap_display():
     """Simplify the rich display of matplotlib color maps in a notebook."""
-    pass
+    def _repr_png_(self):
+        """Generate a PNG representation of the Colormap."""
+        import io
+        from PIL import Image
+        import numpy as np
+        IMAGE_SIZE = (400, 50)
+        X = np.tile(np.linspace(0, 1, IMAGE_SIZE[0]), (IMAGE_SIZE[1], 1))
+        pixels = self(X, bytes=True)
+        png_bytes = io.BytesIO()
+        Image.fromarray(pixels).save(png_bytes, format='png')
+        return png_bytes.getvalue()
+
+    def _repr_html_(self):
+        """Generate an HTML representation of the Colormap."""
+        import base64
+        png_bytes = self._repr_png_()
+        png_base64 = base64.b64encode(png_bytes).decode('ascii')
+        return ('<img '
+                + 'alt="' + self.name + ' color map" '
+                + 'title="' + self.name + '" '
+                + 'src="data:image/png;base64,' + png_base64 + '">')
+
+    mpl.colors.Colormap._repr_png_ = _repr_png_
+    mpl.colors.Colormap._repr_html_ = _repr_html_


 def color_palette(palette=None, n_colors=None, desat=None, as_cmap=False):
@@ -116,10 +171,91 @@ def color_palette(palette=None, n_colors=None, desat=None, as_cmap=False):
     .. include:: ../docstrings/color_palette.rst

     """
-    pass
-
-
-def hls_palette(n_colors=6, h=0.01, l=0.6, s=0.65, as_cmap=False):
+    if palette is None:
+        palette = get_color_cycle()
+        if n_colors is None:
+            n_colors = len(palette)
+
+    elif not isinstance(palette, str):
+        palette = palette
+        if n_colors is None:
+            n_colors = len(palette)
+    else:
+
+        if n_colors is None:
+            # Use all colors in a qualitative palette or 6 of another kind
+            n_colors = QUAL_PALETTE_SIZES.get(palette, 6)
+
+        if palette in SEABORN_PALETTES:
+            # Named "seaborn variant" of matplotlib default color cycle
+            palette = SEABORN_PALETTES[palette]
+
+        elif palette == "hls":
+            # Evenly spaced colors in cylindrical RGB space
+            palette = hls_palette(n_colors, as_cmap=as_cmap)
+
+        elif palette == "husl":
+            # Evenly spaced colors in cylindrical Lab space
+            palette = husl_palette(n_colors, as_cmap=as_cmap)
+
+        elif palette.lower() == "jet":
+            # Paternalism
+            raise ValueError("No.")
+
+        elif palette.startswith("ch:"):
+            # Cubehelix palette with params specified in string
+            args, kwargs = _parse_cubehelix_args(palette)
+            palette = cubehelix_palette(n_colors, *args, **kwargs, as_cmap=as_cmap)
+
+        elif palette.startswith("light:"):
+            # light palette to color specified in string
+            _, color = palette.split(":")
+            reverse = color.endswith("_r")
+            if reverse:
+                color = color[:-2]
+            palette = light_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)
+
+        elif palette.startswith("dark:"):
+            # dark palette to color specified in string
+            _, color = palette.split(":")
+            reverse = color.endswith("_r")
+            if reverse:
+                color = color[:-2]
+            palette = dark_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)
+
+        elif palette.startswith("blend:"):
+            # blend palette between colors specified in string
+            _, colors = palette.split(":")
+            colors = colors.split(",")
+            palette = blend_palette(colors, n_colors, as_cmap=as_cmap)
+
+        else:
+            try:
+                # Perhaps a named matplotlib colormap?
+                palette = mpl_palette(palette, n_colors, as_cmap=as_cmap)
+            except (ValueError, KeyError):  # Error class changed in mpl36
+                raise ValueError(f"{palette!r} is not a valid palette name")
+
+    if desat is not None:
+        palette = [desaturate(c, desat) for c in palette]
+
+    if not as_cmap:
+
+        # Always return as many colors as we asked for
+        pal_cycle = cycle(palette)
+        palette = [next(pal_cycle) for _ in range(n_colors)]
+
+        # Always return in r, g, b tuple format
+        try:
+            palette = map(mpl.colors.colorConverter.to_rgb, palette)
+            palette = _ColorPalette(palette)
+        except ValueError:
+            raise ValueError(f"Could not generate a palette for {palette}")
+
+    return palette
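+
+# Illustrative calls documenting the string grammar handled above (the specific
+# values are arbitrary examples, not defaults):
+#
+#     color_palette()                        # colors of the current cycle
+#     color_palette("ch:s=.25,rot=-.25")     # cubehelix specification string
+#     color_palette("light:#5A9", 6)         # light ramp toward a single color
+#     color_palette("blend:#7AB,#EDA", 5)    # blend between two colors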
+
+
+def hls_palette(n_colors=6, h=.01, l=.6, s=.65, as_cmap=False):  # noqa
     """
     Return hues with constant lightness and saturation in the HLS system.

@@ -160,10 +296,20 @@ def hls_palette(n_colors=6, h=0.01, l=0.6, s=0.65, as_cmap=False):
     .. include:: ../docstrings/hls_palette.rst

     """
-    pass
-
-
-def husl_palette(n_colors=6, h=0.01, s=0.9, l=0.65, as_cmap=False):
+    if as_cmap:
+        n_colors = 256
+    hues = np.linspace(0, 1, int(n_colors) + 1)[:-1]
+    hues += h
+    hues %= 1
+    hues -= hues.astype(int)
+    palette = [colorsys.hls_to_rgb(h_i, l, s) for h_i in hues]
+    if as_cmap:
+        return mpl.colors.ListedColormap(palette, "hls")
+    else:
+        return _ColorPalette(palette)
+
+
+def husl_palette(n_colors=6, h=.01, s=.9, l=.65, as_cmap=False):  # noqa
     """
     Return hues with constant lightness and saturation in the HUSL system.

@@ -202,7 +348,19 @@ def husl_palette(n_colors=6, h=0.01, s=0.9, l=0.65, as_cmap=False):
     .. include:: ../docstrings/husl_palette.rst

     """
-    pass
+    if as_cmap:
+        n_colors = 256
+    hues = np.linspace(0, 1, int(n_colors) + 1)[:-1]
+    hues += h
+    hues %= 1
+    hues *= 359
+    s *= 99
+    l *= 99  # noqa
+    palette = [_color_to_rgb((h_i, s, l), input="husl") for h_i in hues]
+    if as_cmap:
+        return mpl.colors.ListedColormap(palette, "hsl")
+    else:
+        return _ColorPalette(palette)


 def mpl_palette(name, n_colors=6, as_cmap=False):
@@ -233,15 +391,46 @@ def mpl_palette(name, n_colors=6, as_cmap=False):
     .. include:: ../docstrings/mpl_palette.rst

     """
-    pass
+    if name.endswith("_d"):
+        sub_name = name[:-2]
+        if sub_name.endswith("_r"):
+            reverse = True
+            sub_name = sub_name[:-2]
+        else:
+            reverse = False
+        pal = color_palette(sub_name, 2) + ["#333333"]
+        if reverse:
+            pal = pal[::-1]
+        cmap = blend_palette(pal, n_colors, as_cmap=True)
+    else:
+        cmap = get_colormap(name)
+
+    if name in MPL_QUAL_PALS:
+        bins = np.linspace(0, 1, MPL_QUAL_PALS[name])[:n_colors]
+    else:
+        bins = np.linspace(0, 1, int(n_colors) + 2)[1:-1]
+    palette = list(map(tuple, cmap(bins)[:, :3]))
+
+    if as_cmap:
+        return cmap
+    else:
+        return _ColorPalette(palette)
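+    # Example (illustrative): mpl_palette("viridis", 6) samples six evenly spaced
+    # colors from the matplotlib colormap, while a name like "Blues_d" builds a
+    # dark-shifted variant through blend_palette above.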


 def _color_to_rgb(color, input):
     """Add some more flexibility to color choices."""
-    pass
+    if input == "hls":
+        color = colorsys.hls_to_rgb(*color)
+    elif input == "husl":
+        color = husl.husl_to_rgb(*color)
+        color = tuple(np.clip(color, 0, 1))
+    elif input == "xkcd":
+        color = xkcd_rgb[color]

+    return mpl.colors.to_rgb(color)

-def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input='rgb'):
+
+def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input="rgb"):
     """Make a sequential palette that blends from dark to ``color``.

     This kind of palette is good for data that range between relatively
@@ -284,11 +473,15 @@ def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input='rgb'):
     .. include:: ../docstrings/dark_palette.rst

     """
-    pass
+    rgb = _color_to_rgb(color, input)
+    hue, sat, _ = husl.rgb_to_husl(*rgb)
+    gray_s, gray_l = .15 * sat, 15
+    gray = _color_to_rgb((hue, gray_s, gray_l), input="husl")
+    colors = [rgb, gray] if reverse else [gray, rgb]
+    return blend_palette(colors, n_colors, as_cmap)


-def light_palette(color, n_colors=6, reverse=False, as_cmap=False, input='rgb'
-    ):
+def light_palette(color, n_colors=6, reverse=False, as_cmap=False, input="rgb"):
     """Make a sequential palette that blends from light to ``color``.

     The ``color`` parameter can be specified in a number of ways, including
@@ -328,11 +521,16 @@ def light_palette(color, n_colors=6, reverse=False, as_cmap=False, input='rgb'
     .. include:: ../docstrings/light_palette.rst

     """
-    pass
+    rgb = _color_to_rgb(color, input)
+    hue, sat, _ = husl.rgb_to_husl(*rgb)
+    gray_s, gray_l = .15 * sat, 95
+    gray = _color_to_rgb((hue, gray_s, gray_l), input="husl")
+    colors = [rgb, gray] if reverse else [gray, rgb]
+    return blend_palette(colors, n_colors, as_cmap)


-def diverging_palette(h_neg, h_pos, s=75, l=50, sep=1, n=6, center='light',
-    as_cmap=False):
+def diverging_palette(h_neg, h_pos, s=75, l=50, sep=1, n=6,  # noqa
+                      center="light", as_cmap=False):
     """Make a diverging palette between two HUSL colors.

     If you are using the IPython notebook, you can also choose this palette
@@ -370,10 +568,17 @@ def diverging_palette(h_neg, h_pos, s=75, l=50, sep=1, n=6, center='light',
     .. include:: ../docstrings/diverging_palette.rst

     """
-    pass
+    palfunc = dict(dark=dark_palette, light=light_palette)[center]
+    n_half = int(128 - (sep // 2))
+    neg = palfunc((h_neg, s, l), n_half, reverse=True, input="husl")
+    pos = palfunc((h_pos, s, l), n_half, input="husl")
+    midpoint = dict(light=[(.95, .95, .95)], dark=[(.133, .133, .133)])[center]
+    mid = midpoint * sep
+    pal = blend_palette(np.concatenate([neg, mid, pos]), n, as_cmap=as_cmap)
+    return pal
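+    # Example (illustrative): diverging_palette(240, 10, n=9) gives a blue-to-red
+    # palette with a light midpoint; passing center="dark" uses a dark midpoint.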


-def blend_palette(colors, n_colors=6, as_cmap=False, input='rgb'):
+def blend_palette(colors, n_colors=6, as_cmap=False, input="rgb"):
     """Make a palette that blends between a list of colors.

     Parameters
@@ -395,7 +600,13 @@ def blend_palette(colors, n_colors=6, as_cmap=False, input='rgb'):
     .. include:: ../docstrings/blend_palette.rst

     """
-    pass
+    colors = [_color_to_rgb(color, input) for color in colors]
+    name = "blend"
+    pal = mpl.colors.LinearSegmentedColormap.from_list(name, colors)
+    if not as_cmap:
+        rgb_array = pal(np.linspace(0, 1, int(n_colors)))[:, :3]  # no alpha
+        pal = _ColorPalette(map(tuple, rgb_array))
+    return pal


 def xkcd_palette(colors):
@@ -420,7 +631,8 @@ def xkcd_palette(colors):
     crayon_palette : Make a palette with Crayola crayon colors.

     """
-    pass
+    palette = [xkcd_rgb[name] for name in colors]
+    return color_palette(palette, len(palette))


 def crayon_palette(colors):
@@ -446,11 +658,12 @@ def crayon_palette(colors):
     xkcd_palette : Make a palette with named colors from the XKCD color survey.

     """
-    pass
+    palette = [crayons[name] for name in colors]
+    return color_palette(palette, len(palette))


-def cubehelix_palette(n_colors=6, start=0, rot=0.4, gamma=1.0, hue=0.8,
-    light=0.85, dark=0.15, reverse=False, as_cmap=False):
+def cubehelix_palette(n_colors=6, start=0, rot=.4, gamma=1.0, hue=0.8,
+                      light=.85, dark=.15, reverse=False, as_cmap=False):
     """Make a sequential palette from the cubehelix system.

     This produces a colormap with linearly-decreasing (or increasing)
@@ -508,15 +721,82 @@ def cubehelix_palette(n_colors=6, start=0, rot=0.4, gamma=1.0, hue=0.8,
     .. include:: ../docstrings/cubehelix_palette.rst

     """
-    pass
+    def get_color_function(p0, p1):
+        # Copied from matplotlib because it lives in a private module
+        def color(x):
+            # Apply gamma factor to emphasise low or high intensity values
+            xg = x ** gamma
+
+            # Calculate amplitude and angle of deviation from the black
+            # to white diagonal in the plane of constant
+            # perceived intensity.
+            a = hue * xg * (1 - xg) / 2
+
+            phi = 2 * np.pi * (start / 3 + rot * x)
+
+            return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))
+        return color
+
+    cdict = {
+        "red": get_color_function(-0.14861, 1.78277),
+        "green": get_color_function(-0.29227, -0.90649),
+        "blue": get_color_function(1.97294, 0.0),
+    }
+
+    cmap = mpl.colors.LinearSegmentedColormap("cubehelix", cdict)
+
+    x = np.linspace(light, dark, int(n_colors))
+    pal = cmap(x)[:, :3].tolist()
+    if reverse:
+        pal = pal[::-1]
+
+    if as_cmap:
+        x_256 = np.linspace(light, dark, 256)
+        if reverse:
+            x_256 = x_256[::-1]
+        pal_256 = cmap(x_256)
+        cmap = mpl.colors.ListedColormap(pal_256, "seaborn_cubehelix")
+        return cmap
+    else:
+        return _ColorPalette(pal)


 def _parse_cubehelix_args(argstr):
     """Turn stringified cubehelix params into args/kwargs."""
-    pass
+
+    if argstr.startswith("ch:"):
+        argstr = argstr[3:]
+
+    if argstr.endswith("_r"):
+        reverse = True
+        argstr = argstr[:-2]
+    else:
+        reverse = False
+
+    if not argstr:
+        return [], {"reverse": reverse}
+
+    all_args = argstr.split(",")
+
+    args = [float(a.strip(" ")) for a in all_args if "=" not in a]
+
+    kwargs = [a.split("=") for a in all_args if "=" in a]
+    kwargs = {k.strip(" "): float(v.strip(" ")) for k, v in kwargs}
+
+    kwarg_map = dict(
+        s="start", r="rot", g="gamma",
+        h="hue", l="light", d="dark",  # noqa: E741
+    )
+
+    kwargs = {kwarg_map.get(k, k): v for k, v in kwargs.items()}
+
+    if reverse:
+        kwargs["reverse"] = True
+
+    return args, kwargs
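+    # Example (illustrative):
+    #     _parse_cubehelix_args("ch:s=.25,r=-.4,d=.3_r")
+    # returns ([], {"start": 0.25, "rot": -0.4, "dark": 0.3, "reverse": True}).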


-def set_color_codes(palette='deep'):
+def set_color_codes(palette="deep"):
     """Change how matplotlib color shorthands are interpreted.

     Calling this will change how shorthand codes like "b" or "g"
@@ -535,4 +815,27 @@ def set_color_codes(palette='deep'):
                   sets the matplotlib color cycle.

     """
-    pass
+    if palette == "reset":
+        colors = [
+            (0., 0., 1.),
+            (0., .5, 0.),
+            (1., 0., 0.),
+            (.75, 0., .75),
+            (.75, .75, 0.),
+            (0., .75, .75),
+            (0., 0., 0.)
+        ]
+    elif not isinstance(palette, str):
+        err = "set_color_codes requires a named seaborn palette"
+        raise TypeError(err)
+    elif palette in SEABORN_PALETTES:
+        if not palette.endswith("6"):
+            palette = palette + "6"
+        colors = SEABORN_PALETTES[palette] + [(.1, .1, .1)]
+    else:
+        err = f"Cannot set colors with palette '{palette}'"
+        raise ValueError(err)
+
+    for code, color in zip("bgrmyck", colors):
+        rgb = mpl.colors.colorConverter.to_rgb(color)
+        mpl.colors.colorConverter.colors[code] = rgb
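+
+# Illustrative usage (hypothetical plotting call, for exposition only):
+#
+#     set_color_codes("pastel")
+#     plt.plot([0, 1], color="b")  # "b" now refers to the pastel blue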
diff --git a/seaborn/rcmod.py b/seaborn/rcmod.py
index 28795aea..de238323 100644
--- a/seaborn/rcmod.py
+++ b/seaborn/rcmod.py
@@ -3,27 +3,84 @@ import functools
 import matplotlib as mpl
 from cycler import cycler
 from . import palettes
-__all__ = ['set_theme', 'set', 'reset_defaults', 'reset_orig', 'axes_style',
-    'set_style', 'plotting_context', 'set_context', 'set_palette']
-_style_keys = ['axes.facecolor', 'axes.edgecolor', 'axes.grid',
-    'axes.axisbelow', 'axes.labelcolor', 'figure.facecolor', 'grid.color',
-    'grid.linestyle', 'text.color', 'xtick.color', 'ytick.color',
-    'xtick.direction', 'ytick.direction', 'lines.solid_capstyle',
-    'patch.edgecolor', 'patch.force_edgecolor', 'image.cmap', 'font.family',
-    'font.sans-serif', 'xtick.bottom', 'xtick.top', 'ytick.left',
-    'ytick.right', 'axes.spines.left', 'axes.spines.bottom',
-    'axes.spines.right', 'axes.spines.top']
-_context_keys = ['font.size', 'axes.labelsize', 'axes.titlesize',
-    'xtick.labelsize', 'ytick.labelsize', 'legend.fontsize',
-    'legend.title_fontsize', 'axes.linewidth', 'grid.linewidth',
-    'lines.linewidth', 'lines.markersize', 'patch.linewidth',
-    'xtick.major.width', 'ytick.major.width', 'xtick.minor.width',
-    'ytick.minor.width', 'xtick.major.size', 'ytick.major.size',
-    'xtick.minor.size', 'ytick.minor.size']
-
-
-def set_theme(context='notebook', style='darkgrid', palette='deep', font=
-    'sans-serif', font_scale=1, color_codes=True, rc=None):
+
+
+__all__ = ["set_theme", "set", "reset_defaults", "reset_orig",
+           "axes_style", "set_style", "plotting_context", "set_context",
+           "set_palette"]
+
+
+_style_keys = [
+
+    "axes.facecolor",
+    "axes.edgecolor",
+    "axes.grid",
+    "axes.axisbelow",
+    "axes.labelcolor",
+
+    "figure.facecolor",
+
+    "grid.color",
+    "grid.linestyle",
+
+    "text.color",
+
+    "xtick.color",
+    "ytick.color",
+    "xtick.direction",
+    "ytick.direction",
+    "lines.solid_capstyle",
+
+    "patch.edgecolor",
+    "patch.force_edgecolor",
+
+    "image.cmap",
+    "font.family",
+    "font.sans-serif",
+
+    "xtick.bottom",
+    "xtick.top",
+    "ytick.left",
+    "ytick.right",
+
+    "axes.spines.left",
+    "axes.spines.bottom",
+    "axes.spines.right",
+    "axes.spines.top",
+
+]
+
+_context_keys = [
+
+    "font.size",
+    "axes.labelsize",
+    "axes.titlesize",
+    "xtick.labelsize",
+    "ytick.labelsize",
+    "legend.fontsize",
+    "legend.title_fontsize",
+
+    "axes.linewidth",
+    "grid.linewidth",
+    "lines.linewidth",
+    "lines.markersize",
+    "patch.linewidth",
+
+    "xtick.major.width",
+    "ytick.major.width",
+    "xtick.minor.width",
+    "ytick.minor.width",
+
+    "xtick.major.size",
+    "ytick.major.size",
+    "xtick.minor.size",
+    "ytick.minor.size",
+
+]
+
+
+def set_theme(context="notebook", style="darkgrid", palette="deep",
+              font="sans-serif", font_scale=1, color_codes=True, rc=None):
     """
     Set aspects of the visual theme for all matplotlib and seaborn plots.

@@ -59,7 +116,11 @@ def set_theme(context='notebook', style='darkgrid', palette='deep', font=
     .. include:: ../docstrings/set_theme.rst

     """
-    pass
+    set_context(context, font_scale)
+    set_style(style, rc={"font.family": font})
+    set_palette(palette, color_codes=color_codes)
+    if rc is not None:
+        mpl.rcParams.update(rc)
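+    # Example (illustrative): set_theme(style="whitegrid", palette="colorblind",
+    # font_scale=1.2) applies context, style, and palette in one call, then lays
+    # any ``rc`` overrides on top.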


 def set(*args, **kwargs):
@@ -68,17 +129,18 @@ def set(*args, **kwargs):

     This function may be removed in the future.
     """
-    pass
+    set_theme(*args, **kwargs)


 def reset_defaults():
     """Restore all RC params to default settings."""
-    pass
+    mpl.rcParams.update(mpl.rcParamsDefault)


 def reset_orig():
     """Restore all RC params to original settings (respects custom rc)."""
-    pass
+    from . import _orig_rc_params
+    mpl.rcParams.update(_orig_rc_params)


 def axes_style(style=None, rc=None):
@@ -111,7 +173,131 @@ def axes_style(style=None, rc=None):
     .. include:: ../docstrings/axes_style.rst

     """
-    pass
+    if style is None:
+        style_dict = {k: mpl.rcParams[k] for k in _style_keys}
+
+    elif isinstance(style, dict):
+        style_dict = style
+
+    else:
+        styles = ["white", "dark", "whitegrid", "darkgrid", "ticks"]
+        if style not in styles:
+            raise ValueError(f"style must be one of {', '.join(styles)}")
+
+        # Define colors here
+        dark_gray = ".15"
+        light_gray = ".8"
+
+        # Common parameters
+        style_dict = {
+
+            "figure.facecolor": "white",
+            "axes.labelcolor": dark_gray,
+
+            "xtick.direction": "out",
+            "ytick.direction": "out",
+            "xtick.color": dark_gray,
+            "ytick.color": dark_gray,
+
+            "axes.axisbelow": True,
+            "grid.linestyle": "-",
+
+
+            "text.color": dark_gray,
+            "font.family": ["sans-serif"],
+            "font.sans-serif": ["Arial", "DejaVu Sans", "Liberation Sans",
+                                "Bitstream Vera Sans", "sans-serif"],
+
+
+            "lines.solid_capstyle": "round",
+            "patch.edgecolor": "w",
+            "patch.force_edgecolor": True,
+
+            "image.cmap": "rocket",
+
+            "xtick.top": False,
+            "ytick.right": False,
+
+        }
+
+        # Set grid on or off
+        if "grid" in style:
+            style_dict.update({
+                "axes.grid": True,
+            })
+        else:
+            style_dict.update({
+                "axes.grid": False,
+            })
+
+        # Set the color of the background, spines, and grids
+        if style.startswith("dark"):
+            style_dict.update({
+
+                "axes.facecolor": "#EAEAF2",
+                "axes.edgecolor": "white",
+                "grid.color": "white",
+
+                "axes.spines.left": True,
+                "axes.spines.bottom": True,
+                "axes.spines.right": True,
+                "axes.spines.top": True,
+
+            })
+
+        elif style == "whitegrid":
+            style_dict.update({
+
+                "axes.facecolor": "white",
+                "axes.edgecolor": light_gray,
+                "grid.color": light_gray,
+
+                "axes.spines.left": True,
+                "axes.spines.bottom": True,
+                "axes.spines.right": True,
+                "axes.spines.top": True,
+
+            })
+
+        elif style in ["white", "ticks"]:
+            style_dict.update({
+
+                "axes.facecolor": "white",
+                "axes.edgecolor": dark_gray,
+                "grid.color": light_gray,
+
+                "axes.spines.left": True,
+                "axes.spines.bottom": True,
+                "axes.spines.right": True,
+                "axes.spines.top": True,
+
+            })
+
+        # Show or hide the axes ticks
+        if style == "ticks":
+            style_dict.update({
+                "xtick.bottom": True,
+                "ytick.left": True,
+            })
+        else:
+            style_dict.update({
+                "xtick.bottom": False,
+                "ytick.left": False,
+            })
+
+    # Remove entries that are not defined in the base list of valid keys
+    # This lets us handle matplotlib <=/> 2.0
+    style_dict = {k: v for k, v in style_dict.items() if k in _style_keys}
+
+    # Override these settings with the provided rc dictionary
+    if rc is not None:
+        rc = {k: v for k, v in rc.items() if k in _style_keys}
+        style_dict.update(rc)
+
+    # Wrap in an _AxesStyle object so this can be used in a with statement
+    style_object = _AxesStyle(style_dict)
+
+    return style_object
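+
+# Illustrative usage: the returned _AxesStyle is a context manager, so style
+# parameters can be applied temporarily (plotting code below is hypothetical):
+#
+#     with axes_style("whitegrid"):
+#         fig, ax = plt.subplots()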


 def set_style(style=None, rc=None):
@@ -142,7 +328,8 @@ def set_style(style=None, rc=None):
     .. include:: ../docstrings/set_style.rst

     """
-    pass
+    style_object = axes_style(style, rc)
+    mpl.rcParams.update(style_object)


 def plotting_context(context=None, font_scale=1, rc=None):
@@ -179,7 +366,70 @@ def plotting_context(context=None, font_scale=1, rc=None):
     .. include:: ../docstrings/plotting_context.rst

     """
-    pass
+    if context is None:
+        context_dict = {k: mpl.rcParams[k] for k in _context_keys}
+
+    elif isinstance(context, dict):
+        context_dict = context
+
+    else:
+
+        contexts = ["paper", "notebook", "talk", "poster"]
+        if context not in contexts:
+            raise ValueError(f"context must be in {', '.join(contexts)}")
+
+        # Set up dictionary of default parameters
+        texts_base_context = {
+
+            "font.size": 12,
+            "axes.labelsize": 12,
+            "axes.titlesize": 12,
+            "xtick.labelsize": 11,
+            "ytick.labelsize": 11,
+            "legend.fontsize": 11,
+            "legend.title_fontsize": 12,
+
+        }
+
+        base_context = {
+
+            "axes.linewidth": 1.25,
+            "grid.linewidth": 1,
+            "lines.linewidth": 1.5,
+            "lines.markersize": 6,
+            "patch.linewidth": 1,
+
+            "xtick.major.width": 1.25,
+            "ytick.major.width": 1.25,
+            "xtick.minor.width": 1,
+            "ytick.minor.width": 1,
+
+            "xtick.major.size": 6,
+            "ytick.major.size": 6,
+            "xtick.minor.size": 4,
+            "ytick.minor.size": 4,
+
+        }
+        base_context.update(texts_base_context)
+
+        # Scale all the parameters by the same factor depending on the context
+        scaling = dict(paper=.8, notebook=1, talk=1.5, poster=2)[context]
+        context_dict = {k: v * scaling for k, v in base_context.items()}
+
+        # Now independently scale the fonts
+        font_keys = texts_base_context.keys()
+        font_dict = {k: context_dict[k] * font_scale for k in font_keys}
+        context_dict.update(font_dict)
+
+    # Override these settings with the provided rc dictionary
+    if rc is not None:
+        rc = {k: v for k, v in rc.items() if k in _context_keys}
+        context_dict.update(rc)
+
+    # Wrap in a _PlottingContext object so this can be used in a with statement
+    context_object = _PlottingContext(context_dict)
+
+    return context_object
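+
+# Illustrative usage: the returned _PlottingContext likewise works in a with
+# statement, e.g.
+#
+#     with plotting_context("talk", font_scale=1.2):
+#         ...  # plots drawn here use the scaled "talk" parameters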


 def set_context(context=None, font_scale=1, rc=None):
@@ -215,11 +465,11 @@ def set_context(context=None, font_scale=1, rc=None):
     .. include:: ../docstrings/set_context.rst

     """
-    pass
+    context_object = plotting_context(context, font_scale, rc)
+    mpl.rcParams.update(context_object)


 class _RCAesthetics(dict):
-
     def __enter__(self):
         rc = mpl.rcParams
         self._orig = {k: rc[k] for k in self._keys}
@@ -229,7 +479,6 @@ class _RCAesthetics(dict):
         self._set(self._orig)

     def __call__(self, func):
-
         @functools.wraps(func)
         def wrapper(*args, **kwargs):
             with self:
@@ -274,4 +523,11 @@ def set_palette(palette, n_colors=None, desat=None, color_codes=False):
     set_style : set the default parameters for figure style

     """
-    pass
+    colors = palettes.color_palette(palette, n_colors, desat)
+    cyl = cycler('color', colors)
+    mpl.rcParams['axes.prop_cycle'] = cyl
+    if color_codes:
+        try:
+            palettes.set_color_codes(palette)
+        except (ValueError, TypeError):
+            pass
diff --git a/seaborn/regression.py b/seaborn/regression.py
index 9a41fe79..5e5503a4 100644
--- a/seaborn/regression.py
+++ b/seaborn/regression.py
@@ -6,16 +6,20 @@ import numpy as np
 import pandas as pd
 import matplotlib as mpl
 import matplotlib.pyplot as plt
+
 try:
     import statsmodels
     assert statsmodels
     _has_statsmodels = True
 except ImportError:
     _has_statsmodels = False
+
 from . import utils
 from . import algorithms as algo
 from .axisgrid import FacetGrid, _facet_docs
-__all__ = ['lmplot', 'regplot', 'residplot']
+
+
+__all__ = ["lmplot", "regplot", "residplot"]


 class _LinearPlotter:
@@ -25,14 +29,42 @@ class _LinearPlotter:
     code that can be abstracted out should be put here.

     """
-
     def establish_variables(self, data, **kws):
         """Extract variables from data or use directly."""
-        pass
+        self.data = data
+
+        # Validate the inputs
+        any_strings = any([isinstance(v, str) for v in kws.values()])
+        if any_strings and data is None:
+            raise ValueError("Must pass `data` if using named variables.")
+
+        # Set the variables
+        for var, val in kws.items():
+            if isinstance(val, str):
+                vector = data[val]
+            elif isinstance(val, list):
+                vector = np.asarray(val)
+            else:
+                vector = val
+            if vector is not None and vector.shape != (1,):
+                vector = np.squeeze(vector)
+            if np.ndim(vector) > 1:
+                err = "regplot inputs must be 1d"
+                raise ValueError(err)
+            setattr(self, var, vector)

     def dropna(self, *vars):
         """Remove observations with missing data."""
-        pass
+        vals = [getattr(self, var) for var in vars]
+        vals = [v for v in vals if v is not None]
+        not_na = np.all(np.column_stack([pd.notnull(v) for v in vals]), axis=1)
+        for var in vars:
+            val = getattr(self, var)
+            if val is not None:
+                setattr(self, var, val[not_na])
+
+    def plot(self, ax):
+        raise NotImplementedError


 class _RegressionPlotter(_LinearPlotter):
@@ -41,15 +73,17 @@ class _RegressionPlotter(_LinearPlotter):
     This does the computations and drawing for the `regplot` function, and
     is thus also used indirectly by `lmplot`.
     """
-
-    def __init__(self, x, y, data=None, x_estimator=None, x_bins=None, x_ci
-        ='ci', scatter=True, fit_reg=True, ci=95, n_boot=1000, units=None,
-        seed=None, order=1, logistic=False, lowess=False, robust=False,
-        logx=False, x_partial=None, y_partial=None, truncate=False, dropna=
-        True, x_jitter=None, y_jitter=None, color=None, label=None):
+    def __init__(self, x, y, data=None, x_estimator=None, x_bins=None,
+                 x_ci="ci", scatter=True, fit_reg=True, ci=95, n_boot=1000,
+                 units=None, seed=None, order=1, logistic=False, lowess=False,
+                 robust=False, logx=False, x_partial=None, y_partial=None,
+                 truncate=False, dropna=True, x_jitter=None, y_jitter=None,
+                 color=None, label=None):
+
+        # Set member attributes
         self.x_estimator = x_estimator
         self.ci = ci
-        self.x_ci = ci if x_ci == 'ci' else x_ci
+        self.x_ci = ci if x_ci == "ci" else x_ci
         self.n_boot = n_boot
         self.seed = seed
         self.scatter = scatter
@@ -64,196 +98,567 @@ class _RegressionPlotter(_LinearPlotter):
         self.y_jitter = y_jitter
         self.color = color
         self.label = label
+
+        # Validate the regression options:
         if sum((order > 1, logistic, robust, lowess, logx)) > 1:
-            raise ValueError('Mutually exclusive regression options.')
-        self.establish_variables(data, x=x, y=y, units=units, x_partial=
-            x_partial, y_partial=y_partial)
+            raise ValueError("Mutually exclusive regression options.")
+
+        # Extract the data vals from the arguments or passed dataframe
+        self.establish_variables(data, x=x, y=y, units=units,
+                                 x_partial=x_partial, y_partial=y_partial)
+
+        # Drop null observations
         if dropna:
-            self.dropna('x', 'y', 'units', 'x_partial', 'y_partial')
+            self.dropna("x", "y", "units", "x_partial", "y_partial")
+
+        # Regress nuisance variables out of the data
         if self.x_partial is not None:
             self.x = self.regress_out(self.x, self.x_partial)
         if self.y_partial is not None:
             self.y = self.regress_out(self.y, self.y_partial)
+
+        # Possibly bin the predictor variable, which implies a point estimate
         if x_bins is not None:
             self.x_estimator = np.mean if x_estimator is None else x_estimator
             x_discrete, x_bins = self.bin_predictor(x_bins)
             self.x_discrete = x_discrete
         else:
             self.x_discrete = self.x
+
+        # Disable regression in case of singleton inputs
         if len(self.x) <= 1:
             self.fit_reg = False
+
+        # Save the range of the x variable for the grid later
         if self.fit_reg:
             self.x_range = self.x.min(), self.x.max()

     @property
     def scatter_data(self):
         """Data where each observation is a point."""
-        pass
+        x_j = self.x_jitter
+        if x_j is None:
+            x = self.x
+        else:
+            x = self.x + np.random.uniform(-x_j, x_j, len(self.x))
+
+        y_j = self.y_jitter
+        if y_j is None:
+            y = self.y
+        else:
+            y = self.y + np.random.uniform(-y_j, y_j, len(self.y))
+
+        return x, y

     @property
     def estimate_data(self):
         """Data with a point estimate and CI for each discrete x value."""
-        pass
+        x, y = self.x_discrete, self.y
+        vals = sorted(np.unique(x))
+        points, cis = [], []
+
+        for val in vals:
+
+            # Get the point estimate of the y variable
+            _y = y[x == val]
+            est = self.x_estimator(_y)
+            points.append(est)
+
+            # Compute the confidence interval for this estimate
+            if self.x_ci is None:
+                cis.append(None)
+            else:
+                units = None
+                if self.x_ci == "sd":
+                    sd = np.std(_y)
+                    _ci = est - sd, est + sd
+                else:
+                    if self.units is not None:
+                        units = self.units[x == val]
+                    boots = algo.bootstrap(_y,
+                                           func=self.x_estimator,
+                                           n_boot=self.n_boot,
+                                           units=units,
+                                           seed=self.seed)
+                    _ci = utils.ci(boots, self.x_ci)
+                cis.append(_ci)
+
+        return vals, points, cis

     def _check_statsmodels(self):
         """Check whether statsmodels is installed if any boolean options require it."""
-        pass
+        options = "logistic", "robust", "lowess"
+        err = "`{}=True` requires statsmodels, an optional dependency, to be installed."
+        for option in options:
+            if getattr(self, option) and not _has_statsmodels:
+                raise RuntimeError(err.format(option))

     def fit_regression(self, ax=None, x_range=None, grid=None):
         """Fit the regression model."""
-        pass
+        self._check_statsmodels()
+
+        # Create the grid for the regression
+        if grid is None:
+            if self.truncate:
+                x_min, x_max = self.x_range
+            else:
+                if ax is None:
+                    x_min, x_max = x_range
+                else:
+                    x_min, x_max = ax.get_xlim()
+            grid = np.linspace(x_min, x_max, 100)
+        ci = self.ci
+
+        # Fit the regression
+        if self.order > 1:
+            yhat, yhat_boots = self.fit_poly(grid, self.order)
+        elif self.logistic:
+            from statsmodels.genmod.generalized_linear_model import GLM
+            from statsmodels.genmod.families import Binomial
+            yhat, yhat_boots = self.fit_statsmodels(grid, GLM,
+                                                    family=Binomial())
+        elif self.lowess:
+            ci = None
+            grid, yhat = self.fit_lowess()
+        elif self.robust:
+            from statsmodels.robust.robust_linear_model import RLM
+            yhat, yhat_boots = self.fit_statsmodels(grid, RLM)
+        elif self.logx:
+            yhat, yhat_boots = self.fit_logx(grid)
+        else:
+            yhat, yhat_boots = self.fit_fast(grid)
+
+        # Compute the confidence interval at each grid point
+        if ci is None:
+            err_bands = None
+        else:
+            err_bands = utils.ci(yhat_boots, ci, axis=0)
+
+        return grid, yhat, err_bands

     def fit_fast(self, grid):
         """Low-level regression and prediction using linear algebra."""
-        pass
+        def reg_func(_x, _y):
+            return np.linalg.pinv(_x).dot(_y)
+
+        X, y = np.c_[np.ones(len(self.x)), self.x], self.y
+        grid = np.c_[np.ones(len(grid)), grid]
+        yhat = grid.dot(reg_func(X, y))
+        if self.ci is None:
+            return yhat, None
+
+        beta_boots = algo.bootstrap(X, y,
+                                    func=reg_func,
+                                    n_boot=self.n_boot,
+                                    units=self.units,
+                                    seed=self.seed).T
+        yhat_boots = grid.dot(beta_boots).T
+        return yhat, yhat_boots

     def fit_poly(self, grid, order):
         """Regression using numpy polyfit for higher-order trends."""
-        pass
+        def reg_func(_x, _y):
+            return np.polyval(np.polyfit(_x, _y, order), grid)
+
+        x, y = self.x, self.y
+        yhat = reg_func(x, y)
+        if self.ci is None:
+            return yhat, None
+
+        yhat_boots = algo.bootstrap(x, y,
+                                    func=reg_func,
+                                    n_boot=self.n_boot,
+                                    units=self.units,
+                                    seed=self.seed)
+        return yhat, yhat_boots

     def fit_statsmodels(self, grid, model, **kwargs):
         """More general regression function using statsmodels objects."""
-        pass
+        import statsmodels.tools.sm_exceptions as sme
+        X, y = np.c_[np.ones(len(self.x)), self.x], self.y
+        grid = np.c_[np.ones(len(grid)), grid]
+
+        def reg_func(_x, _y):
+            err_classes = (sme.PerfectSeparationError,)
+            try:
+                with warnings.catch_warnings():
+                    if hasattr(sme, "PerfectSeparationWarning"):
+                        # statsmodels>=0.14.0
+                        warnings.simplefilter("error", sme.PerfectSeparationWarning)
+                        err_classes = (*err_classes, sme.PerfectSeparationWarning)
+                    yhat = model(_y, _x, **kwargs).fit().predict(grid)
+            except err_classes:
+                yhat = np.empty(len(grid))
+                yhat.fill(np.nan)
+            return yhat
+
+        yhat = reg_func(X, y)
+        if self.ci is None:
+            return yhat, None
+
+        yhat_boots = algo.bootstrap(X, y,
+                                    func=reg_func,
+                                    n_boot=self.n_boot,
+                                    units=self.units,
+                                    seed=self.seed)
+        return yhat, yhat_boots

     def fit_lowess(self):
         """Fit a locally-weighted regression, which returns its own grid."""
-        pass
+        from statsmodels.nonparametric.smoothers_lowess import lowess
+        grid, yhat = lowess(self.y, self.x).T
+        return grid, yhat

     def fit_logx(self, grid):
         """Fit the model in log-space."""
-        pass
+        X, y = np.c_[np.ones(len(self.x)), self.x], self.y
+        grid = np.c_[np.ones(len(grid)), np.log(grid)]
+
+        def reg_func(_x, _y):
+            _x = np.c_[_x[:, 0], np.log(_x[:, 1])]
+            return np.linalg.pinv(_x).dot(_y)
+
+        yhat = grid.dot(reg_func(X, y))
+        if self.ci is None:
+            return yhat, None
+
+        beta_boots = algo.bootstrap(X, y,
+                                    func=reg_func,
+                                    n_boot=self.n_boot,
+                                    units=self.units,
+                                    seed=self.seed).T
+        yhat_boots = grid.dot(beta_boots).T
+        return yhat, yhat_boots

     def bin_predictor(self, bins):
         """Discretize a predictor by assigning value to closest bin."""
-        pass
+        x = np.asarray(self.x)
+        if np.isscalar(bins):
+            percentiles = np.linspace(0, 100, bins + 2)[1:-1]
+            bins = np.percentile(x, percentiles)
+        else:
+            bins = np.ravel(bins)
+
+        dist = np.abs(np.subtract.outer(x, bins))
+        x_binned = bins[np.argmin(dist, axis=1)].ravel()
+
+        return x_binned, bins

     def regress_out(self, a, b):
         """Regress b from a keeping a's original mean."""
-        pass
+        a_mean = a.mean()
+        a = a - a_mean
+        b = b - b.mean()
+        b = np.c_[b]
+        a_prime = a - b.dot(np.linalg.pinv(b).dot(a))
+        return np.asarray(a_prime + a_mean).reshape(a.shape)

     def plot(self, ax, scatter_kws, line_kws):
         """Draw the full plot."""
-        pass
+        # Insert the plot label into the correct set of keyword arguments
+        if self.scatter:
+            scatter_kws["label"] = self.label
+        else:
+            line_kws["label"] = self.label
+
+        # Use the current color cycle state as a default
+        if self.color is None:
+            lines, = ax.plot([], [])
+            color = lines.get_color()
+            lines.remove()
+        else:
+            color = self.color
+
+        # Ensure that color is hex to avoid matplotlib weirdness
+        color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))
+
+        # Let color in keyword arguments override overall plot color
+        scatter_kws.setdefault("color", color)
+        line_kws.setdefault("color", color)
+
+        # Draw the constituent plots
+        if self.scatter:
+            self.scatterplot(ax, scatter_kws)
+
+        if self.fit_reg:
+            self.lineplot(ax, line_kws)
+
+        # Label the axes
+        if hasattr(self.x, "name"):
+            ax.set_xlabel(self.x.name)
+        if hasattr(self.y, "name"):
+            ax.set_ylabel(self.y.name)

     def scatterplot(self, ax, kws):
         """Draw the data."""
-        pass
+        # Treat the line-based markers specially, explicitly setting larger
+        # linewidth than is provided by the seaborn style defaults.
+        # This would ideally be handled better in matplotlib (i.e., distinguish
+        # between edgewidth for solid glyphs and linewidth for line glyphs),
+        # but this should do for now.
+        line_markers = ["1", "2", "3", "4", "+", "x", "|", "_"]
+        if self.x_estimator is None:
+            if "marker" in kws and kws["marker"] in line_markers:
+                lw = mpl.rcParams["lines.linewidth"]
+            else:
+                lw = mpl.rcParams["lines.markeredgewidth"]
+            kws.setdefault("linewidths", lw)
+
+            if not hasattr(kws["color"], "shape") or kws["color"].shape[1] < 4:
+                kws.setdefault("alpha", .8)
+
+            x, y = self.scatter_data
+            ax.scatter(x, y, **kws)
+        else:
+            # TODO abstraction
+            ci_kws = {"color": kws["color"]}
+            if "alpha" in kws:
+                ci_kws["alpha"] = kws["alpha"]
+            ci_kws["linewidth"] = mpl.rcParams["lines.linewidth"] * 1.75
+            kws.setdefault("s", 50)
+
+            xs, ys, cis = self.estimate_data
+            if [ci for ci in cis if ci is not None]:
+                for x, ci in zip(xs, cis):
+                    ax.plot([x, x], ci, **ci_kws)
+            ax.scatter(xs, ys, **kws)

     def lineplot(self, ax, kws):
         """Draw the model."""
-        pass
+        # Fit the regression model
+        grid, yhat, err_bands = self.fit_regression(ax)
+        edges = grid[0], grid[-1]
+
+        # Get/set default aesthetics
+        fill_color = kws["color"]
+        lw = kws.pop("lw", mpl.rcParams["lines.linewidth"] * 1.5)
+        kws.setdefault("linewidth", lw)
+
+        # Draw the regression line and confidence interval
+        line, = ax.plot(grid, yhat, **kws)
+        if not self.truncate:
+            line.sticky_edges.x[:] = edges  # Prevent mpl from adding margin
+        if err_bands is not None:
+            ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)


-_regression_docs = dict(model_api=dedent(
-    """    There are a number of mutually exclusive options for estimating the
+_regression_docs = dict(
+
+    model_api=dedent("""\
+    There are a number of mutually exclusive options for estimating the
     regression model. See the :ref:`tutorial <regression_tutorial>` for more
-    information.    """
-    ), regplot_vs_lmplot=dedent(
-    """    The :func:`regplot` and :func:`lmplot` functions are closely related, but
+    information.\
+    """),
+    regplot_vs_lmplot=dedent("""\
+    The :func:`regplot` and :func:`lmplot` functions are closely related, but
     the former is an axes-level function while the latter is a figure-level
-    function that combines :func:`regplot` and :class:`FacetGrid`.    """
-    ), x_estimator=dedent(
-    """    x_estimator : callable that maps vector -> scalar, optional
+    function that combines :func:`regplot` and :class:`FacetGrid`.\
+    """),
+    x_estimator=dedent("""\
+    x_estimator : callable that maps vector -> scalar, optional
         Apply this function to each unique value of ``x`` and plot the
         resulting estimate. This is useful when ``x`` is a discrete variable.
         If ``x_ci`` is given, this estimate will be bootstrapped and a
-        confidence interval will be drawn.    """
-    ), x_bins=dedent(
-    """    x_bins : int or vector, optional
+        confidence interval will be drawn.\
+    """),
+    x_bins=dedent("""\
+    x_bins : int or vector, optional
         Bin the ``x`` variable into discrete bins and then estimate the central
         tendency and a confidence interval. This binning only influences how
         the scatterplot is drawn; the regression is still fit to the original
         data.  This parameter is interpreted either as the number of
         evenly-sized (not necessarily spaced) bins or the positions of the bin
         centers. When this parameter is used, it implies that the default of
-        ``x_estimator`` is ``numpy.mean``.    """
-    ), x_ci=dedent(
-    """    x_ci : "ci", "sd", int in [0, 100] or None, optional
+        ``x_estimator`` is ``numpy.mean``.\
+    """),
+    x_ci=dedent("""\
+    x_ci : "ci", "sd", int in [0, 100] or None, optional
         Size of the confidence interval used when plotting a central tendency
         for discrete values of ``x``. If ``"ci"``, defer to the value of the
         ``ci`` parameter. If ``"sd"``, skip bootstrapping and show the
-        standard deviation of the observations in each bin.    """
-    ), scatter=dedent(
-    """    scatter : bool, optional
+        standard deviation of the observations in each bin.\
+    """),
+    scatter=dedent("""\
+    scatter : bool, optional
         If ``True``, draw a scatterplot with the underlying observations (or
-        the ``x_estimator`` values).    """
-    ), fit_reg=dedent(
-    """    fit_reg : bool, optional
+        the ``x_estimator`` values).\
+    """),
+    fit_reg=dedent("""\
+    fit_reg : bool, optional
         If ``True``, estimate and plot a regression model relating the ``x``
-        and ``y`` variables.    """
-    ), ci=dedent(
-    """    ci : int in [0, 100] or None, optional
+        and ``y`` variables.\
+    """),
+    ci=dedent("""\
+    ci : int in [0, 100] or None, optional
         Size of the confidence interval for the regression estimate. This will
         be drawn using translucent bands around the regression line. The
         confidence interval is estimated using a bootstrap; for large
         datasets, it may be advisable to avoid that computation by setting
-        this parameter to None.    """
-    ), n_boot=dedent(
-    """    n_boot : int, optional
+        this parameter to None.\
+    """),
+    n_boot=dedent("""\
+    n_boot : int, optional
         Number of bootstrap resamples used to estimate the ``ci``. The default
         value attempts to balance time and stability; you may want to increase
-        this value for "final" versions of plots.    """
-    ), units=dedent(
-    """    units : variable name in ``data``, optional
+        this value for "final" versions of plots.\
+    """),
+    units=dedent("""\
+    units : variable name in ``data``, optional
         If the ``x`` and ``y`` observations are nested within sampling units,
         those can be specified here. This will be taken into account when
         computing the confidence intervals by performing a multilevel bootstrap
         that resamples both units and observations (within unit). This does not
-        otherwise influence how the regression is estimated or drawn.    """
-    ), seed=dedent(
-    """    seed : int, numpy.random.Generator, or numpy.random.RandomState, optional
-        Seed or random number generator for reproducible bootstrapping.    """
-    ), order=dedent(
-    """    order : int, optional
+        otherwise influence how the regression is estimated or drawn.\
+    """),
+    seed=dedent("""\
+    seed : int, numpy.random.Generator, or numpy.random.RandomState, optional
+        Seed or random number generator for reproducible bootstrapping.\
+    """),
+    order=dedent("""\
+    order : int, optional
         If ``order`` is greater than 1, use ``numpy.polyfit`` to estimate a
-        polynomial regression.    """
-    ), logistic=dedent(
-    """    logistic : bool, optional
+        polynomial regression.\
+    """),
+    logistic=dedent("""\
+    logistic : bool, optional
         If ``True``, assume that ``y`` is a binary variable and use
         ``statsmodels`` to estimate a logistic regression model. Note that this
         is substantially more computationally intensive than linear regression,
         so you may wish to decrease the number of bootstrap resamples
-        (``n_boot``) or set ``ci`` to None.    """
-    ), lowess=dedent(
-    """    lowess : bool, optional
+        (``n_boot``) or set ``ci`` to None.\
+    """),
+    lowess=dedent("""\
+    lowess : bool, optional
         If ``True``, use ``statsmodels`` to estimate a nonparametric lowess
         model (locally weighted linear regression). Note that confidence
-        intervals cannot currently be drawn for this kind of model.    """
-    ), robust=dedent(
-    """    robust : bool, optional
+        intervals cannot currently be drawn for this kind of model.\
+    """),
+    robust=dedent("""\
+    robust : bool, optional
         If ``True``, use ``statsmodels`` to estimate a robust regression. This
         will de-weight outliers. Note that this is substantially more
         computationally intensive than standard linear regression, so you may
         wish to decrease the number of bootstrap resamples (``n_boot``) or set
-        ``ci`` to None.    """
-    ), logx=dedent(
-    """    logx : bool, optional
+        ``ci`` to None.\
+    """),
+    logx=dedent("""\
+    logx : bool, optional
         If ``True``, estimate a linear regression of the form y ~ log(x), but
         plot the scatterplot and regression model in the input space. Note that
-        ``x`` must be positive for this to work.    """
-    ), xy_partial=dedent(
-    """    {x,y}_partial : strings in ``data`` or matrices
+        ``x`` must be positive for this to work.\
+    """),
+    xy_partial=dedent("""\
+    {x,y}_partial : strings in ``data`` or matrices
         Confounding variables to regress out of the ``x`` or ``y`` variables
-        before plotting.    """
-    ), truncate=dedent(
-    """    truncate : bool, optional
+        before plotting.\
+    """),
+    truncate=dedent("""\
+    truncate : bool, optional
         If ``True``, the regression line is bounded by the data limits. If
         ``False``, it extends to the ``x`` axis limits.
-    """
-    ), xy_jitter=dedent(
-    """    {x,y}_jitter : floats, optional
+    """),
+    xy_jitter=dedent("""\
+    {x,y}_jitter : floats, optional
         Add uniform random noise of this size to either the ``x`` or ``y``
         variables. The noise is added to a copy of the data after fitting the
         regression, and only influences the look of the scatterplot. This can
-        be helpful when plotting variables that take discrete values.    """
-    ), scatter_line_kws=dedent(
-    """    {scatter,line}_kws : dictionaries
+        be helpful when plotting variables that take discrete values.\
+    """),
+    scatter_line_kws=dedent("""\
+    {scatter,line}_kws : dictionaries
         Additional keyword arguments to pass to ``plt.scatter`` and
-        ``plt.plot``.    """
-    ))
+        ``plt.plot``.\
+    """),
+)
 _regression_docs.update(_facet_docs)
-lmplot.__doc__ = dedent(
-    """    Plot data and regression model fits across a FacetGrid.
+
+
+def lmplot(
+    data, *,
+    x=None, y=None, hue=None, col=None, row=None,
+    palette=None, col_wrap=None, height=5, aspect=1, markers="o",
+    sharex=None, sharey=None, hue_order=None, col_order=None, row_order=None,
+    legend=True, legend_out=None, x_estimator=None, x_bins=None,
+    x_ci="ci", scatter=True, fit_reg=True, ci=95, n_boot=1000,
+    units=None, seed=None, order=1, logistic=False, lowess=False,
+    robust=False, logx=False, x_partial=None, y_partial=None,
+    truncate=True, x_jitter=None, y_jitter=None, scatter_kws=None,
+    line_kws=None, facet_kws=None,
+):
+
+    if facet_kws is None:
+        facet_kws = {}
+
+    def facet_kw_deprecation(key, val):
+        msg = (
+            f"{key} is deprecated from the `lmplot` function signature. "
+            "Please update your code to pass it using `facet_kws`."
+        )
+        if val is not None:
+            warnings.warn(msg, UserWarning)
+            facet_kws[key] = val
+
+    facet_kw_deprecation("sharex", sharex)
+    facet_kw_deprecation("sharey", sharey)
+    facet_kw_deprecation("legend_out", legend_out)
+
+    if data is None:
+        raise TypeError("Missing required keyword argument `data`.")
+
+    # Reduce the dataframe to only needed columns
+    need_cols = [x, y, hue, col, row, units, x_partial, y_partial]
+    cols = np.unique([a for a in need_cols if a is not None]).tolist()
+    data = data[cols]
+
+    # Initialize the grid
+    facets = FacetGrid(
+        data, row=row, col=col, hue=hue,
+        palette=palette,
+        row_order=row_order, col_order=col_order, hue_order=hue_order,
+        height=height, aspect=aspect, col_wrap=col_wrap,
+        **facet_kws,
+    )
+
+    # Add the markers here as FacetGrid has figured out how many levels of the
+    # hue variable are needed and we don't want to duplicate that process
+    if facets.hue_names is None:
+        n_markers = 1
+    else:
+        n_markers = len(facets.hue_names)
+    if not isinstance(markers, list):
+        markers = [markers] * n_markers
+    if len(markers) != n_markers:
+        raise ValueError("markers must be a singleton or a list of markers "
+                         "for each level of the hue variable")
+    facets.hue_kws = {"marker": markers}
+
+    def update_datalim(data, x, y, ax, **kws):
+        xys = data[[x, y]].to_numpy().astype(float)
+        ax.update_datalim(xys, updatey=False)
+        ax.autoscale_view(scaley=False)
+
+    facets.map_dataframe(update_datalim, x=x, y=y)
+
+    # Draw the regression plot on each facet
+    regplot_kws = dict(
+        x_estimator=x_estimator, x_bins=x_bins, x_ci=x_ci,
+        scatter=scatter, fit_reg=fit_reg, ci=ci, n_boot=n_boot, units=units,
+        seed=seed, order=order, logistic=logistic, lowess=lowess,
+        robust=robust, logx=logx, x_partial=x_partial, y_partial=y_partial,
+        truncate=truncate, x_jitter=x_jitter, y_jitter=y_jitter,
+        scatter_kws=scatter_kws, line_kws=line_kws,
+    )
+    facets.map_dataframe(regplot, x=x, y=y, **regplot_kws)
+    facets.set_axis_labels(x, y)
+
+    # Add a legend
+    if legend and (hue is not None) and (hue not in [col, row]):
+        facets.add_legend()
+    return facets
+
+
+lmplot.__doc__ = dedent("""\
+    Plot data and regression model fits across a FacetGrid.

     This function combines :func:`regplot` and :class:`FacetGrid`. It is
     intended as a convenient interface to fit regression models across
@@ -341,10 +746,38 @@ lmplot.__doc__ = dedent(

     .. include:: ../docstrings/lmplot.rst

-    """
-    ).format(**_regression_docs)
-regplot.__doc__ = dedent(
-    """    Plot data and a linear regression model fit.
+    """).format(**_regression_docs)
+
+
+def regplot(
+    data=None, *, x=None, y=None,
+    x_estimator=None, x_bins=None, x_ci="ci",
+    scatter=True, fit_reg=True, ci=95, n_boot=1000, units=None,
+    seed=None, order=1, logistic=False, lowess=False, robust=False,
+    logx=False, x_partial=None, y_partial=None,
+    truncate=True, dropna=True, x_jitter=None, y_jitter=None,
+    label=None, color=None, marker="o",
+    scatter_kws=None, line_kws=None, ax=None
+):
+
+    plotter = _RegressionPlotter(x, y, data, x_estimator, x_bins, x_ci,
+                                 scatter, fit_reg, ci, n_boot, units, seed,
+                                 order, logistic, lowess, robust, logx,
+                                 x_partial, y_partial, truncate, dropna,
+                                 x_jitter, y_jitter, color, label)
+
+    if ax is None:
+        ax = plt.gca()
+
+    scatter_kws = {} if scatter_kws is None else copy.copy(scatter_kws)
+    scatter_kws["marker"] = marker
+    line_kws = {} if line_kws is None else copy.copy(line_kws)
+    plotter.plot(ax, scatter_kws, line_kws)
+    return ax
+
+
+regplot.__doc__ = dedent("""\
+    Plot data and a linear regression model fit.

     {model_api}

@@ -415,13 +848,15 @@ regplot.__doc__ = dedent(

     .. include:: ../docstrings/regplot.rst

-    """
-    ).format(**_regression_docs)
+    """).format(**_regression_docs)


-def residplot(data=None, *, x=None, y=None, x_partial=None, y_partial=None,
-    lowess=False, order=1, robust=False, dropna=True, label=None, color=
-    None, scatter_kws=None, line_kws=None, ax=None):
+def residplot(
+    data=None, *, x=None, y=None,
+    x_partial=None, y_partial=None, lowess=False,
+    order=1, robust=False, dropna=True, label=None, color=None,
+    scatter_kws=None, line_kws=None, ax=None
+):
     """Plot the residuals of a linear regression.

     This function will regress y on x (possibly as a robust or polynomial
@@ -477,4 +912,29 @@ def residplot(data=None, *, x=None, y=None, x_partial=None, y_partial=None,
     .. include:: ../docstrings/residplot.rst

     """
-    pass
+    plotter = _RegressionPlotter(x, y, data, ci=None,
+                                 order=order, robust=robust,
+                                 x_partial=x_partial, y_partial=y_partial,
+                                 dropna=dropna, color=color, label=label)
+
+    if ax is None:
+        ax = plt.gca()
+
+    # Calculate the residual from a linear regression
+    _, yhat, _ = plotter.fit_regression(grid=plotter.x)
+    plotter.y = plotter.y - yhat
+
+    # Set the regression option on the plotter
+    if lowess:
+        plotter.lowess = True
+    else:
+        plotter.fit_reg = False
+
+    # Plot a horizontal line at 0
+    ax.axhline(0, ls=":", c=".2")
+
+    # Draw the scatterplot
+    scatter_kws = {} if scatter_kws is None else scatter_kws.copy()
+    line_kws = {} if line_kws is None else line_kws.copy()
+    plotter.plot(ax, scatter_kws, line_kws)
+    return ax
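
For orientation, here is a minimal usage sketch of the regression functions restored by this part of the patch. It is not part of the diff above; it assumes the bundled "tips" example dataset can be fetched with sns.load_dataset (network access on first use) and uses only the public signatures shown in the patch.

    import matplotlib.pyplot as plt
    import seaborn as sns

    tips = sns.load_dataset("tips")

    # Axes-level: scatter plus a linear fit with a bootstrapped 95% CI band
    # (exercises _RegressionPlotter.fit_fast and plot/scatterplot/lineplot).
    ax = sns.regplot(data=tips, x="total_bill", y="tip", n_boot=200)

    # Binned point estimates with error bars instead of the raw observations
    # (exercises bin_predictor and estimate_data).
    sns.regplot(data=tips, x="total_bill", y="tip", x_bins=5, fit_reg=False, ax=ax)

    # Residuals around a horizontal reference line (exercises residplot).
    _, res_ax = plt.subplots()
    sns.residplot(data=tips, x="total_bill", y="tip", ax=res_ax)
    plt.show()
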
diff --git a/seaborn/relational.py b/seaborn/relational.py
index 76efb2d1..ff0701c7 100644
--- a/seaborn/relational.py
+++ b/seaborn/relational.py
@@ -1,19 +1,35 @@
 from functools import partial
 import warnings
+
 import numpy as np
 import pandas as pd
 import matplotlib as mpl
 import matplotlib.pyplot as plt
 from matplotlib.cbook import normalize_kwargs
-from ._base import VectorPlotter
-from .utils import adjust_legend_subtitles, _default_color, _deprecate_ci, _get_transform_functions, _scatter_legend_artist
+
+from ._base import (
+    VectorPlotter,
+)
+from .utils import (
+    adjust_legend_subtitles,
+    _default_color,
+    _deprecate_ci,
+    _get_transform_functions,
+    _scatter_legend_artist,
+)
 from ._compat import groupby_apply_include_groups
 from ._statistics import EstimateAggregator, WeightedAggregator
 from .axisgrid import FacetGrid, _facet_docs
 from ._docstrings import DocstringComponents, _core_docs
-__all__ = ['relplot', 'scatterplot', 'lineplot']
-_relational_narrative = DocstringComponents(dict(main_api=
-    """
+
+
+__all__ = ["relplot", "scatterplot", "lineplot"]
+
+
+_relational_narrative = DocstringComponents(dict(
+
+    # ---  Introductory prose
+    main_api="""
 The relationship between `x` and `y` can be shown for different subsets
 of the data using the `hue`, `size`, and `style` parameters. These
 parameters control what visual semantics are used to identify the different
@@ -24,9 +40,9 @@ interpret and is often ineffective. Using redundant semantics (i.e. both
 graphics more accessible.

 See the :ref:`tutorial <relational_tutorial>` for more information.
-    """
-    , relational_semantic=
-    """
+    """,
+
+    relational_semantic="""
 The default treatment of the `hue` (and to a lesser extent, `size`)
 semantic, if present, depends on whether the variable is inferred to
 represent "numeric" or "categorical" data. In particular, numeric variables
@@ -34,67 +50,62 @@ are represented with a sequential colormap by default, and the legend
 entries show regular "ticks" with values that may or may not exist in the
 data. This behavior can be controlled through various parameters, as
 described and illustrated below.
-    """
-    ))
-_relational_docs = dict(data_vars=
-    """
+    """,
+))
+
+_relational_docs = dict(
+
+    # --- Shared function parameters
+    data_vars="""
 x, y : names of variables in `data` or vector data
     Input data variables; must be numeric. Can pass data directly or
     reference columns in `data`.
-    """
-    , data=
-    """
+    """,
+    data="""
 data : DataFrame, array, or list of arrays
     Input data structure. If `x` and `y` are specified as names, this
     should be a "long-form" DataFrame containing those columns. Otherwise
     it is treated as "wide-form" data and grouping variables are ignored.
     See the examples for the various ways this parameter can be specified
     and the different effects of each.
-    """
-    , palette=
-    """
+    """,
+    palette="""
 palette : string, list, dict, or matplotlib colormap
     An object that determines how colors are chosen when `hue` is used.
     It can be the name of a seaborn palette or matplotlib colormap, a list
     of colors (anything matplotlib understands), a dict mapping levels
     of the `hue` variable to colors, or a matplotlib colormap object.
-    """
-    , hue_order=
-    """
+    """,
+    hue_order="""
 hue_order : list
     Specified order for the appearance of the `hue` variable levels,
     otherwise they are determined from the data. Not relevant when the
     `hue` variable is numeric.
-    """
-    , hue_norm=
-    """
+    """,
+    hue_norm="""
 hue_norm : tuple or :class:`matplotlib.colors.Normalize` object
     Normalization in data units for colormap applied to the `hue`
     variable when it is numeric. Not relevant if `hue` is categorical.
-    """
-    , sizes=
-    """
+    """,
+    sizes="""
 sizes : list, dict, or tuple
     An object that determines how sizes are chosen when `size` is used.
     List or dict arguments should provide a size for each unique data value,
     which forces a categorical interpretation. The argument may also be a
     min, max tuple.
-    """
-    , size_order=
-    """
+    """,
+    size_order="""
 size_order : list
     Specified order for appearance of the `size` variable levels,
     otherwise they are determined from the data. Not relevant when the
     `size` variable is numeric.
-    """
-    , size_norm=
-    """
+    """,
+    size_norm="""
 size_norm : tuple or Normalize object
     Normalization in data units for scaling plot objects when the
     `size` variable is numeric.
-    """
-    , dashes=
-    """
+    """,
+    dashes="""
 dashes : boolean, list, or dictionary
     Object determining how to draw the lines for different levels of the
     `style` variable. Setting to `True` will use default dash codes, or
@@ -102,98 +113,108 @@ dashes : boolean, list, or dictionary
     `style` variable to dash codes. Setting to `False` will use solid
     lines for all subsets. Dashes are specified as in matplotlib: a tuple
     of `(segment, gap)` lengths, or an empty string to draw a solid line.
-    """
-    , markers=
-    """
+    """,
+    markers="""
 markers : boolean, list, or dictionary
     Object determining how to draw the markers for different levels of the
     `style` variable. Setting to `True` will use default markers, or
     you can pass a list of markers or a dictionary mapping levels of the
     `style` variable to markers. Setting to `False` will draw
     marker-less lines.  Markers are specified as in matplotlib.
-    """
-    , style_order=
-    """
+    """,
+    style_order="""
 style_order : list
     Specified order for appearance of the `style` variable levels,
     otherwise they are determined from the data. Not relevant when the
     `style` variable is numeric.
-    """
-    , units=
-    """
+    """,
+    units="""
 units : vector or key in `data`
     Grouping variable identifying sampling units. When used, a separate
     line will be drawn for each unit with appropriate semantics, but no
     legend entry will be added. Useful for showing distribution of
     experimental replicates when exact identities are not needed.
-    """
-    , estimator=
-    """
+    """,
+    estimator="""
 estimator : name of pandas method or callable or None
     Method for aggregating across multiple observations of the `y`
     variable at the same `x` level. If `None`, all observations will
     be drawn.
-    """
-    , ci=
-    """
+    """,
+    ci="""
 ci : int or "sd" or None
     Size of the confidence interval to draw when aggregating.

     .. deprecated:: 0.12.0
         Use the new `errorbar` parameter for more flexibility.

-    """
-    , n_boot=
-    """
+    """,
+    n_boot="""
 n_boot : int
     Number of bootstraps to use for computing the confidence interval.
-    """
-    , seed=
-    """
+    """,
+    seed="""
 seed : int, numpy.random.Generator, or numpy.random.RandomState
     Seed or random number generator for reproducible bootstrapping.
-    """
-    , legend=
-    """
+    """,
+    legend="""
 legend : "auto", "brief", "full", or False
     How to draw the legend. If "brief", numeric `hue` and `size`
     variables will be represented with a sample of evenly spaced values.
     If "full", every group will get an entry in the legend. If "auto",
     choose between brief and full representation based on the number of levels.
     If `False`, no legend data is added and no legend is drawn.
-    """
-    , ax_in=
-    """
+    """,
+    ax_in="""
 ax : matplotlib Axes
     Axes object to draw the plot onto, otherwise uses the current Axes.
-    """
-    , ax_out=
-    """
+    """,
+    ax_out="""
 ax : matplotlib Axes
     Returns the Axes object with the plot drawn onto it.
-    """
-    )
-_param_docs = DocstringComponents.from_nested_components(core=_core_docs[
-    'params'], facets=DocstringComponents(_facet_docs), rel=
-    DocstringComponents(_relational_docs), stat=DocstringComponents.
-    from_function_params(EstimateAggregator.__init__))
+    """,
+
+)
+
+
+_param_docs = DocstringComponents.from_nested_components(
+    core=_core_docs["params"],
+    facets=DocstringComponents(_facet_docs),
+    rel=DocstringComponents(_relational_docs),
+    stat=DocstringComponents.from_function_params(EstimateAggregator.__init__),
+)


 class _RelationalPlotter(VectorPlotter):
-    wide_structure = {'x': '@index', 'y': '@values', 'hue': '@columns',
-        'style': '@columns'}
+
+    wide_structure = {
+        "x": "@index", "y": "@values", "hue": "@columns", "style": "@columns",
+    }
+
+    # TODO where best to define default parameters?
     sort = True


 class _LinePlotter(_RelationalPlotter):
-    _legend_attributes = ['color', 'linewidth', 'marker', 'dashes']

-    def __init__(self, *, data=None, variables={}, estimator=None, n_boot=
-        None, seed=None, errorbar=None, sort=True, orient='x', err_style=
-        None, err_kws=None, legend=None):
-        self._default_size_range = np.r_[0.5, 2] * mpl.rcParams[
-            'lines.linewidth']
+    _legend_attributes = ["color", "linewidth", "marker", "dashes"]
+
+    def __init__(
+        self, *,
+        data=None, variables={},
+        estimator=None, n_boot=None, seed=None, errorbar=None,
+        sort=True, orient="x", err_style=None, err_kws=None, legend=None
+    ):
+
+        # TODO this is messy, we want the mapping to be agnostic about
+        # the kind of plot to draw, but for the time being we need to set
+        # this information so the SizeMapping can use it
+        self._default_size_range = (
+            np.r_[.5, 2] * mpl.rcParams["lines.linewidth"]
+        )
+
         super().__init__(data=data, variables=variables)
+
         self.estimator = estimator
         self.errorbar = errorbar
         self.n_boot = n_boot
@@ -202,25 +223,301 @@ class _LinePlotter(_RelationalPlotter):
         self.orient = orient
         self.err_style = err_style
         self.err_kws = {} if err_kws is None else err_kws
+
         self.legend = legend

     def plot(self, ax, kws):
         """Draw the plot onto an axes, passing matplotlib kwargs."""
-        pass
+
+        # Draw a test plot, using the passed in kwargs. The goal here is to
+        # honor both (a) the current state of the plot cycler and (b) the
+        # specified kwargs on all the lines we will draw, overriding when
+        # relevant with the data semantics. Note that we won't cycle
+        # internally; in other words, if `hue` is not used, all elements will
+        # have the same color, but they will have the color that you would have
+        # gotten from the corresponding matplotlib function, and calling the
+        # function will advance the axes property cycle.
+
+        kws = normalize_kwargs(kws, mpl.lines.Line2D)
+        kws.setdefault("markeredgewidth", 0.75)
+        kws.setdefault("markeredgecolor", "w")
+
+        # Set default error kwargs
+        err_kws = self.err_kws.copy()
+        if self.err_style == "band":
+            err_kws.setdefault("alpha", .2)
+        elif self.err_style == "bars":
+            pass
+        elif self.err_style is not None:
+            err = "`err_style` must be 'band' or 'bars', not {}"
+            raise ValueError(err.format(self.err_style))
+
+        # Initialize the aggregation object
+        weighted = "weight" in self.plot_data
+        agg = (WeightedAggregator if weighted else EstimateAggregator)(
+            self.estimator, self.errorbar, n_boot=self.n_boot, seed=self.seed,
+        )
+
+        # TODO abstract variable to aggregate over here-ish. Better name?
+        orient = self.orient
+        if orient not in {"x", "y"}:
+            err = f"`orient` must be either 'x' or 'y', not {orient!r}."
+            raise ValueError(err)
+        other = {"x": "y", "y": "x"}[orient]
+
+        # TODO How to handle NA? We don't want NA to propagate through to the
+        # estimate/CI when some values are present, but we would also like
+        # matplotlib to show "gaps" in the line when all values are missing.
+        # This is straightforward absent aggregation, but complicated with it.
+        # If we want to use nas, we need to conditionalize dropna in iter_data.
+
+        # Loop over the semantic subsets and add to the plot
+        grouping_vars = "hue", "size", "style"
+        for sub_vars, sub_data in self.iter_data(grouping_vars, from_comp_data=True):
+
+            if self.sort:
+                sort_vars = ["units", orient, other]
+                sort_cols = [var for var in sort_vars if var in self.variables]
+                sub_data = sub_data.sort_values(sort_cols)
+
+            if (
+                self.estimator is not None
+                and sub_data[orient].value_counts().max() > 1
+            ):
+                if "units" in self.variables:
+                    # TODO eventually relax this constraint
+                    err = "estimator must be None when specifying units"
+                    raise ValueError(err)
+                grouped = sub_data.groupby(orient, sort=self.sort)
+                # Could pass as_index=False instead of reset_index,
+                # but that fails on a corner case with older pandas.
+                sub_data = (
+                    grouped
+                    .apply(agg, other, **groupby_apply_include_groups(False))
+                    .reset_index()
+                )
+            else:
+                sub_data[f"{other}min"] = np.nan
+                sub_data[f"{other}max"] = np.nan
+
+            # Apply inverse axis scaling
+            for var in "xy":
+                _, inv = _get_transform_functions(ax, var)
+                for col in sub_data.filter(regex=f"^{var}"):
+                    sub_data[col] = inv(sub_data[col])
+
+            # --- Draw the main line(s)
+
+            if "units" in self.variables:   # XXX why not add to grouping variables?
+                lines = []
+                for _, unit_data in sub_data.groupby("units"):
+                    lines.extend(ax.plot(unit_data["x"], unit_data["y"], **kws))
+            else:
+                lines = ax.plot(sub_data["x"], sub_data["y"], **kws)
+
+            for line in lines:
+
+                if "hue" in sub_vars:
+                    line.set_color(self._hue_map(sub_vars["hue"]))
+
+                if "size" in sub_vars:
+                    line.set_linewidth(self._size_map(sub_vars["size"]))
+
+                if "style" in sub_vars:
+                    attributes = self._style_map(sub_vars["style"])
+                    if "dashes" in attributes:
+                        line.set_dashes(attributes["dashes"])
+                    if "marker" in attributes:
+                        line.set_marker(attributes["marker"])
+
+            line_color = line.get_color()
+            line_alpha = line.get_alpha()
+            line_capstyle = line.get_solid_capstyle()
+
+            # --- Draw the confidence intervals
+
+            if self.estimator is not None and self.errorbar is not None:
+
+                # TODO handling of orientation will need to happen here
+
+                if self.err_style == "band":
+
+                    func = {"x": ax.fill_between, "y": ax.fill_betweenx}[orient]
+                    func(
+                        sub_data[orient],
+                        sub_data[f"{other}min"], sub_data[f"{other}max"],
+                        color=line_color, **err_kws
+                    )
+
+                elif self.err_style == "bars":
+
+                    error_param = {
+                        f"{other}err": (
+                            sub_data[other] - sub_data[f"{other}min"],
+                            sub_data[f"{other}max"] - sub_data[other],
+                        )
+                    }
+                    ebars = ax.errorbar(
+                        sub_data["x"], sub_data["y"], **error_param,
+                        linestyle="", color=line_color, alpha=line_alpha,
+                        **err_kws
+                    )
+
+                    # Set the capstyle properly on the error bars
+                    for obj in ebars.get_children():
+                        if isinstance(obj, mpl.collections.LineCollection):
+                            obj.set_capstyle(line_capstyle)
+
+        # Finalize the axes details
+        self._add_axis_labels(ax)
+        if self.legend:
+            legend_artist = partial(mpl.lines.Line2D, xdata=[], ydata=[])
+            attrs = {"hue": "color", "size": "linewidth", "style": None}
+            self.add_legend_data(ax, legend_artist, kws, attrs)
+            handles, _ = ax.get_legend_handles_labels()
+            if handles:
+                legend = ax.legend(title=self.legend_title)
+                adjust_legend_subtitles(legend)


 class _ScatterPlotter(_RelationalPlotter):
-    _legend_attributes = ['color', 's', 'marker']
+
+    _legend_attributes = ["color", "s", "marker"]

     def __init__(self, *, data=None, variables={}, legend=None):
-        self._default_size_range = np.r_[0.5, 2] * np.square(mpl.rcParams[
-            'lines.markersize'])
+
+        # TODO this is messy, we want the mapping to be agnostic about
+        # the kind of plot to draw, but for the time being we need to set
+        # this information so the SizeMapping can use it
+        self._default_size_range = (
+            np.r_[.5, 2] * np.square(mpl.rcParams["lines.markersize"])
+        )
+
         super().__init__(data=data, variables=variables)
+
         self.legend = legend

+    def plot(self, ax, kws):

-lineplot.__doc__ = (
-    """Draw a line plot with possibility of several semantic groupings.
+        # --- Determine the visual attributes of the plot
+
+        data = self.comp_data.dropna()
+        if data.empty:
+            return
+
+        kws = normalize_kwargs(kws, mpl.collections.PathCollection)
+
+        # Define the vectors of x and y positions
+        empty = np.full(len(data), np.nan)
+        x = data.get("x", empty)
+        y = data.get("y", empty)
+
+        # Apply inverse scaling to the coordinate variables
+        _, inv_x = _get_transform_functions(ax, "x")
+        _, inv_y = _get_transform_functions(ax, "y")
+        x, y = inv_x(x), inv_y(y)
+
+        if "style" in self.variables:
+            # Use a representative marker so scatter sets the edgecolor
+            # properly for line art markers. We currently enforce either
+            # all or none line art so this works.
+            example_level = self._style_map.levels[0]
+            example_marker = self._style_map(example_level, "marker")
+            kws.setdefault("marker", example_marker)
+
+        # Conditionally set the marker edgecolor based on whether the marker is "filled"
+        # See https://github.com/matplotlib/matplotlib/issues/17849 for context
+        m = kws.get("marker", mpl.rcParams.get("scatter.marker", "o"))
+        if not isinstance(m, mpl.markers.MarkerStyle):
+            # TODO in more recent matplotlib (which?) can pass a MarkerStyle here
+            m = mpl.markers.MarkerStyle(m)
+        if m.is_filled():
+            kws.setdefault("edgecolor", "w")
+
+        # Draw the scatter plot
+        points = ax.scatter(x=x, y=y, **kws)
+
+        # Apply the mapping from semantic variables to artist attributes
+
+        if "hue" in self.variables:
+            points.set_facecolors(self._hue_map(data["hue"]))
+
+        if "size" in self.variables:
+            points.set_sizes(self._size_map(data["size"]))
+
+        if "style" in self.variables:
+            p = [self._style_map(val, "path") for val in data["style"]]
+            points.set_paths(p)
+
+        # Apply dependent default attributes
+
+        if "linewidth" not in kws:
+            sizes = points.get_sizes()
+            linewidth = .08 * np.sqrt(np.percentile(sizes, 10))
+            points.set_linewidths(linewidth)
+            kws["linewidth"] = linewidth
+
+        # Finalize the axes details
+        self._add_axis_labels(ax)
+        if self.legend:
+            attrs = {"hue": "color", "size": "s", "style": None}
+            self.add_legend_data(ax, _scatter_legend_artist, kws, attrs)
+            handles, _ = ax.get_legend_handles_labels()
+            if handles:
+                legend = ax.legend(title=self.legend_title)
+                adjust_legend_subtitles(legend)
+
+
+def lineplot(
+    data=None, *,
+    x=None, y=None, hue=None, size=None, style=None, units=None, weights=None,
+    palette=None, hue_order=None, hue_norm=None,
+    sizes=None, size_order=None, size_norm=None,
+    dashes=True, markers=None, style_order=None,
+    estimator="mean", errorbar=("ci", 95), n_boot=1000, seed=None,
+    orient="x", sort=True, err_style="band", err_kws=None,
+    legend="auto", ci="deprecated", ax=None, **kwargs
+):
+
+    # Handle deprecation of ci parameter
+    errorbar = _deprecate_ci(errorbar, ci)
+
+    p = _LinePlotter(
+        data=data,
+        variables=dict(
+            x=x, y=y, hue=hue, size=size, style=style, units=units, weight=weights
+        ),
+        estimator=estimator, n_boot=n_boot, seed=seed, errorbar=errorbar,
+        sort=sort, orient=orient, err_style=err_style, err_kws=err_kws,
+        legend=legend,
+    )
+
+    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
+    p.map_size(sizes=sizes, order=size_order, norm=size_norm)
+    p.map_style(markers=markers, dashes=dashes, order=style_order)
+
+    if ax is None:
+        ax = plt.gca()
+
+    if "style" not in p.variables and not {"ls", "linestyle"} & set(kwargs):  # XXX
+        kwargs["dashes"] = "" if dashes is None or isinstance(dashes, bool) else dashes
+
+    if not p.has_xy_data:
+        return ax
+
+    p._attach(ax)
+
+    # Other functions have color as an explicit param,
+    # and we should probably do that here too
+    color = kwargs.pop("color", kwargs.pop("c", None))
+    kwargs["color"] = _default_color(ax.plot, hue, color, kwargs)
+
+    p.plot(ax, kwargs)
+    return ax
+
+
+lineplot.__doc__ = """\
+Draw a line plot with possibility of several semantic groupings.

 {narrative.main_api}

@@ -298,11 +595,51 @@ Examples

 .. include:: ../docstrings/lineplot.rst

-"""
-    .format(narrative=_relational_narrative, params=_param_docs, returns=
-    _core_docs['returns'], seealso=_core_docs['seealso']))
-scatterplot.__doc__ = (
-    """Draw a scatter plot with possibility of several semantic groupings.
+""".format(
+    narrative=_relational_narrative,
+    params=_param_docs,
+    returns=_core_docs["returns"],
+    seealso=_core_docs["seealso"],
+)
+
+
+def scatterplot(
+    data=None, *,
+    x=None, y=None, hue=None, size=None, style=None,
+    palette=None, hue_order=None, hue_norm=None,
+    sizes=None, size_order=None, size_norm=None,
+    markers=True, style_order=None, legend="auto", ax=None,
+    **kwargs
+):
+
+    p = _ScatterPlotter(
+        data=data,
+        variables=dict(x=x, y=y, hue=hue, size=size, style=style),
+        legend=legend
+    )
+
+    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
+    p.map_size(sizes=sizes, order=size_order, norm=size_norm)
+    p.map_style(markers=markers, order=style_order)
+
+    if ax is None:
+        ax = plt.gca()
+
+    if not p.has_xy_data:
+        return ax
+
+    p._attach(ax)
+
+    color = kwargs.pop("color", None)
+    kwargs["color"] = _default_color(ax.scatter, hue, color, kwargs)
+
+    p.plot(ax, kwargs)
+
+    return ax
+
+
+scatterplot.__doc__ = """\
+Draw a scatter plot with possibility of several semantic groupings.

 {narrative.main_api}

@@ -352,11 +689,218 @@ Examples

 .. include:: ../docstrings/scatterplot.rst

-"""
-    .format(narrative=_relational_narrative, params=_param_docs, returns=
-    _core_docs['returns'], seealso=_core_docs['seealso']))
-relplot.__doc__ = (
-    """Figure-level interface for drawing relational plots onto a FacetGrid.
+""".format(
+    narrative=_relational_narrative,
+    params=_param_docs,
+    returns=_core_docs["returns"],
+    seealso=_core_docs["seealso"],
+)
+
+
+def relplot(
+    data=None, *,
+    x=None, y=None, hue=None, size=None, style=None, units=None, weights=None,
+    row=None, col=None, col_wrap=None, row_order=None, col_order=None,
+    palette=None, hue_order=None, hue_norm=None,
+    sizes=None, size_order=None, size_norm=None,
+    markers=None, dashes=None, style_order=None,
+    legend="auto", kind="scatter", height=5, aspect=1, facet_kws=None,
+    **kwargs
+):
+
+    if kind == "scatter":
+
+        Plotter = _ScatterPlotter
+        func = scatterplot
+        markers = True if markers is None else markers
+
+    elif kind == "line":
+
+        Plotter = _LinePlotter
+        func = lineplot
+        dashes = True if dashes is None else dashes
+
+    else:
+        err = f"Plot kind {kind} not recognized"
+        raise ValueError(err)
+
+    # Check for attempt to plot onto specific axes and warn
+    if "ax" in kwargs:
+        msg = (
+            "relplot is a figure-level function and does not accept "
+            "the `ax` parameter. You may wish to try {}".format(kind + "plot")
+        )
+        warnings.warn(msg, UserWarning)
+        kwargs.pop("ax")
+
+    # Use the full dataset to map the semantics
+    variables = dict(x=x, y=y, hue=hue, size=size, style=style)
+    if kind == "line":
+        variables["units"] = units
+        variables["weight"] = weights
+    else:
+        if units is not None:
+            msg = "The `units` parameter has no effect with kind='scatter'."
+            warnings.warn(msg, stacklevel=2)
+        if weights is not None:
+            msg = "The `weights` parameter has no effect with kind='scatter'."
+            warnings.warn(msg, stacklevel=2)
+    p = Plotter(
+        data=data,
+        variables=variables,
+        legend=legend,
+    )
+    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
+    p.map_size(sizes=sizes, order=size_order, norm=size_norm)
+    p.map_style(markers=markers, dashes=dashes, order=style_order)
+
+    # Extract the semantic mappings
+    if "hue" in p.variables:
+        palette = p._hue_map.lookup_table
+        hue_order = p._hue_map.levels
+        hue_norm = p._hue_map.norm
+    else:
+        palette = hue_order = hue_norm = None
+
+    if "size" in p.variables:
+        sizes = p._size_map.lookup_table
+        size_order = p._size_map.levels
+        size_norm = p._size_map.norm
+
+    if "style" in p.variables:
+        style_order = p._style_map.levels
+        if markers:
+            markers = {k: p._style_map(k, "marker") for k in style_order}
+        else:
+            markers = None
+        if dashes:
+            dashes = {k: p._style_map(k, "dashes") for k in style_order}
+        else:
+            dashes = None
+    else:
+        markers = dashes = style_order = None
+
+    # Now extract the data that would be used to draw a single plot
+    variables = p.variables
+    plot_data = p.plot_data
+
+    # Define the common plotting parameters
+    plot_kws = dict(
+        palette=palette, hue_order=hue_order, hue_norm=hue_norm,
+        sizes=sizes, size_order=size_order, size_norm=size_norm,
+        markers=markers, dashes=dashes, style_order=style_order,
+        legend=False,
+    )
+    plot_kws.update(kwargs)
+    if kind == "scatter":
+        plot_kws.pop("dashes")
+
+    # Add the grid semantics onto the plotter
+    grid_variables = dict(
+        x=x, y=y, row=row, col=col, hue=hue, size=size, style=style,
+    )
+    if kind == "line":
+        grid_variables.update(units=units, weights=weights)
+    p.assign_variables(data, grid_variables)
+
+    # Define the named variables for plotting on each facet
+    # Rename the variables with a leading underscore to avoid
+    # collisions with faceting variable names
+    plot_variables = {v: f"_{v}" for v in variables}
+    if "weight" in plot_variables:
+        plot_variables["weights"] = plot_variables.pop("weight")
+    plot_kws.update(plot_variables)
+
+    # Pass the row/col variables to FacetGrid with their original
+    # names so that the axes titles render correctly
+    for var in ["row", "col"]:
+        # Handle faceting variables that lack name information
+        if var in p.variables and p.variables[var] is None:
+            p.variables[var] = f"_{var}_"
+    grid_kws = {v: p.variables.get(v) for v in ["row", "col"]}
+
+    # Rename the columns of the plot_data structure appropriately
+    new_cols = plot_variables.copy()
+    new_cols.update(grid_kws)
+    full_data = p.plot_data.rename(columns=new_cols)
+
+    # Set up the FacetGrid object
+    facet_kws = {} if facet_kws is None else facet_kws.copy()
+    g = FacetGrid(
+        data=full_data.dropna(axis=1, how="all"),
+        **grid_kws,
+        col_wrap=col_wrap, row_order=row_order, col_order=col_order,
+        height=height, aspect=aspect, dropna=False,
+        **facet_kws
+    )
+
+    # Draw the plot
+    g.map_dataframe(func, **plot_kws)
+
+    # Label the axes, using the original variables
+    # Pass "" when the variable name is None to overwrite internal variables
+    g.set_axis_labels(variables.get("x") or "", variables.get("y") or "")
+
+    if legend:
+        # Replace the original plot data so the legend uses numeric data with
+        # the correct type, since we force a categorical mapping above.
+        p.plot_data = plot_data
+
+        # Handle the additional non-semantic keyword arguments out here.
+        # We're selective because some kwargs may be seaborn function specific
+        # and not relevant to the matplotlib artists going into the legend.
+        # Ideally, we will have a better solution where we don't need to re-make
+        # the legend out here and will have parity with the axes-level functions.
+        keys = ["c", "color", "alpha", "m", "marker"]
+        if kind == "scatter":
+            legend_artist = _scatter_legend_artist
+            keys += ["s", "facecolor", "fc", "edgecolor", "ec", "linewidth", "lw"]
+        else:
+            legend_artist = partial(mpl.lines.Line2D, xdata=[], ydata=[])
+            keys += [
+                "markersize", "ms",
+                "markeredgewidth", "mew",
+                "markeredgecolor", "mec",
+                "linestyle", "ls",
+                "linewidth", "lw",
+            ]
+
+        common_kws = {k: v for k, v in kwargs.items() if k in keys}
+        attrs = {"hue": "color", "style": None}
+        if kind == "scatter":
+            attrs["size"] = "s"
+        elif kind == "line":
+            attrs["size"] = "linewidth"
+        p.add_legend_data(g.axes.flat[0], legend_artist, common_kws, attrs)
+        if p.legend_data:
+            g.add_legend(legend_data=p.legend_data,
+                         label_order=p.legend_order,
+                         title=p.legend_title,
+                         adjust_subtitles=True)
+
+    # Rename the columns of the FacetGrid's `data` attribute
+    # to match the original column names
+    orig_cols = {
+        f"_{k}": f"_{k}_" if v is None else v for k, v in variables.items()
+    }
+    grid_data = g.data.rename(columns=orig_cols)
+    if data is not None and (x is not None or y is not None):
+        if not isinstance(data, pd.DataFrame):
+            data = pd.DataFrame(data)
+        g.data = pd.merge(
+            data,
+            grid_data[grid_data.columns.difference(data.columns)],
+            left_index=True,
+            right_index=True,
+        )
+    else:
+        g.data = grid_data
+
+    return g
+
+
+relplot.__doc__ = """\
+Figure-level interface for drawing relational plots onto a FacetGrid.

 This function provides access to several different axes-level functions
 that show the relationship between two variables with semantic mappings
@@ -431,6 +975,8 @@ Examples

 .. include:: ../docstrings/relplot.rst

-"""
-    .format(narrative=_relational_narrative, params=_param_docs, returns=
-    _core_docs['returns']))
+""".format(
+    narrative=_relational_narrative,
+    params=_param_docs,
+    returns=_core_docs["returns"],
+)
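
For reference, the relplot code restored above assigns the hue/size/style mappings, renames the plotting variables with a leading underscore, and then maps the axes-level function onto a FacetGrid. A minimal usage sketch of that interface (illustrative only; assumes the public seaborn API and the bundled "tips" example dataset):

    import seaborn as sns

    tips = sns.load_dataset("tips")
    # Faceted scatter plot; hue, size, and col are the semantics relplot wires up above
    g = sns.relplot(
        data=tips, x="total_bill", y="tip",
        hue="smoker", size="size", col="time", kind="scatter",
    )
    g.set_axis_labels("Total bill", "Tip")
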
diff --git a/seaborn/utils.py b/seaborn/utils.py
index ba736ec6..98720ba3 100644
--- a/seaborn/utils.py
+++ b/seaborn/utils.py
@@ -6,20 +6,23 @@ import colorsys
 from contextlib import contextmanager
 from urllib.request import urlopen, urlretrieve
 from types import ModuleType
+
 import numpy as np
 import pandas as pd
 import matplotlib as mpl
 from matplotlib.colors import to_rgb
 import matplotlib.pyplot as plt
 from matplotlib.cbook import normalize_kwargs
+
 from seaborn._core.typing import deprecated
 from seaborn.external.version import Version
 from seaborn.external.appdirs import user_cache_dir
-__all__ = ['desaturate', 'saturate', 'set_hls_values', 'move_legend',
-    'despine', 'get_dataset_names', 'get_data_home', 'load_dataset']
-DATASET_SOURCE = (
-    'https://raw.githubusercontent.com/mwaskom/seaborn-data/master')
-DATASET_NAMES_URL = f'{DATASET_SOURCE}/dataset_names.txt'
+
+__all__ = ["desaturate", "saturate", "set_hls_values", "move_legend",
+           "despine", "get_dataset_names", "get_data_home", "load_dataset"]
+
+DATASET_SOURCE = "https://raw.githubusercontent.com/mwaskom/seaborn-data/master"
+DATASET_NAMES_URL = f"{DATASET_SOURCE}/dataset_names.txt"


 def ci_to_errsize(cis, heights):
@@ -39,17 +42,106 @@ def ci_to_errsize(cis, heights):
         format as argument for plt.bar

     """
-    pass
+    cis = np.atleast_2d(cis).reshape(2, -1)
+    heights = np.atleast_1d(heights)
+    errsize = []
+    for i, (low, high) in enumerate(np.transpose(cis)):
+        h = heights[i]
+        elow = h - low
+        ehigh = high - h
+        errsize.append([elow, ehigh])
+
+    errsize = np.asarray(errsize).T
+    return errsize


 def _draw_figure(fig):
     """Force draw of a matplotlib figure, accounting for back-compat."""
-    pass
+    # See https://github.com/matplotlib/matplotlib/issues/19197 for context
+    fig.canvas.draw()
+    if fig.stale:
+        try:
+            fig.draw(fig.canvas.get_renderer())
+        except AttributeError:
+            pass


 def _default_color(method, hue, color, kws, saturation=1):
     """If needed, get a default color by using the matplotlib property cycle."""
-    pass
+
+    if hue is not None:
+        # This warning is probably user-friendly, but it's currently triggered
+        # in a FacetGrid context and I don't want to mess with that logic right now
+        #  if color is not None:
+        #      msg = "`color` is ignored when `hue` is assigned."
+        #      warnings.warn(msg)
+        return None
+
+    kws = kws.copy()
+    kws.pop("label", None)
+
+    if color is not None:
+        if saturation < 1:
+            color = desaturate(color, saturation)
+        return color
+
+    elif method.__name__ == "plot":
+
+        color = normalize_kwargs(kws, mpl.lines.Line2D).get("color")
+        scout, = method([], [], scalex=False, scaley=False, color=color)
+        color = scout.get_color()
+        scout.remove()
+
+    elif method.__name__ == "scatter":
+
+        # Matplotlib will raise if the size of x/y don't match s/c,
+        # and the latter might be in the kws dict
+        scout_size = max(
+            np.atleast_1d(kws.get(key, [])).shape[0]
+            for key in ["s", "c", "fc", "facecolor", "facecolors"]
+        )
+        scout_x = scout_y = np.full(scout_size, np.nan)
+
+        scout = method(scout_x, scout_y, **kws)
+        facecolors = scout.get_facecolors()
+
+        if not len(facecolors):
+            # Handle bug in matplotlib <= 3.2 (I think)
+            # This will limit the ability to use non color= kwargs to specify
+            # a color in versions of matplotlib with the bug, but trying to
+            # work out what the user wanted by re-implementing the broken logic
+            # of inspecting the kwargs is probably too brittle.
+            single_color = False
+        else:
+            single_color = np.unique(facecolors, axis=0).shape[0] == 1
+
+        # Allow the user to specify an array of colors through various kwargs
+        if "c" not in kws and single_color:
+            color = to_rgb(facecolors[0])
+
+        scout.remove()
+
+    elif method.__name__ == "bar":
+
+        # bar() needs masked, not empty data, to generate a patch
+        scout, = method([np.nan], [np.nan], **kws)
+        color = to_rgb(scout.get_facecolor())
+        scout.remove()
+        # Axes.bar adds both a patch and a container
+        method.__self__.containers.pop(-1)
+
+    elif method.__name__ == "fill_between":
+
+        kws = normalize_kwargs(kws, mpl.collections.PolyCollection)
+        scout = method([], [], **kws)
+        facecolor = scout.get_facecolor()
+        color = to_rgb(facecolor[0])
+        scout.remove()
+
+    if saturation < 1:
+        color = desaturate(color, saturation)
+
+    return color


 def desaturate(color, prop):
@@ -68,7 +160,27 @@ def desaturate(color, prop):
         desaturated color code in RGB tuple representation

     """
-    pass
+    # Check inputs
+    if not 0 <= prop <= 1:
+        raise ValueError("prop must be between 0 and 1")
+
+    # Get rgb tuple rep
+    rgb = to_rgb(color)
+
+    # Short circuit to avoid floating point issues
+    if prop == 1:
+        return rgb
+
+    # Convert to hls
+    h, l, s = colorsys.rgb_to_hls(*rgb)
+
+    # Desaturate the saturation channel
+    s *= prop
+
+    # Convert back to rgb
+    new_color = colorsys.hls_to_rgb(h, l, s)
+
+    return new_color


 def saturate(color):
@@ -85,10 +197,10 @@ def saturate(color):
         saturated color code in RGB tuple representation

     """
-    pass
+    return set_hls_values(color, s=1)


-def set_hls_values(color, h=None, l=None, s=None):
+def set_hls_values(color, h=None, l=None, s=None):  # noqa
     """Independently manipulate the h, l, or s channels of a color.

     Parameters
@@ -104,7 +216,15 @@ def set_hls_values(color, h=None, l=None, s=None):
         new color code in RGB tuple representation

     """
-    pass
+    # Get an RGB tuple representation
+    rgb = to_rgb(color)
+    vals = list(colorsys.rgb_to_hls(*rgb))
+    for i, val in enumerate([h, l, s]):
+        if val is not None:
+            vals[i] = val
+
+    rgb = colorsys.hls_to_rgb(*vals)
+    return rgb


 def axlabel(xlabel, ylabel, **kwargs):
@@ -113,7 +233,11 @@ def axlabel(xlabel, ylabel, **kwargs):
     DEPRECATED: will be removed in a future version.

     """
-    pass
+    msg = "This function is deprecated and will be removed in a future version"
+    warnings.warn(msg, FutureWarning)
+    ax = plt.gca()
+    ax.set_xlabel(xlabel, **kwargs)
+    ax.set_ylabel(ylabel, **kwargs)


 def remove_na(vector):
@@ -130,7 +254,7 @@ def remove_na(vector):
         Vector of data with null values removed. May be a copy or a view.

     """
-    pass
+    return vector[pd.notnull(vector)]


 def get_color_cycle():
@@ -146,11 +270,12 @@ def get_color_cycle():
         List of matplotlib colors in the current cycle, or dark gray if
         the current color cycle is empty.
     """
-    pass
+    cycler = mpl.rcParams['axes.prop_cycle']
+    return cycler.by_key()['color'] if 'color' in cycler.keys else [".15"]


-def despine(fig=None, ax=None, top=True, right=True, left=False, bottom=
-    False, offset=None, trim=False):
+def despine(fig=None, ax=None, top=True, right=True, left=False,
+            bottom=False, offset=None, trim=False):
     """Remove the top and right spines from plot(s).

     fig : matplotlib figure, optional
@@ -173,7 +298,82 @@ def despine(fig=None, ax=None, top=True, right=True, left=False, bottom=
     None

     """
-    pass
+    # Get references to the axes we want
+    if fig is None and ax is None:
+        axes = plt.gcf().axes
+    elif fig is not None:
+        axes = fig.axes
+    elif ax is not None:
+        axes = [ax]
+
+    for ax_i in axes:
+        for side in ["top", "right", "left", "bottom"]:
+            # Toggle the spine objects
+            is_visible = not locals()[side]
+            ax_i.spines[side].set_visible(is_visible)
+            if offset is not None and is_visible:
+                try:
+                    val = offset.get(side, 0)
+                except AttributeError:
+                    val = offset
+                ax_i.spines[side].set_position(('outward', val))
+
+        # Potentially move the ticks
+        if left and not right:
+            maj_on = any(
+                t.tick1line.get_visible()
+                for t in ax_i.yaxis.majorTicks
+            )
+            min_on = any(
+                t.tick1line.get_visible()
+                for t in ax_i.yaxis.minorTicks
+            )
+            ax_i.yaxis.set_ticks_position("right")
+            for t in ax_i.yaxis.majorTicks:
+                t.tick2line.set_visible(maj_on)
+            for t in ax_i.yaxis.minorTicks:
+                t.tick2line.set_visible(min_on)
+
+        if bottom and not top:
+            maj_on = any(
+                t.tick1line.get_visible()
+                for t in ax_i.xaxis.majorTicks
+            )
+            min_on = any(
+                t.tick1line.get_visible()
+                for t in ax_i.xaxis.minorTicks
+            )
+            ax_i.xaxis.set_ticks_position("top")
+            for t in ax_i.xaxis.majorTicks:
+                t.tick2line.set_visible(maj_on)
+            for t in ax_i.xaxis.minorTicks:
+                t.tick2line.set_visible(min_on)
+
+        if trim:
+            # clip off the parts of the spines that extend past major ticks
+            xticks = np.asarray(ax_i.get_xticks())
+            if xticks.size:
+                firsttick = np.compress(xticks >= min(ax_i.get_xlim()),
+                                        xticks)[0]
+                lasttick = np.compress(xticks <= max(ax_i.get_xlim()),
+                                       xticks)[-1]
+                ax_i.spines['bottom'].set_bounds(firsttick, lasttick)
+                ax_i.spines['top'].set_bounds(firsttick, lasttick)
+                newticks = xticks.compress(xticks <= lasttick)
+                newticks = newticks.compress(newticks >= firsttick)
+                ax_i.set_xticks(newticks)
+
+            yticks = np.asarray(ax_i.get_yticks())
+            if yticks.size:
+                firsttick = np.compress(yticks >= min(ax_i.get_ylim()),
+                                        yticks)[0]
+                lasttick = np.compress(yticks <= max(ax_i.get_ylim()),
+                                       yticks)[-1]
+                ax_i.spines['left'].set_bounds(firsttick, lasttick)
+                ax_i.spines['right'].set_bounds(firsttick, lasttick)
+                newticks = yticks.compress(yticks <= lasttick)
+                newticks = newticks.compress(newticks >= firsttick)
+                ax_i.set_yticks(newticks)


 def move_legend(obj, loc, **kwargs):
@@ -204,17 +404,90 @@ def move_legend(obj, loc, **kwargs):
     .. include:: ../docstrings/move_legend.rst

     """
-    pass
+    # This is a somewhat hackish solution that will hopefully be obviated by
+    # upstream improvements to matplotlib legends that make them easier to
+    # modify after creation.
+
+    from seaborn.axisgrid import Grid  # Avoid circular import
+
+    # Locate the legend object and a method to recreate the legend
+    if isinstance(obj, Grid):
+        old_legend = obj.legend
+        legend_func = obj.figure.legend
+    elif isinstance(obj, mpl.axes.Axes):
+        old_legend = obj.legend_
+        legend_func = obj.legend
+    elif isinstance(obj, mpl.figure.Figure):
+        if obj.legends:
+            old_legend = obj.legends[-1]
+        else:
+            old_legend = None
+        legend_func = obj.legend
+    else:
+        err = "`obj` must be a seaborn Grid or matplotlib Axes or Figure instance."
+        raise TypeError(err)
+
+    if old_legend is None:
+        err = f"{obj} has no legend attached."
+        raise ValueError(err)
+
+    # Extract the components of the legend we need to reuse
+    # Import here to avoid a circular import
+    from seaborn._compat import get_legend_handles
+    handles = get_legend_handles(old_legend)
+    labels = [t.get_text() for t in old_legend.get_texts()]
+
+    # Handle the case where the user is trying to override the labels
+    if (new_labels := kwargs.pop("labels", None)) is not None:
+        if len(new_labels) != len(labels):
+            err = "Length of new labels does not match existing legend."
+            raise ValueError(err)
+        labels = new_labels
+
+    # Extract legend properties that can be passed to the recreation method
+    # (Vexingly, these don't all round-trip)
+    legend_kws = inspect.signature(mpl.legend.Legend).parameters
+    props = {k: v for k, v in old_legend.properties().items() if k in legend_kws}
+
+    # Delegate default bbox_to_anchor rules to matplotlib
+    props.pop("bbox_to_anchor")
+
+    # Try to propagate the existing title and font properties; respect new ones too
+    title = props.pop("title")
+    if "title" in kwargs:
+        title.set_text(kwargs.pop("title"))
+    title_kwargs = {k: v for k, v in kwargs.items() if k.startswith("title_")}
+    for key, val in title_kwargs.items():
+        title.set(**{key[6:]: val})
+        kwargs.pop(key)
+
+    # Try to respect the frame visibility
+    kwargs.setdefault("frameon", old_legend.legendPatch.get_visible())
+
+    # Remove the old legend and create the new one
+    props.update(kwargs)
+    old_legend.remove()
+    new_legend = legend_func(handles, labels, loc=loc, **props)
+    new_legend.set_title(title.get_text(), title.get_fontproperties())
+
+    # Let the Grid object continue to track the correct legend object
+    if isinstance(obj, Grid):
+        obj._legend = new_legend


 def _kde_support(data, bw, gridsize, cut, clip):
     """Establish support for a kernel density estimate."""
-    pass
+    support_min = max(data.min() - bw * cut, clip[0])
+    support_max = min(data.max() + bw * cut, clip[1])
+    support = np.linspace(support_min, support_max, gridsize)
+
+    return support


 def ci(a, which=95, axis=None):
     """Return a percentile range from an array of values."""
-    pass
+    p = 50 - which / 2, 50 + which / 2
+    return np.nanpercentile(a, p, axis)


 def get_dataset_names():
@@ -223,7 +496,11 @@ def get_dataset_names():
     Requires an internet connection.

     """
-    pass
+    with urlopen(DATASET_NAMES_URL) as resp:
+        txt = resp.read()
+
+    dataset_names = [name.strip() for name in txt.decode().split("\n")]
+    return list(filter(None, dataset_names))


 def get_data_home(data_home=None):
@@ -236,7 +513,12 @@ def get_data_home(data_home=None):
     or otherwise default to an OS-appropriate user cache location.

     """
-    pass
+    if data_home is None:
+        data_home = os.environ.get("SEABORN_DATA", user_cache_dir("seaborn"))
+    data_home = os.path.expanduser(data_home)
+    if not os.path.exists(data_home):
+        os.makedirs(data_home)
+    return data_home


 def load_dataset(name, cache=True, data_home=None, **kws):
@@ -271,7 +553,80 @@ def load_dataset(name, cache=True, data_home=None, **kws):
         Tabular data, possibly with some preprocessing applied.

     """
-    pass
+    # A common beginner mistake is to assume that one's personal data needs
+    # to be passed through this function to be usable with seaborn.
+    # Let's provide a more helpful error than you would otherwise get.
+    if isinstance(name, pd.DataFrame):
+        err = (
+            "This function accepts only strings (the name of an example dataset). "
+            "You passed a pandas DataFrame. If you have your own dataset, "
+            "it is not necessary to use this function before plotting."
+        )
+        raise TypeError(err)
+
+    url = f"{DATASET_SOURCE}/{name}.csv"
+
+    if cache:
+        cache_path = os.path.join(get_data_home(data_home), os.path.basename(url))
+        if not os.path.exists(cache_path):
+            if name not in get_dataset_names():
+                raise ValueError(f"'{name}' is not one of the example datasets.")
+            urlretrieve(url, cache_path)
+        full_path = cache_path
+    else:
+        full_path = url
+
+    df = pd.read_csv(full_path, **kws)
+
+    if df.iloc[-1].isnull().all():
+        df = df.iloc[:-1]
+
+    # Set some columns as a categorical type with ordered levels
+
+    if name == "tips":
+        df["day"] = pd.Categorical(df["day"], ["Thur", "Fri", "Sat", "Sun"])
+        df["sex"] = pd.Categorical(df["sex"], ["Male", "Female"])
+        df["time"] = pd.Categorical(df["time"], ["Lunch", "Dinner"])
+        df["smoker"] = pd.Categorical(df["smoker"], ["Yes", "No"])
+
+    elif name == "flights":
+        months = df["month"].str[:3]
+        df["month"] = pd.Categorical(months, months.unique())
+
+    elif name == "exercise":
+        df["time"] = pd.Categorical(df["time"], ["1 min", "15 min", "30 min"])
+        df["kind"] = pd.Categorical(df["kind"], ["rest", "walking", "running"])
+        df["diet"] = pd.Categorical(df["diet"], ["no fat", "low fat"])
+
+    elif name == "titanic":
+        df["class"] = pd.Categorical(df["class"], ["First", "Second", "Third"])
+        df["deck"] = pd.Categorical(df["deck"], list("ABCDEFG"))
+
+    elif name == "penguins":
+        df["sex"] = df["sex"].str.title()
+
+    elif name == "diamonds":
+        df["color"] = pd.Categorical(
+            df["color"], ["D", "E", "F", "G", "H", "I", "J"],
+        )
+        df["clarity"] = pd.Categorical(
+            df["clarity"], ["IF", "VVS1", "VVS2", "VS1", "VS2", "SI1", "SI2", "I1"],
+        )
+        df["cut"] = pd.Categorical(
+            df["cut"], ["Ideal", "Premium", "Very Good", "Good", "Fair"],
+        )
+
+    elif name == "taxis":
+        df["pickup"] = pd.to_datetime(df["pickup"])
+        df["dropoff"] = pd.to_datetime(df["dropoff"])
+
+    elif name == "seaice":
+        df["Date"] = pd.to_datetime(df["Date"])
+
+    elif name == "dowjones":
+        df["Date"] = pd.to_datetime(df["Date"])
+
+    return df


 def axis_ticklabels_overlap(labels):
@@ -287,7 +642,15 @@ def axis_ticklabels_overlap(labels):
         True if any of the labels overlap.

     """
-    pass
+    if not labels:
+        return False
+    try:
+        bboxes = [l.get_window_extent() for l in labels]
+        overlaps = [b.count_overlaps(bboxes) for b in bboxes]
+        return max(overlaps) > 1
+    except RuntimeError:
+        # Issue on macos backend raises an error in the above code
+        return False


 def axes_ticklabels_overlap(ax):
@@ -303,12 +666,34 @@ def axes_ticklabels_overlap(ax):
         True when the labels on that axis overlap.

     """
-    pass
+    return (axis_ticklabels_overlap(ax.get_xticklabels()),
+            axis_ticklabels_overlap(ax.get_yticklabels()))


 def locator_to_legend_entries(locator, limits, dtype):
     """Return levels and formatted levels for brief numeric legends."""
-    pass
+    raw_levels = locator.tick_values(*limits).astype(dtype)
+
+    # The locator can return ticks outside the limits, clip them here
+    raw_levels = [l for l in raw_levels if l >= limits[0] and l <= limits[1]]
+
+    class dummy_axis:
+        def get_view_interval(self):
+            return limits
+
+    if isinstance(locator, mpl.ticker.LogLocator):
+        formatter = mpl.ticker.LogFormatter()
+    else:
+        formatter = mpl.ticker.ScalarFormatter()
+        # Avoid having an offset/scientific notation which we don't currently
+        # have any way of representing in the legend
+        formatter.set_useOffset(False)
+        formatter.set_scientific(False)
+    formatter.axis = dummy_axis()
+
+    formatted_levels = formatter.format_ticks(raw_levels)
+
+    return raw_levels, formatted_levels


 def relative_luminance(color):
@@ -324,7 +709,13 @@ def relative_luminance(color):
     luminance : float(s) between 0 and 1

     """
-    pass
+    rgb = mpl.colors.colorConverter.to_rgba_array(color)[:, :3]
+    rgb = np.where(rgb <= .03928, rgb / 12.92, ((rgb + .055) / 1.055) ** 2.4)
+    lum = rgb.dot([.2126, .7152, .0722])
+    try:
+        return lum.item()
+    except ValueError:
+        return lum


 def to_utf8(obj):
@@ -348,17 +739,45 @@ def to_utf8(obj):
         UTF-8-decoded string representation of ``obj``

     """
-    pass
+    if isinstance(obj, str):
+        return obj
+    try:
+        return obj.decode(encoding="utf-8")
+    except AttributeError:  # obj is not bytes-like
+        return str(obj)


 def _check_argument(param, options, value, prefix=False):
     """Raise if value for param is not in options."""
-    pass
+    if prefix and value is not None:
+        failure = not any(value.startswith(p) for p in options if isinstance(p, str))
+    else:
+        failure = value not in options
+    if failure:
+        raise ValueError(
+            f"The value for `{param}` must be one of {options}, "
+            f"but {repr(value)} was passed."
+        )
+    return value


 def _assign_default_kwargs(kws, call_func, source_func):
     """Assign default kwargs for call_func using values from source_func."""
-    pass
+    # This exists so that axes-level functions and figure-level functions can
+    # both call a Plotter method while having the default kwargs be defined in
+    # the signature of the axes-level function.
+    # An alternative would be to have a decorator on the method that sets its
+    # defaults based on those defined in the axes-level function.
+    # Then the figure-level function would not need to worry about defaults.
+    # I am not sure which is better.
+    needed = inspect.signature(call_func).parameters
+    defaults = inspect.signature(source_func).parameters
+
+    for param in needed:
+        if param in defaults and param not in kws:
+            kws[param] = defaults[param].default
+
+    return kws


 def adjust_legend_subtitles(legend):
@@ -368,7 +787,17 @@ def adjust_legend_subtitles(legend):
     Note: This function is not part of the public API and may be changed or removed.

     """
-    pass
+    # Legend title not in rcParams until 3.0
+    font_size = plt.rcParams.get("legend.title_fontsize", None)
+    hpackers = legend.findobj(mpl.offsetbox.VPacker)[0].get_children()
+    for hpack in hpackers:
+        draw_area, text_area = hpack.get_children()
+        handles = draw_area.get_children()
+        if not all(artist.get_visible() for artist in handles):
+            draw_area.set_width(0)
+            for text in text_area.get_children():
+                if font_size is not None:
+                    text.set_size(font_size)


 def _deprecate_ci(errorbar, ci):
@@ -380,20 +809,89 @@ def _deprecate_ci(errorbar, ci):
     (and extracted from kwargs) after one cycle.

     """
-    pass
+    if ci is not deprecated and ci != "deprecated":
+        if ci is None:
+            errorbar = None
+        elif ci == "sd":
+            errorbar = "sd"
+        else:
+            errorbar = ("ci", ci)
+        msg = (
+            "\n\nThe `ci` parameter is deprecated. "
+            f"Use `errorbar={repr(errorbar)}` for the same effect.\n"
+        )
+        warnings.warn(msg, FutureWarning, stacklevel=3)
+
+    return errorbar


 def _get_transform_functions(ax, axis):
     """Return the forward and inverse transforms for a given axis."""
-    pass
+    axis_obj = getattr(ax, f"{axis}axis")
+    transform = axis_obj.get_transform()
+    return transform.transform, transform.inverted().transform


 @contextmanager
 def _disable_autolayout():
     """Context manager for preventing rc-controlled auto-layout behavior."""
-    pass
+    # This is a workaround for an issue in matplotlib, for details see
+    # https://github.com/mwaskom/seaborn/issues/2914
+    # The only effect of this rcParam is to set the default value for
+    # layout= in plt.figure, so we could just do that instead.
+    # But then we would need to own the complexity of the transition
+    # from tight_layout=True -> layout="tight". This seems easier,
+    # but can be removed when (if) that is simpler on the matplotlib side,
+    # or if the layout algorithms are improved to handle figure legends.
+    orig_val = mpl.rcParams["figure.autolayout"]
+    try:
+        mpl.rcParams["figure.autolayout"] = False
+        yield
+    finally:
+        mpl.rcParams["figure.autolayout"] = orig_val
+
+
+def _version_predates(lib: ModuleType, version: str) -> bool:
+    """Helper function for checking version compatibility."""
+    return Version(lib.__version__) < Version(version)


-def _version_predates(lib: ModuleType, version: str) ->bool:
-    """Helper function for checking version compatibility."""
-    pass
+def _scatter_legend_artist(**kws):
+
+    kws = normalize_kwargs(kws, mpl.collections.PathCollection)
+
+    edgecolor = kws.pop("edgecolor", None)
+    rc = mpl.rcParams
+    line_kws = {
+        "linestyle": "",
+        "marker": kws.pop("marker", "o"),
+        "markersize": np.sqrt(kws.pop("s", rc["lines.markersize"] ** 2)),
+        "markerfacecolor": kws.pop("facecolor", kws.get("color")),
+        "markeredgewidth": kws.pop("linewidth", 0),
+        **kws,
+    }
+
+    if edgecolor is not None:
+        if edgecolor == "face":
+            line_kws["markeredgecolor"] = line_kws["markerfacecolor"]
+        else:
+            line_kws["markeredgecolor"] = edgecolor
+
+    return mpl.lines.Line2D([], [], **line_kws)
+
+
+def _get_patch_legend_artist(fill):
+
+    def legend_artist(**kws):
+
+        color = kws.pop("color", None)
+        if color is not None:
+            if fill:
+                kws["facecolor"] = color
+            else:
+                kws["edgecolor"] = color
+                kws["facecolor"] = "none"
+
+        return mpl.patches.Rectangle((0, 0), 0, 0, **kws)
+
+    return legend_artist
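
For reference, a brief usage sketch of the utility functions implemented above (illustrative only; assumes the public seaborn API and the bundled "penguins" example dataset):

    import matplotlib.pyplot as plt
    import seaborn as sns

    # load_dataset caches the CSV under the directory returned by get_data_home()
    df = sns.load_dataset("penguins")
    ax = sns.scatterplot(data=df, x="bill_length_mm", y="bill_depth_mm", hue="species")
    # Remove the top and right spines and offset the remaining ones
    sns.despine(ax=ax, offset=10, trim=True)
    # Recreate the legend in a new location, as move_legend does above
    sns.move_legend(ax, "upper left", bbox_to_anchor=(1, 1), title="Species")
    plt.show()
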
diff --git a/seaborn/widgets.py b/seaborn/widgets.py
index 3941f65b..502812af 100644
--- a/seaborn/widgets.py
+++ b/seaborn/widgets.py
@@ -1,28 +1,47 @@
 import numpy as np
 import matplotlib.pyplot as plt
 from matplotlib.colors import LinearSegmentedColormap
+
 try:
     from ipywidgets import interact, FloatSlider, IntSlider
 except ImportError:
+    def interact(f):
+        msg = "Interactive palettes require `ipywidgets`, which is not installed."
+        raise ImportError(msg)
+
 from .miscplot import palplot
-from .palettes import color_palette, dark_palette, light_palette, diverging_palette, cubehelix_palette
-__all__ = ['choose_colorbrewer_palette', 'choose_cubehelix_palette',
-    'choose_dark_palette', 'choose_light_palette', 'choose_diverging_palette']
+from .palettes import (color_palette, dark_palette, light_palette,
+                       diverging_palette, cubehelix_palette)
+
+
+__all__ = ["choose_colorbrewer_palette", "choose_cubehelix_palette",
+           "choose_dark_palette", "choose_light_palette",
+           "choose_diverging_palette"]


 def _init_mutable_colormap():
     """Create a matplotlib colormap that will be updated by the widgets."""
-    pass
+    greys = color_palette("Greys", 256)
+    cmap = LinearSegmentedColormap.from_list("interactive", greys)
+    cmap._init()
+    cmap._set_extremes()
+    return cmap


 def _update_lut(cmap, colors):
     """Change the LUT values in a matplotlib colormap in-place."""
-    pass
+    cmap._lut[:256] = colors
+    cmap._set_extremes()


 def _show_cmap(cmap):
     """Show a continuous matplotlib colormap."""
-    pass
+    from .rcmod import axes_style  # Avoid circular import
+    with axes_style("white"):
+        f, ax = plt.subplots(figsize=(8.25, .75))
+    ax.set(xticks=[], yticks=[])
+    x = np.linspace(0, 1, 256)[np.newaxis, :]
+    ax.pcolormesh(x, cmap=cmap)


 def choose_colorbrewer_palette(data_type, as_cmap=False):
@@ -57,10 +76,71 @@ def choose_colorbrewer_palette(data_type, as_cmap=False):


     """
-    pass
-
-
-def choose_dark_palette(input='husl', as_cmap=False):
+    if data_type.startswith("q") and as_cmap:
+        raise ValueError("Qualitative palettes cannot be colormaps.")
+
+    pal = []
+    if as_cmap:
+        cmap = _init_mutable_colormap()
+
+    if data_type.startswith("s"):
+        opts = ["Greys", "Reds", "Greens", "Blues", "Oranges", "Purples",
+                "BuGn", "BuPu", "GnBu", "OrRd", "PuBu", "PuRd", "RdPu", "YlGn",
+                "PuBuGn", "YlGnBu", "YlOrBr", "YlOrRd"]
+        variants = ["regular", "reverse", "dark"]
+
+        @interact
+        def choose_sequential(name=opts, n=(2, 18),
+                              desat=FloatSlider(min=0, max=1, value=1),
+                              variant=variants):
+            if variant == "reverse":
+                name += "_r"
+            elif variant == "dark":
+                name += "_d"
+
+            if as_cmap:
+                colors = color_palette(name, 256, desat)
+                _update_lut(cmap, np.c_[colors, np.ones(256)])
+                _show_cmap(cmap)
+            else:
+                pal[:] = color_palette(name, n, desat)
+                palplot(pal)
+
+    elif data_type.startswith("d"):
+        opts = ["RdBu", "RdGy", "PRGn", "PiYG", "BrBG",
+                "RdYlBu", "RdYlGn", "Spectral"]
+        variants = ["regular", "reverse"]
+
+        @interact
+        def choose_diverging(name=opts, n=(2, 16),
+                             desat=FloatSlider(min=0, max=1, value=1),
+                             variant=variants):
+            if variant == "reverse":
+                name += "_r"
+            if as_cmap:
+                colors = color_palette(name, 256, desat)
+                _update_lut(cmap, np.c_[colors, np.ones(256)])
+                _show_cmap(cmap)
+            else:
+                pal[:] = color_palette(name, n, desat)
+                palplot(pal)
+
+    elif data_type.startswith("q"):
+        opts = ["Set1", "Set2", "Set3", "Paired", "Accent",
+                "Pastel1", "Pastel2", "Dark2"]
+
+        @interact
+        def choose_qualitative(name=opts, n=(2, 16),
+                               desat=FloatSlider(min=0, max=1, value=1)):
+            pal[:] = color_palette(name, n, desat)
+            palplot(pal)
+
+    if as_cmap:
+        return cmap
+    return pal
+
+
+def choose_dark_palette(input="husl", as_cmap=False):
     """Launch an interactive widget to create a dark sequential palette.

     This corresponds with the :func:`dark_palette` function. This kind
@@ -91,10 +171,61 @@ def choose_dark_palette(input='husl', as_cmap=False):
                         cubehelix system.

     """
-    pass
-
-
-def choose_light_palette(input='husl', as_cmap=False):
+    pal = []
+    if as_cmap:
+        cmap = _init_mutable_colormap()
+
+    if input == "rgb":
+        @interact
+        def choose_dark_palette_rgb(r=(0., 1.),
+                                    g=(0., 1.),
+                                    b=(0., 1.),
+                                    n=(3, 17)):
+            color = r, g, b
+            if as_cmap:
+                colors = dark_palette(color, 256, input="rgb")
+                _update_lut(cmap, colors)
+                _show_cmap(cmap)
+            else:
+                pal[:] = dark_palette(color, n, input="rgb")
+                palplot(pal)
+
+    elif input == "hls":
+        @interact
+        def choose_dark_palette_hls(h=(0., 1.),
+                                    l=(0., 1.),  # noqa: E741
+                                    s=(0., 1.),
+                                    n=(3, 17)):
+            color = h, l, s
+            if as_cmap:
+                colors = dark_palette(color, 256, input="hls")
+                _update_lut(cmap, colors)
+                _show_cmap(cmap)
+            else:
+                pal[:] = dark_palette(color, n, input="hls")
+                palplot(pal)
+
+    elif input == "husl":
+        @interact
+        def choose_dark_palette_husl(h=(0, 359),
+                                     s=(0, 99),
+                                     l=(0, 99),  # noqa: E741
+                                     n=(3, 17)):
+            color = h, s, l
+            if as_cmap:
+                colors = dark_palette(color, 256, input="husl")
+                _update_lut(cmap, colors)
+                _show_cmap(cmap)
+            else:
+                pal[:] = dark_palette(color, n, input="husl")
+                palplot(pal)
+
+    if as_cmap:
+        return cmap
+    return pal
+
+
+def choose_light_palette(input="husl", as_cmap=False):
     """Launch an interactive widget to create a light sequential palette.

     This corresponds with the :func:`light_palette` function. This kind
@@ -125,7 +256,58 @@ def choose_light_palette(input='husl', as_cmap=False):
                         cubehelix system.

     """
-    pass
+    pal = []
+    if as_cmap:
+        cmap = _init_mutable_colormap()
+
+    if input == "rgb":
+        @interact
+        def choose_light_palette_rgb(r=(0., 1.),
+                                     g=(0., 1.),
+                                     b=(0., 1.),
+                                     n=(3, 17)):
+            color = r, g, b
+            if as_cmap:
+                colors = light_palette(color, 256, input="rgb")
+                _update_lut(cmap, colors)
+                _show_cmap(cmap)
+            else:
+                pal[:] = light_palette(color, n, input="rgb")
+                palplot(pal)
+
+    elif input == "hls":
+        @interact
+        def choose_light_palette_hls(h=(0., 1.),
+                                     l=(0., 1.),  # noqa: E741
+                                     s=(0., 1.),
+                                     n=(3, 17)):
+            color = h, l, s
+            if as_cmap:
+                colors = light_palette(color, 256, input="hls")
+                _update_lut(cmap, colors)
+                _show_cmap(cmap)
+            else:
+                pal[:] = light_palette(color, n, input="hls")
+                palplot(pal)
+
+    elif input == "husl":
+        @interact
+        def choose_light_palette_husl(h=(0, 359),
+                                      s=(0, 99),
+                                      l=(0, 99),  # noqa: E741
+                                      n=(3, 17)):
+            color = h, s, l
+            if as_cmap:
+                colors = light_palette(color, 256, input="husl")
+                _update_lut(cmap, colors)
+                _show_cmap(cmap)
+            else:
+                pal[:] = light_palette(color, n, input="husl")
+                palplot(pal)
+
+    if as_cmap:
+        return cmap
+    return pal


 def choose_diverging_palette(as_cmap=False):
@@ -156,7 +338,35 @@ def choose_diverging_palette(as_cmap=False):
                                  colorbrewer set, including diverging palettes.

     """
-    pass
+    pal = []
+    if as_cmap:
+        cmap = _init_mutable_colormap()
+
+    @interact
+    def choose_diverging_palette(
+        h_neg=IntSlider(min=0,
+                        max=359,
+                        value=220),
+        h_pos=IntSlider(min=0,
+                        max=359,
+                        value=10),
+        s=IntSlider(min=0, max=99, value=74),
+        l=IntSlider(min=0, max=99, value=50),  # noqa: E741
+        sep=IntSlider(min=1, max=50, value=10),
+        n=(2, 16),
+        center=["light", "dark"]
+    ):
+        if as_cmap:
+            colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center)
+            _update_lut(cmap, colors)
+            _show_cmap(cmap)
+        else:
+            pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center)
+            palplot(pal)
+
+    if as_cmap:
+        return cmap
+    return pal


 def choose_cubehelix_palette(as_cmap=False):
@@ -187,4 +397,30 @@ def choose_cubehelix_palette(as_cmap=False):
                         cubehelix system.

     """
-    pass
+    pal = []
+    if as_cmap:
+        cmap = _init_mutable_colormap()
+
+    @interact
+    def choose_cubehelix(n_colors=IntSlider(min=2, max=16, value=9),
+                         start=FloatSlider(min=0, max=3, value=0),
+                         rot=FloatSlider(min=-1, max=1, value=.4),
+                         gamma=FloatSlider(min=0, max=5, value=1),
+                         hue=FloatSlider(min=0, max=1, value=.8),
+                         light=FloatSlider(min=0, max=1, value=.85),
+                         dark=FloatSlider(min=0, max=1, value=.15),
+                         reverse=False):
+
+        if as_cmap:
+            colors = cubehelix_palette(256, start, rot, gamma,
+                                       hue, light, dark, reverse)
+            _update_lut(cmap, np.c_[colors, np.ones(256)])
+            _show_cmap(cmap)
+        else:
+            pal[:] = cubehelix_palette(n_colors, start, rot, gamma,
+                                       hue, light, dark, reverse)
+            palplot(pal)
+
+    if as_cmap:
+        return cmap
+    return pal
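
For reference, the palette-chooser widgets above are meant to run in a Jupyter notebook with ipywidgets installed; without it, the fallback interact defined at the top of the module raises ImportError. A minimal usage sketch (illustrative only):

    import seaborn as sns

    # In a notebook cell: interactively tune a sequential ColorBrewer palette
    # and keep the resulting list of colors for later plots.
    pal = sns.choose_colorbrewer_palette("sequential")

    # Or build a continuous colormap from the cubehelix system.
    cmap = sns.choose_cubehelix_palette(as_cmap=True)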