
Reference (Gold): networkx

Pytest summary for the networkx test suite

status     count
passed      5436
skipped       53
failed         4
total       5493
collected  10986

Failing tests:

test_pylab.py::test_draw_networkx_edges_multiedge_connectionstyle[G1-4]

G = <networkx.classes.multigraph.MultiGraph object at ...>
expected_n_edges = 4

    @pytest.mark.parametrize(
        ("G", "expected_n_edges"),
        ([nx.DiGraph(), 2], [nx.MultiGraph(), 4], [nx.MultiDiGraph(), 4]),
    )
    def test_draw_networkx_edges_multiedge_connectionstyle(G, expected_n_edges):
        """Draws edges correctly for 3 types of graphs and checks for valid length"""
        for i, (u, v) in enumerate([(0, 1), (0, 1), (0, 1), (0, 2)]):
            G.add_edge(u, v, weight=round(i / 3, 2))
        pos = {n: (n, n) for n in G}
        # Raises on insufficient connectionstyle length
        for conn_style in [
            "arc3,rad=0.1",
            ["arc3,rad=0.1", "arc3,rad=0.1"],
            ["arc3,rad=0.1", "arc3,rad=0.1", "arc3,rad=0.2"],
        ]:
            nx.draw_networkx_edges(G, pos, connectionstyle=conn_style)
            arrows = nx.draw_networkx_edges(G, pos, connectionstyle=conn_style)
>           assert len(arrows) == expected_n_edges
E           assert 8 == 4
E            +  where 8 = len([<matplotlib.patches.FancyArrowPatch object at ...>, <matplotlib.patches.FancyArrowPatch object at ...>, <matplotlib.patches.FancyArrowPatch object at ...>, ...])

networkx/drawing/tests/test_pylab.py:732: AssertionError
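
For context, the failing steps can be run stand-alone. This is a minimal sketch, assuming Matplotlib's headless Agg backend (not part of the test itself), that mirrors the test's two draw calls on the same implicit axes:

    import matplotlib
    matplotlib.use("Agg")  # headless backend; an assumption for this sketch
    import networkx as nx

    G = nx.MultiGraph()
    for i, (u, v) in enumerate([(0, 1), (0, 1), (0, 1), (0, 2)]):
        G.add_edge(u, v, weight=round(i / 3, 2))
    pos = {n: (n, n) for n in G}
    nx.draw_networkx_edges(G, pos, connectionstyle="arc3,rad=0.1")
    arrows = nx.draw_networkx_edges(G, pos, connectionstyle="arc3,rad=0.1")
    # The test expects one FancyArrowPatch per multiedge (4); the failing run
    # reports 8, consistent with the patches of both draw calls being counted.
    print(len(arrows))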

test_pylab.py::test_draw_networkx_edges_multiedge_connectionstyle[G2-4]

G = <networkx.classes.multidigraph.MultiDiGraph object at ...>
expected_n_edges = 4

    @pytest.mark.parametrize(
        ("G", "expected_n_edges"),
        ([nx.DiGraph(), 2], [nx.MultiGraph(), 4], [nx.MultiDiGraph(), 4]),
    )
    def test_draw_networkx_edges_multiedge_connectionstyle(G, expected_n_edges):
        """Draws edges correctly for 3 types of graphs and checks for valid length"""
        for i, (u, v) in enumerate([(0, 1), (0, 1), (0, 1), (0, 2)]):
            G.add_edge(u, v, weight=round(i / 3, 2))
        pos = {n: (n, n) for n in G}
        # Raises on insufficient connectionstyle length
        for conn_style in [
            "arc3,rad=0.1",
            ["arc3,rad=0.1", "arc3,rad=0.1"],
            ["arc3,rad=0.1", "arc3,rad=0.1", "arc3,rad=0.2"],
        ]:
            nx.draw_networkx_edges(G, pos, connectionstyle=conn_style)
            arrows = nx.draw_networkx_edges(G, pos, connectionstyle=conn_style)
>           assert len(arrows) == expected_n_edges
E           assert 8 == 4
E            +  where 8 = len([<matplotlib.patches.FancyArrowPatch object at ...>, <matplotlib.patches.FancyArrowPatch object at ...>, <matplotlib.patches.FancyArrowPatch object at ...>, ...])

networkx/drawing/tests/test_pylab.py:732: AssertionError

test_pylab.py::test_draw_networkx_edge_labels_multiedge_connectionstyle[G1-4]

G = <networkx.classes.multigraph.MultiGraph object at ...>
expected_n_edges = 4

    @pytest.mark.parametrize(
        ("G", "expected_n_edges"),
        ([nx.DiGraph(), 2], [nx.MultiGraph(), 4], [nx.MultiDiGraph(), 4]),
    )
    def test_draw_networkx_edge_labels_multiedge_connectionstyle(G, expected_n_edges):
        """Draws labels correctly for 3 types of graphs and checks for valid length and class names"""
        for i, (u, v) in enumerate([(0, 1), (0, 1), (0, 1), (0, 2)]):
            G.add_edge(u, v, weight=round(i / 3, 2))
        pos = {n: (n, n) for n in G}
        # Raises on insufficient connectionstyle length
        arrows = nx.draw_networkx_edges(
            G, pos, connectionstyle=["arc3,rad=0.1", "arc3,rad=0.1", "arc3,rad=0.1"]
        )
        for conn_style in [
            "arc3,rad=0.1",
            ["arc3,rad=0.1", "arc3,rad=0.2"],
            ["arc3,rad=0.1", "arc3,rad=0.1", "arc3,rad=0.1"],
        ]:
            text_items = nx.draw_networkx_edge_labels(G, pos, connectionstyle=conn_style)
>           assert len(text_items) == expected_n_edges
E           assert 8 == 4
E            +  where 8 = len({(0, 1, 0): Text(0.5372660379084617, 0.4329084063193035, "{'weight': 0.0}"), (0, 1, 1): Text(0.5372660379084617, 0.432...4329084063193035, "{'weight': 0.67}"), (0, 1, 3): Text(0.5372660379084617, 0.4329084063193035, "{'weight': 0.0}"), ...})

networkx/drawing/tests/test_pylab.py:754: AssertionError
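
The edge-label failure follows the same pattern. For multigraphs, draw_networkx_edge_labels returns a dict keyed by (u, v, key), so the test expects one entry per multiedge. A stand-alone sketch under the same Agg-backend assumption:

    import matplotlib
    matplotlib.use("Agg")
    import networkx as nx

    G = nx.MultiGraph()
    for i, (u, v) in enumerate([(0, 1), (0, 1), (0, 1), (0, 2)]):
        G.add_edge(u, v, weight=round(i / 3, 2))
    pos = {n: (n, n) for n in G}
    # Draw curved edges first, as the test does, then place the labels.
    nx.draw_networkx_edges(G, pos, connectionstyle=["arc3,rad=0.1"] * 3)
    text_items = nx.draw_networkx_edge_labels(G, pos, connectionstyle="arc3,rad=0.1")
    print(len(text_items))  # expected 4; the failing run reports 8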

test_pylab.py::test_draw_networkx_edge_labels_multiedge_connectionstyle[G2-4]

G = <networkx.classes.multidigraph.MultiDiGraph object at ...>
expected_n_edges = 4

    @pytest.mark.parametrize(
        ("G", "expected_n_edges"),
        ([nx.DiGraph(), 2], [nx.MultiGraph(), 4], [nx.MultiDiGraph(), 4]),
    )
    def test_draw_networkx_edge_labels_multiedge_connectionstyle(G, expected_n_edges):
        """Draws labels correctly for 3 types of graphs and checks for valid length and class names"""
        for i, (u, v) in enumerate([(0, 1), (0, 1), (0, 1), (0, 2)]):
            G.add_edge(u, v, weight=round(i / 3, 2))
        pos = {n: (n, n) for n in G}
        # Raises on insufficient connectionstyle length
        arrows = nx.draw_networkx_edges(
            G, pos, connectionstyle=["arc3,rad=0.1", "arc3,rad=0.1", "arc3,rad=0.1"]
        )
        for conn_style in [
            "arc3,rad=0.1",
            ["arc3,rad=0.1", "arc3,rad=0.2"],
            ["arc3,rad=0.1", "arc3,rad=0.1", "arc3,rad=0.1"],
        ]:
            text_items = nx.draw_networkx_edge_labels(G, pos, connectionstyle=conn_style)
>           assert len(text_items) == expected_n_edges
E           assert 8 == 4
E            +  where 8 = len({(0, 1, 0): Text(0.5372660379084617, 0.4329084063193035, "{'weight': 0.0}"), (0, 1, 1): Text(0.5372660379084617, 0.432...4329084063193035, "{'weight': 0.67}"), (0, 1, 3): Text(0.5372660379084617, 0.4329084063193035, "{'weight': 0.0}"), ...})

networkx/drawing/tests/test_pylab.py:754: AssertionError

Patch diff

diff --git a/networkx/algorithms/approximation/clique.py b/networkx/algorithms/approximation/clique.py
index 78320db44..564430686 100644
--- a/networkx/algorithms/approximation/clique.py
+++ b/networkx/algorithms/approximation/clique.py
@@ -2,12 +2,17 @@
 import networkx as nx
 from networkx.algorithms.approximation import ramsey
 from networkx.utils import not_implemented_for
-__all__ = ['clique_removal', 'max_clique', 'large_clique_size',
-    'maximum_independent_set']

+__all__ = [
+    "clique_removal",
+    "max_clique",
+    "large_clique_size",
+    "maximum_independent_set",
+]

-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def maximum_independent_set(G):
     """Returns an approximate maximum independent set.
@@ -59,14 +64,15 @@ def maximum_independent_set(G):
        Approximating maximum independent sets by excluding subgraphs.
        BIT Numerical Mathematics, 32(2), 180–196. Springer.
     """
-    pass
+    iset, _ = clique_removal(G)
+    return iset


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def max_clique(G):
-    """Find the Maximum Clique
+    r"""Find the Maximum Clique

     Finds the $O(|V|/(log|V|)^2)$ apx of maximum clique/independent set
     in the worst case.
@@ -95,13 +101,13 @@ def max_clique(G):
     Notes
     -----
     A clique in an undirected graph G = (V, E) is a subset of the vertex set
-    `C \\subseteq V` such that for every two vertices in C there exists an edge
+    `C \subseteq V` such that for every two vertices in C there exists an edge
     connecting the two. This is equivalent to saying that the subgraph
     induced by C is complete (in some cases, the term clique may also refer
     to the subgraph).

     A maximum clique is a clique of the largest possible size in a given graph.
-    The clique number `\\omega(G)` of a graph G is the number of
+    The clique number `\omega(G)` of a graph G is the number of
     vertices in a maximum clique in G. The intersection number of
     G is the smallest number of cliques that together cover all edges of G.

@@ -114,16 +120,20 @@ def max_clique(G):
         BIT Numerical Mathematics, 32(2), 180–196. Springer.
         doi:10.1007/BF01994876
     """
-    pass
+    # finding the maximum clique in a graph is equivalent to finding
+    # the independent set in the complementary graph
+    cgraph = nx.complement(G)
+    iset, _ = clique_removal(cgraph)
+    return iset


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def clique_removal(G):
-    """Repeatedly remove cliques from the graph.
+    r"""Repeatedly remove cliques from the graph.

-    Results in a $O(|V|/(\\log |V|)^2)$ approximation of maximum clique
+    Results in a $O(|V|/(\log |V|)^2)$ approximation of maximum clique
     and independent set. Returns the largest independent set found, along
     with found maximal cliques.

@@ -154,11 +164,24 @@ def clique_removal(G):
         Approximating maximum independent sets by excluding subgraphs.
         BIT Numerical Mathematics, 32(2), 180–196. Springer.
     """
-    pass
-
-
-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+    graph = G.copy()
+    c_i, i_i = ramsey.ramsey_R2(graph)
+    cliques = [c_i]
+    isets = [i_i]
+    while graph:
+        graph.remove_nodes_from(c_i)
+        c_i, i_i = ramsey.ramsey_R2(graph)
+        if c_i:
+            cliques.append(c_i)
+        if i_i:
+            isets.append(i_i)
+    # Determine the largest independent set as measured by cardinality.
+    maxiset = max(isets, key=len)
+    return maxiset, cliques
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def large_clique_size(G):
     """Find the size of a large clique in a graph.
@@ -217,4 +240,19 @@ def large_clique_size(G):
         Functions for finding the exact maximum clique in a graph.

     """
-    pass
+    degrees = G.degree
+
+    def _clique_heuristic(G, U, size, best_size):
+        if not U:
+            return max(best_size, size)
+        u = max(U, key=degrees)
+        U.remove(u)
+        N_prime = {v for v in G[u] if degrees[v] >= best_size}
+        return _clique_heuristic(G, U & N_prime, size + 1, best_size)
+
+    best_size = 0
+    nodes = (u for u in G if degrees[u] >= best_size)
+    for u in nodes:
+        neighbors = {v for v in G[u] if degrees[v] >= best_size}
+        best_size = _clique_heuristic(G, neighbors, 1, best_size)
+    return best_size
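
Not part of the patch: a short usage sketch of the restored clique helpers, on an illustrative random graph:

    import networkx as nx
    from networkx.algorithms import approximation as approx

    G = nx.erdos_renyi_graph(50, 0.3, seed=42)
    iset, cliques = approx.clique_removal(G)   # independent set plus maximal cliques
    print(len(iset), max(len(c) for c in cliques))
    print(len(approx.max_clique(G)))           # approximate maximum clique (node set)
    print(approx.large_clique_size(G))         # heuristic lower bound on the clique number
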
diff --git a/networkx/algorithms/approximation/clustering_coefficient.py b/networkx/algorithms/approximation/clustering_coefficient.py
index 30a71d1a9..545fc6553 100644
--- a/networkx/algorithms/approximation/clustering_coefficient.py
+++ b/networkx/algorithms/approximation/clustering_coefficient.py
@@ -1,13 +1,14 @@
 import networkx as nx
 from networkx.utils import not_implemented_for, py_random_state
-__all__ = ['average_clustering']

+__all__ = ["average_clustering"]

-@not_implemented_for('directed')
+
+@not_implemented_for("directed")
 @py_random_state(2)
-@nx._dispatchable(name='approximate_average_clustering')
+@nx._dispatchable(name="approximate_average_clustering")
 def average_clustering(G, trials=1000, seed=None):
-    """Estimates the average clustering coefficient of G.
+    r"""Estimates the average clustering coefficient of G.

     The local clustering of each node in `G` is the fraction of triangles
     that actually exist over all possible triangles in its neighborhood.
@@ -57,4 +58,14 @@ def average_clustering(G, trials=1000, seed=None):
        https://doi.org/10.5445/IR/1000001239

     """
-    pass
+    n = len(G)
+    triangles = 0
+    nodes = list(G)
+    for i in [int(seed.random() * n) for i in range(trials)]:
+        nbrs = list(G[nodes[i]])
+        if len(nbrs) < 2:
+            continue
+        u, v = seed.sample(nbrs, 2)
+        if u in G[v]:
+            triangles += 1
+    return triangles / trials
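
A quick sketch comparing the Monte Carlo estimate (one sampled neighbor pair per trial) against the exact clustering coefficient; the graph and seeds are illustrative:

    import networkx as nx
    from networkx.algorithms import approximation as approx

    G = nx.erdos_renyi_graph(100, 0.1, seed=1)
    est = approx.average_clustering(G, trials=1000, seed=7)
    print(round(est, 3), round(nx.average_clustering(G), 3))  # estimate vs. exact
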
diff --git a/networkx/algorithms/approximation/connectivity.py b/networkx/algorithms/approximation/connectivity.py
index cd88c5219..a2214ed12 100644
--- a/networkx/algorithms/approximation/connectivity.py
+++ b/networkx/algorithms/approximation/connectivity.py
@@ -2,12 +2,17 @@
 """
 import itertools
 from operator import itemgetter
+
 import networkx as nx
-__all__ = ['local_node_connectivity', 'node_connectivity',
-    'all_pairs_node_connectivity']
+
+__all__ = [
+    "local_node_connectivity",
+    "node_connectivity",
+    "all_pairs_node_connectivity",
+]


-@nx._dispatchable(name='approximate_local_node_connectivity')
+@nx._dispatchable(name="approximate_local_node_connectivity")
 def local_node_connectivity(G, source, target, cutoff=None):
     """Compute node connectivity between source and target.

@@ -75,12 +80,37 @@ def local_node_connectivity(G, source, target, cutoff=None):
         http://eclectic.ss.uci.edu/~drwhite/working.pdf

     """
-    pass
+    if target == source:
+        raise nx.NetworkXError("source and target have to be different nodes.")
+
+    # Maximum possible node independent paths
+    if G.is_directed():
+        possible = min(G.out_degree(source), G.in_degree(target))
+    else:
+        possible = min(G.degree(source), G.degree(target))
+
+    K = 0
+    if not possible:
+        return K
+
+    if cutoff is None:
+        cutoff = float("inf")
+
+    exclude = set()
+    for i in range(min(possible, cutoff)):
+        try:
+            path = _bidirectional_shortest_path(G, source, target, exclude)
+            exclude.update(set(path))
+            K += 1
+        except nx.NetworkXNoPath:
+            break
+
+    return K


-@nx._dispatchable(name='approximate_node_connectivity')
+@nx._dispatchable(name="approximate_node_connectivity")
 def node_connectivity(G, s=None, t=None):
-    """Returns an approximation for node connectivity for a graph or digraph G.
+    r"""Returns an approximation for node connectivity for a graph or digraph G.

     Node connectivity is equal to the minimum number of nodes that
     must be removed to disconnect G or render it trivial. By Menger's theorem,
@@ -142,10 +172,49 @@ def node_connectivity(G, s=None, t=None):
         http://eclectic.ss.uci.edu/~drwhite/working.pdf

     """
-    pass
-
-
-@nx._dispatchable(name='approximate_all_pairs_node_connectivity')
+    if (s is not None and t is None) or (s is None and t is not None):
+        raise nx.NetworkXError("Both source and target must be specified.")
+
+    # Local node connectivity
+    if s is not None and t is not None:
+        if s not in G:
+            raise nx.NetworkXError(f"node {s} not in graph")
+        if t not in G:
+            raise nx.NetworkXError(f"node {t} not in graph")
+        return local_node_connectivity(G, s, t)
+
+    # Global node connectivity
+    if G.is_directed():
+        connected_func = nx.is_weakly_connected
+        iter_func = itertools.permutations
+
+        def neighbors(v):
+            return itertools.chain(G.predecessors(v), G.successors(v))
+
+    else:
+        connected_func = nx.is_connected
+        iter_func = itertools.combinations
+        neighbors = G.neighbors
+
+    if not connected_func(G):
+        return 0
+
+    # Choose a node with minimum degree
+    v, minimum_degree = min(G.degree(), key=itemgetter(1))
+    # Node connectivity is bounded by minimum degree
+    K = minimum_degree
+    # compute local node connectivity with all non-neighbors nodes
+    # and store the minimum
+    for w in set(G) - set(neighbors(v)) - {v}:
+        K = min(K, local_node_connectivity(G, v, w, cutoff=K))
+    # Same for non adjacent pairs of neighbors of v
+    for x, y in iter_func(neighbors(v), 2):
+        if y not in G[x] and x != y:
+            K = min(K, local_node_connectivity(G, x, y, cutoff=K))
+    return K
+
+
+@nx._dispatchable(name="approximate_all_pairs_node_connectivity")
 def all_pairs_node_connectivity(G, nbunch=None, cutoff=None):
     """Compute node connectivity between all pairs of nodes.

@@ -203,7 +272,26 @@ def all_pairs_node_connectivity(G, nbunch=None, cutoff=None):
         Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035
         http://eclectic.ss.uci.edu/~drwhite/working.pdf
     """
-    pass
+    if nbunch is None:
+        nbunch = G
+    else:
+        nbunch = set(nbunch)
+
+    directed = G.is_directed()
+    if directed:
+        iter_func = itertools.permutations
+    else:
+        iter_func = itertools.combinations
+
+    all_pairs = {n: {} for n in nbunch}
+
+    for u, v in iter_func(nbunch, 2):
+        k = local_node_connectivity(G, u, v, cutoff=cutoff)
+        all_pairs[u][v] = k
+        if not directed:
+            all_pairs[v][u] = k
+
+    return all_pairs


 def _bidirectional_shortest_path(G, source, target, exclude):
@@ -249,4 +337,76 @@ def _bidirectional_shortest_path(G, source, target, exclude):
         http://eclectic.ss.uci.edu/~drwhite/working.pdf

     """
-    pass
+    # call helper to do the real work
+    results = _bidirectional_pred_succ(G, source, target, exclude)
+    pred, succ, w = results
+
+    # build path from pred+w+succ
+    path = []
+    # from source to w
+    while w is not None:
+        path.append(w)
+        w = pred[w]
+    path.reverse()
+    # from w to target
+    w = succ[path[-1]]
+    while w is not None:
+        path.append(w)
+        w = succ[w]
+
+    return path
+
+
+def _bidirectional_pred_succ(G, source, target, exclude):
+    # does BFS from both source and target and meets in the middle
+    # excludes nodes in the container "exclude" from the search
+
+    # handle either directed or undirected
+    if G.is_directed():
+        Gpred = G.predecessors
+        Gsucc = G.successors
+    else:
+        Gpred = G.neighbors
+        Gsucc = G.neighbors
+
+    # predecessor and successors in search
+    pred = {source: None}
+    succ = {target: None}
+
+    # initialize fringes, start with forward
+    forward_fringe = [source]
+    reverse_fringe = [target]
+
+    level = 0
+
+    while forward_fringe and reverse_fringe:
+        # Make sure that we iterate one step forward and one step backwards
+        # thus source and target will only trigger "found path" when they are
+        # adjacent and then they can be safely included in the container 'exclude'
+        level += 1
+        if level % 2 != 0:
+            this_level = forward_fringe
+            forward_fringe = []
+            for v in this_level:
+                for w in Gsucc(v):
+                    if w in exclude:
+                        continue
+                    if w not in pred:
+                        forward_fringe.append(w)
+                        pred[w] = v
+                    if w in succ:
+                        return pred, succ, w  # found path
+        else:
+            this_level = reverse_fringe
+            reverse_fringe = []
+            for v in this_level:
+                for w in Gpred(v):
+                    if w in exclude:
+                        continue
+                    if w not in succ:
+                        succ[w] = v
+                        reverse_fringe.append(w)
+                    if w in pred:
+                        return pred, succ, w  # found path
+
+    raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
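
Usage sketch for the White-Newman connectivity approximations; the icosahedral graph is chosen here because its node connectivity (5) is known:

    import networkx as nx
    from networkx.algorithms import approximation as approx

    G = nx.icosahedral_graph()                       # 5-regular, 5-connected
    print(approx.node_connectivity(G))               # global approximation
    print(approx.local_node_connectivity(G, 0, 6))   # node-independent paths found
    print(approx.all_pairs_node_connectivity(G, nbunch=[0, 1, 2]))
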
diff --git a/networkx/algorithms/approximation/distance_measures.py b/networkx/algorithms/approximation/distance_measures.py
index 8f95ba3f3..d5847e65a 100644
--- a/networkx/algorithms/approximation/distance_measures.py
+++ b/networkx/algorithms/approximation/distance_measures.py
@@ -1,11 +1,13 @@
 """Distance measures approximated metrics."""
+
 import networkx as nx
 from networkx.utils.decorators import py_random_state
-__all__ = ['diameter']
+
+__all__ = ["diameter"]


 @py_random_state(1)
-@nx._dispatchable(name='approximate_diameter')
+@nx._dispatchable(name="approximate_diameter")
 def diameter(G, seed=None):
     """Returns a lower bound on the diameter of the graph G.

@@ -69,7 +71,17 @@ def diameter(G, seed=None):
        International Symposium on Experimental Algorithms. Springer, Berlin, Heidelberg, 2012.
        https://courses.cs.ut.ee/MTAT.03.238/2014_fall/uploads/Main/diameter.pdf
     """
-    pass
+    # if G is empty
+    if not G:
+        raise nx.NetworkXError("Expected non-empty NetworkX graph!")
+    # if there's only a node
+    if G.number_of_nodes() == 1:
+        return 0
+    # if G is directed
+    if G.is_directed():
+        return _two_sweep_directed(G, seed)
+    # else if G is undirected
+    return _two_sweep_undirected(G, seed)


 def _two_sweep_undirected(G, seed):
@@ -85,7 +97,17 @@ def _two_sweep_undirected(G, seed):

         ``seed`` is a random.Random or numpy.random.RandomState instance
     """
-    pass
+    # select a random source node
+    source = seed.choice(list(G))
+    # get the distances to the other nodes
+    distances = nx.shortest_path_length(G, source)
+    # if some nodes have not been visited, then the graph is not connected
+    if len(distances) != len(G):
+        raise nx.NetworkXError("Graph not connected.")
+    # take a node that is (one of) the farthest nodes from the source
+    *_, node = distances
+    # return the eccentricity of the node
+    return nx.eccentricity(G, node)


 def _two_sweep_directed(G, seed):
@@ -107,4 +129,22 @@ def _two_sweep_directed(G, seed):

         ``seed`` is a random.Random or numpy.random.RandomState instance
     """
-    pass
+    # get a new digraph G' with the edges reversed in the opposite direction
+    G_reversed = G.reverse()
+    # select a random source node
+    source = seed.choice(list(G))
+    # compute forward distances from source
+    forward_distances = nx.shortest_path_length(G, source)
+    # compute backward distances from source
+    backward_distances = nx.shortest_path_length(G_reversed, source)
+    # if either the source can't reach every node or not every node
+    # can reach the source, then the graph is not strongly connected
+    n = len(G)
+    if len(forward_distances) != n or len(backward_distances) != n:
+        raise nx.NetworkXError("DiGraph not strongly connected.")
+    # take a node a_1 at the maximum distance from the source in G
+    *_, a_1 = forward_distances
+    # take a node a_2 at the maximum distance from the source in G_reversed
+    *_, a_2 = backward_distances
+    # return the max between the backward eccentricity of a_1 and the forward eccentricity of a_2
+    return max(nx.eccentricity(G_reversed, a_1), nx.eccentricity(G, a_2))
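
A two-sweep usage sketch; on a path graph the returned lower bound coincides with the true diameter:

    import networkx as nx
    from networkx.algorithms import approximation as approx

    G = nx.path_graph(10)
    print(approx.diameter(G, seed=3))  # lower bound on the diameter; exact (9) here
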
diff --git a/networkx/algorithms/approximation/dominating_set.py b/networkx/algorithms/approximation/dominating_set.py
index 92411f7e6..06ab97d97 100644
--- a/networkx/algorithms/approximation/dominating_set.py
+++ b/networkx/algorithms/approximation/dominating_set.py
@@ -11,15 +11,18 @@ incident to an endpoint of at least one edge in *F*.

 """
 import networkx as nx
+
 from ...utils import not_implemented_for
 from ..matching import maximal_matching
-__all__ = ['min_weighted_dominating_set', 'min_edge_dominating_set']
+
+__all__ = ["min_weighted_dominating_set", "min_edge_dominating_set"]


-@not_implemented_for('directed')
-@nx._dispatchable(node_attrs='weight')
+# TODO Why doesn't this algorithm work for directed graphs?
+@not_implemented_for("directed")
+@nx._dispatchable(node_attrs="weight")
 def min_weighted_dominating_set(G, weight=None):
-    """Returns a dominating set that approximates the minimum weight node
+    r"""Returns a dominating set that approximates the minimum weight node
     dominating set.

     Parameters
@@ -35,7 +38,7 @@ def min_weighted_dominating_set(G, weight=None):
     Returns
     -------
     min_weight_dominating_set : set
-        A set of nodes, the sum of whose weights is no more than `(\\log
+        A set of nodes, the sum of whose weights is no more than `(\log
         w(V)) w(V^*)`, where `w(V)` denotes the sum of the weights of
         each node in the graph and `w(V^*)` denotes the sum of the
         weights of each node in the minimum weight dominating set.
@@ -54,7 +57,7 @@ def min_weighted_dominating_set(G, weight=None):
     Notes
     -----
     This algorithm computes an approximate minimum weighted dominating
-    set for the graph `G`. The returned solution has weight `(\\log
+    set for the graph `G`. The returned solution has weight `(\log
     w(V)) w(V^*)`, where `w(V)` denotes the sum of the weights of each
     node in the graph and `w(V^*)` denotes the sum of the weights of
     each node in the minimum weight dominating set for the graph.
@@ -69,12 +72,49 @@ def min_weighted_dominating_set(G, weight=None):
            Springer Science & Business Media, 2001.

     """
-    pass
+    # The unique dominating set for the null graph is the empty set.
+    if len(G) == 0:
+        return set()
+
+    # This is the dominating set that will eventually be returned.
+    dom_set = set()
+
+    def _cost(node_and_neighborhood):
+        """Returns the cost-effectiveness of greedily choosing the given
+        node.
+
+        `node_and_neighborhood` is a two-tuple comprising a node and its
+        closed neighborhood.
+
+        """
+        v, neighborhood = node_and_neighborhood
+        return G.nodes[v].get(weight, 1) / len(neighborhood - dom_set)
+
+    # This is a set of all vertices not already covered by the
+    # dominating set.
+    vertices = set(G)
+    # This is a dictionary mapping each node to the closed neighborhood
+    # of that node.
+    neighborhoods = {v: {v} | set(G[v]) for v in G}
+
+    # Continue until all vertices are adjacent to some node in the
+    # dominating set.
+    while vertices:
+        # Find the most cost-effective node to add, along with its
+        # closed neighborhood.
+        dom_node, min_set = min(neighborhoods.items(), key=_cost)
+        # Add the node to the dominating set and reduce the remaining
+        # set of nodes to cover.
+        dom_set.add(dom_node)
+        del neighborhoods[dom_node]
+        vertices -= min_set
+
+    return dom_set


 @nx._dispatchable
 def min_edge_dominating_set(G):
-    """Returns minimum cardinality edge dominating set.
+    r"""Returns minimum cardinality edge dominating set.

     Parameters
     ----------
@@ -103,4 +143,6 @@ def min_edge_dominating_set(G):
     problem. The result is no more than 2 * OPT in terms of size of the set.
     Runtime of the algorithm is $O(|E|)$.
     """
-    pass
+    if not G:
+        raise ValueError("Expected non-empty NetworkX graph!")
+    return maximal_matching(G)
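
Usage sketch on a star graph, where the greedy choice is unambiguous:

    import networkx as nx
    from networkx.algorithms import approximation as approx

    G = nx.star_graph(5)                              # hub 0, leaves 1..5
    print(approx.min_weighted_dominating_set(G))      # {0}: the hub covers every leaf
    print(approx.min_edge_dominating_set(G))          # one edge: a maximal matching
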
diff --git a/networkx/algorithms/approximation/kcomponents.py b/networkx/algorithms/approximation/kcomponents.py
index 93c6193ad..b540bd5f4 100644
--- a/networkx/algorithms/approximation/kcomponents.py
+++ b/networkx/algorithms/approximation/kcomponents.py
@@ -4,17 +4,19 @@ import itertools
 from collections import defaultdict
 from collections.abc import Mapping
 from functools import cached_property
+
 import networkx as nx
 from networkx.algorithms.approximation import local_node_connectivity
 from networkx.exception import NetworkXError
 from networkx.utils import not_implemented_for
-__all__ = ['k_components']
+
+__all__ = ["k_components"]


-@not_implemented_for('directed')
-@nx._dispatchable(name='approximate_k_components')
+@not_implemented_for("directed")
+@nx._dispatchable(name="approximate_k_components")
 def k_components(G, min_density=0.95):
-    """Returns the approximate k-component structure of a graph G.
+    r"""Returns the approximate k-component structure of a graph G.

     A `k`-component is a maximal subgraph of a graph G that has, at least,
     node connectivity `k`: we need to remove at least `k` nodes to break it
@@ -100,7 +102,96 @@ def k_components(G, min_density=0.95):
             https://doi.org/10.2307/3088904

     """
-    pass
+    # Dictionary with connectivity level (k) as keys and a list of
+    # sets of nodes that form a k-component as values
+    k_components = defaultdict(list)
+    # make a few functions local for speed
+    node_connectivity = local_node_connectivity
+    k_core = nx.k_core
+    core_number = nx.core_number
+    biconnected_components = nx.biconnected_components
+    combinations = itertools.combinations
+    # Exact solution for k = {1,2}
+    # There is a linear time algorithm for triconnectivity, if we had an
+    # implementation available we could start from k = 4.
+    for component in nx.connected_components(G):
+        # isolated nodes have connectivity 0
+        comp = set(component)
+        if len(comp) > 1:
+            k_components[1].append(comp)
+    for bicomponent in nx.biconnected_components(G):
+        # avoid considering dyads as bicomponents
+        bicomp = set(bicomponent)
+        if len(bicomp) > 2:
+            k_components[2].append(bicomp)
+    # There is no k-component of k > maximum core number
+    # \kappa(G) <= \lambda(G) <= \delta(G)
+    g_cnumber = core_number(G)
+    max_core = max(g_cnumber.values())
+    for k in range(3, max_core + 1):
+        C = k_core(G, k, core_number=g_cnumber)
+        for nodes in biconnected_components(C):
+            # Build a subgraph SG induced by the nodes that are part of
+            # each biconnected component of the k-core subgraph C.
+            if len(nodes) < k:
+                continue
+            SG = G.subgraph(nodes)
+            # Build auxiliary graph
+            H = _AntiGraph()
+            H.add_nodes_from(SG.nodes())
+            for u, v in combinations(SG, 2):
+                K = node_connectivity(SG, u, v, cutoff=k)
+                if k > K:
+                    H.add_edge(u, v)
+            for h_nodes in biconnected_components(H):
+                if len(h_nodes) <= k:
+                    continue
+                SH = H.subgraph(h_nodes)
+                for Gc in _cliques_heuristic(SG, SH, k, min_density):
+                    for k_nodes in biconnected_components(Gc):
+                        Gk = nx.k_core(SG.subgraph(k_nodes), k)
+                        if len(Gk) <= k:
+                            continue
+                        k_components[k].append(set(Gk))
+    return k_components
+
+
+def _cliques_heuristic(G, H, k, min_density):
+    h_cnumber = nx.core_number(H)
+    for i, c_value in enumerate(sorted(set(h_cnumber.values()), reverse=True)):
+        cands = {n for n, c in h_cnumber.items() if c == c_value}
+        # Skip checking for overlap for the highest core value
+        if i == 0:
+            overlap = False
+        else:
+            overlap = set.intersection(
+                *[{x for x in H[n] if x not in cands} for n in cands]
+            )
+        if overlap and len(overlap) < k:
+            SH = H.subgraph(cands | overlap)
+        else:
+            SH = H.subgraph(cands)
+        sh_cnumber = nx.core_number(SH)
+        SG = nx.k_core(G.subgraph(SH), k)
+        while not (_same(sh_cnumber) and nx.density(SH) >= min_density):
+            # This subgraph must be writable => .copy()
+            SH = H.subgraph(SG).copy()
+            if len(SH) <= k:
+                break
+            sh_cnumber = nx.core_number(SH)
+            sh_deg = dict(SH.degree())
+            min_deg = min(sh_deg.values())
+            SH.remove_nodes_from(n for n, d in sh_deg.items() if d == min_deg)
+            SG = nx.k_core(G.subgraph(SH), k)
+        else:
+            yield SG
+
+
+def _same(measure, tol=0):
+    vals = set(measure.values())
+    if (max(vals) - min(vals)) <= tol:
+        return True
+    return False


 class _AntiGraph(nx.Graph):
@@ -116,8 +207,13 @@ class _AntiGraph(nx.Graph):
     an instance of this class with some of NetworkX functions. In this
     case we only use k-core, connected_components, and biconnected_components.
     """
-    all_edge_dict = {'weight': 1}
-    edge_attr_dict_factory = single_edge_dict
+
+    all_edge_dict = {"weight": 1}
+
+    def single_edge_dict(self):
+        return self.all_edge_dict
+
+    edge_attr_dict_factory = single_edge_dict  # type: ignore[assignment]

     def __getitem__(self, n):
         """Returns a dict of neighbors of node n in the dense graph.
@@ -134,15 +230,18 @@ class _AntiGraph(nx.Graph):

         """
         all_edge_dict = self.all_edge_dict
-        return {node: all_edge_dict for node in set(self._adj) - set(self.
-            _adj[n]) - {n}}
+        return {
+            node: all_edge_dict for node in set(self._adj) - set(self._adj[n]) - {n}
+        }

     def neighbors(self, n):
         """Returns an iterator over all neighbors of node n in the
         dense graph.
         """
-        pass
-
+        try:
+            return iter(set(self._adj) - set(self._adj[n]) - {n})
+        except KeyError as err:
+            raise NetworkXError(f"The node {n} is not in the graph.") from err

     class AntiAtlasView(Mapping):
         """An adjacency inner dict for AntiGraph"""
@@ -156,8 +255,7 @@ class _AntiGraph(nx.Graph):
             return len(self._graph) - len(self._atlas) - 1

         def __iter__(self):
-            return (n for n in self._graph if n not in self._atlas and n !=
-                self._node)
+            return (n for n in self._graph if n not in self._atlas and n != self._node)

         def __getitem__(self, nbr):
             nbrs = set(self._graph._adj) - set(self._atlas) - {self._node}
@@ -165,7 +263,6 @@ class _AntiGraph(nx.Graph):
                 return self._graph.all_edge_dict
             raise KeyError(nbr)

-
     class AntiAdjacencyView(AntiAtlasView):
         """An adjacency outer dict for AntiGraph"""

@@ -184,21 +281,35 @@ class _AntiGraph(nx.Graph):
                 raise KeyError(node)
             return self._graph.AntiAtlasView(self._graph, node)

+    @cached_property
+    def adj(self):
+        return self.AntiAdjacencyView(self)
+
     def subgraph(self, nodes):
         """This subgraph method returns a full AntiGraph. Not a View"""
-        pass
-
+        nodes = set(nodes)
+        G = _AntiGraph()
+        G.add_nodes_from(nodes)
+        for n in G:
+            Gnbrs = G.adjlist_inner_dict_factory()
+            G._adj[n] = Gnbrs
+            for nbr, d in self._adj[n].items():
+                if nbr in G._adj:
+                    Gnbrs[nbr] = d
+                    G._adj[nbr][n] = d
+        G.graph = self.graph
+        return G

     class AntiDegreeView(nx.reportviews.DegreeView):
-
         def __iter__(self):
             all_nodes = set(self._succ)
             for n in self._nodes:
                 nbrs = all_nodes - set(self._succ[n]) - {n}
-                yield n, len(nbrs)
+                yield (n, len(nbrs))

         def __getitem__(self, n):
             nbrs = set(self._succ) - set(self._succ[n]) - {n}
+            # AntiGraph is a ThinGraph so all edges have weight 1
             return len(nbrs) + (n in nbrs)

     @cached_property
@@ -238,7 +349,7 @@ class _AntiGraph(nx.Graph):
         [(0, 1), (1, 2)]

         """
-        pass
+        return self.AntiDegreeView(self)

     def adjacency(self):
         """Returns an iterator of (node, adjacency set) tuples for all nodes
@@ -254,4 +365,5 @@ class _AntiGraph(nx.Graph):
            the graph.

         """
-        pass
+        for n in self._adj:
+            yield (n, set(self._adj) - set(self._adj[n]) - {n})
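
Usage sketch; the Petersen graph is 3-connected, so each level k = 1, 2, 3 should report a single 10-node component:

    import networkx as nx
    from networkx.algorithms import approximation as approx

    G = nx.petersen_graph()
    result = approx.k_components(G)
    print({k: [len(c) for c in comps] for k, comps in result.items()})
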
diff --git a/networkx/algorithms/approximation/matching.py b/networkx/algorithms/approximation/matching.py
index d51a8fa32..3a7c8a39b 100644
--- a/networkx/algorithms/approximation/matching.py
+++ b/networkx/algorithms/approximation/matching.py
@@ -9,12 +9,13 @@ edges; that is, no two edges share a common vertex.
 `Wikipedia: Matching <https://en.wikipedia.org/wiki/Matching_(graph_theory)>`_
 """
 import networkx as nx
-__all__ = ['min_maximal_matching']
+
+__all__ = ["min_maximal_matching"]


 @nx._dispatchable
 def min_maximal_matching(G):
-    """Returns the minimum maximal matching of G. That is, out of all maximal
+    r"""Returns the minimum maximal matching of G. That is, out of all maximal
     matchings of the graph G, the smallest is returned.

     Parameters
@@ -39,4 +40,4 @@ def min_maximal_matching(G):
     ----------
     .. [1] Vazirani, Vijay Approximation Algorithms (2001)
     """
-    pass
+    return nx.maximal_matching(G)
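
Usage sketch; the result is a maximal matching, guaranteed to be within a factor of 2 of the minimum maximal matching:

    import networkx as nx
    from networkx.algorithms import approximation as approx

    G = nx.cycle_graph(6)
    print(approx.min_maximal_matching(G))  # e.g. {(0, 1), (2, 3), (4, 5)}
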
diff --git a/networkx/algorithms/approximation/maxcut.py b/networkx/algorithms/approximation/maxcut.py
index 13460e11f..f4e1da87c 100644
--- a/networkx/algorithms/approximation/maxcut.py
+++ b/networkx/algorithms/approximation/maxcut.py
@@ -1,12 +1,13 @@
 import networkx as nx
 from networkx.utils.decorators import not_implemented_for, py_random_state
-__all__ = ['randomized_partitioning', 'one_exchange']

+__all__ = ["randomized_partitioning", "one_exchange"]

-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @py_random_state(1)
-@nx._dispatchable(edge_attrs='weight')
+@nx._dispatchable(edge_attrs="weight")
 def randomized_partitioning(G, seed=None, p=0.5, weight=None):
     """Compute a random partitioning of the graph nodes and its cut value.

@@ -53,13 +54,20 @@ def randomized_partitioning(G, seed=None, p=0.5, weight=None):
     NetworkXNotImplemented
         If the graph is directed or is a multigraph.
     """
-    pass
+    cut = {node for node in G.nodes() if seed.random() < p}
+    cut_size = nx.algorithms.cut_size(G, cut, weight=weight)
+    partition = (cut, G.nodes - cut)
+    return cut_size, partition
+
+
+def _swap_node_partition(cut, node):
+    return cut - {node} if node in cut else cut.union({node})


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @py_random_state(2)
-@nx._dispatchable(edge_attrs='weight')
+@nx._dispatchable(edge_attrs="weight")
 def one_exchange(G, initial_cut=None, seed=None, weight=None):
     """Compute a partitioning of the graphs nodes and the corresponding cut value.

@@ -107,4 +115,29 @@ def one_exchange(G, initial_cut=None, seed=None, weight=None):
     NetworkXNotImplemented
         If the graph is directed or is a multigraph.
     """
-    pass
+    if initial_cut is None:
+        initial_cut = set()
+    cut = set(initial_cut)
+    current_cut_size = nx.algorithms.cut_size(G, cut, weight=weight)
+    while True:
+        nodes = list(G.nodes())
+        # Shuffling the nodes ensures random tie-breaks in the following call to max
+        seed.shuffle(nodes)
+        best_node_to_swap = max(
+            nodes,
+            key=lambda v: nx.algorithms.cut_size(
+                G, _swap_node_partition(cut, v), weight=weight
+            ),
+            default=None,
+        )
+        potential_cut = _swap_node_partition(cut, best_node_to_swap)
+        potential_cut_size = nx.algorithms.cut_size(G, potential_cut, weight=weight)
+
+        if potential_cut_size > current_cut_size:
+            cut = potential_cut
+            current_cut_size = potential_cut_size
+        else:
+            break
+
+    partition = (cut, G.nodes - cut)
+    return current_cut_size, partition
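
Usage sketch on a complete bipartite graph, where the maximum cut crosses all 12 edges and the 1-exchange local search should find it:

    import networkx as nx
    from networkx.algorithms import approximation as approx

    G = nx.complete_bipartite_graph(3, 4)
    cut_size, (side_a, side_b) = approx.one_exchange(G, seed=0)
    print(cut_size)                            # 12 expected here
    rand_size, _ = approx.randomized_partitioning(G, seed=0)
    print(rand_size)                           # random cut, about half the edges
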
diff --git a/networkx/algorithms/approximation/ramsey.py b/networkx/algorithms/approximation/ramsey.py
index 986e5c9da..5cb9fda04 100644
--- a/networkx/algorithms/approximation/ramsey.py
+++ b/networkx/algorithms/approximation/ramsey.py
@@ -3,15 +3,17 @@ Ramsey numbers.
 """
 import networkx as nx
 from networkx.utils import not_implemented_for
+
 from ...utils import arbitrary_element
-__all__ = ['ramsey_R2']
+
+__all__ = ["ramsey_R2"]


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def ramsey_R2(G):
-    """Compute the largest clique and largest independent set in `G`.
+    r"""Compute the largest clique and largest independent set in `G`.

     This can be used to estimate bounds for the 2-color
     Ramsey number `R(2;s,t)` for `G`.
@@ -34,4 +36,17 @@ def ramsey_R2(G):
     NetworkXNotImplemented
         If the graph is directed or is a multigraph.
     """
-    pass
+    if not G:
+        return set(), set()
+
+    node = arbitrary_element(G)
+    nbrs = (nbr for nbr in nx.all_neighbors(G, node) if nbr != node)
+    nnbrs = nx.non_neighbors(G, node)
+    c_1, i_1 = ramsey_R2(G.subgraph(nbrs).copy())
+    c_2, i_2 = ramsey_R2(G.subgraph(nnbrs).copy())
+
+    c_1.add(node)
+    i_2.add(node)
+    # Choose the larger of the two cliques and the larger of the two
+    # independent sets, according to cardinality.
+    return max(c_1, c_2, key=len), max(i_1, i_2, key=len)
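
Usage sketch; on the complete bipartite graph K_{5,5} the recursion should find a 2-clique and a 5-node independent set:

    import networkx as nx
    from networkx.algorithms.approximation import ramsey

    G = nx.turan_graph(10, 2)              # K_{5,5}
    clique, indep = ramsey.ramsey_R2(G)
    print(len(clique), len(indep))         # 2 5
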
diff --git a/networkx/algorithms/approximation/steinertree.py b/networkx/algorithms/approximation/steinertree.py
index 74895df5d..c6c834f42 100644
--- a/networkx/algorithms/approximation/steinertree.py
+++ b/networkx/algorithms/approximation/steinertree.py
@@ -1,12 +1,14 @@
 from itertools import chain
+
 import networkx as nx
 from networkx.utils import not_implemented_for, pairwise
-__all__ = ['metric_closure', 'steiner_tree']
+
+__all__ = ["metric_closure", "steiner_tree"]


-@not_implemented_for('directed')
-@nx._dispatchable(edge_attrs='weight', returns_graph=True)
-def metric_closure(G, weight='weight'):
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight", returns_graph=True)
+def metric_closure(G, weight="weight"):
     """Return the metric closure of a graph.

     The metric closure of a graph *G* is the complete graph in which each edge
@@ -22,16 +24,111 @@ def metric_closure(G, weight='weight'):
         Metric closure of the graph `G`.

     """
-    pass
-
-
-ALGORITHMS = {'kou': _kou_steiner_tree, 'mehlhorn': _mehlhorn_steiner_tree}
-
-
-@not_implemented_for('directed')
+    M = nx.Graph()
+
+    Gnodes = set(G)
+
+    # check for connected graph while processing first node
+    all_paths_iter = nx.all_pairs_dijkstra(G, weight=weight)
+    u, (distance, path) = next(all_paths_iter)
+    if Gnodes - set(distance):
+        msg = "G is not a connected graph. metric_closure is not defined."
+        raise nx.NetworkXError(msg)
+    Gnodes.remove(u)
+    for v in Gnodes:
+        M.add_edge(u, v, distance=distance[v], path=path[v])
+
+    # first node done -- now process the rest
+    for u, (distance, path) in all_paths_iter:
+        Gnodes.remove(u)
+        for v in Gnodes:
+            M.add_edge(u, v, distance=distance[v], path=path[v])
+
+    return M
+
+
+def _mehlhorn_steiner_tree(G, terminal_nodes, weight):
+    paths = nx.multi_source_dijkstra_path(G, terminal_nodes)
+
+    d_1 = {}
+    s = {}
+    for v in G.nodes():
+        s[v] = paths[v][0]
+        d_1[(v, s[v])] = len(paths[v]) - 1
+
+    # G1-G4 names match those from the Mehlhorn 1988 paper.
+    G_1_prime = nx.Graph()
+    for u, v, data in G.edges(data=True):
+        su, sv = s[u], s[v]
+        weight_here = d_1[(u, su)] + data.get(weight, 1) + d_1[(v, sv)]
+        if not G_1_prime.has_edge(su, sv):
+            G_1_prime.add_edge(su, sv, weight=weight_here)
+        else:
+            new_weight = min(weight_here, G_1_prime[su][sv]["weight"])
+            G_1_prime.add_edge(su, sv, weight=new_weight)
+
+    G_2 = nx.minimum_spanning_edges(G_1_prime, data=True)
+
+    G_3 = nx.Graph()
+    for u, v, d in G_2:
+        path = nx.shortest_path(G, u, v, weight)
+        for n1, n2 in pairwise(path):
+            G_3.add_edge(n1, n2)
+
+    G_3_mst = list(nx.minimum_spanning_edges(G_3, data=False))
+    if G.is_multigraph():
+        G_3_mst = (
+            (u, v, min(G[u][v], key=lambda k: G[u][v][k][weight])) for u, v in G_3_mst
+        )
+    G_4 = G.edge_subgraph(G_3_mst).copy()
+    _remove_nonterminal_leaves(G_4, terminal_nodes)
+    return G_4.edges()
+
+
+def _kou_steiner_tree(G, terminal_nodes, weight):
+    # H is the subgraph induced by terminal_nodes in the metric closure M of G.
+    M = metric_closure(G, weight=weight)
+    H = M.subgraph(terminal_nodes)
+
+    # Use the 'distance' attribute of each edge provided by M.
+    mst_edges = nx.minimum_spanning_edges(H, weight="distance", data=True)
+
+    # Create an iterator over each edge in each shortest path; repeats are okay
+    mst_all_edges = chain.from_iterable(pairwise(d["path"]) for u, v, d in mst_edges)
+    if G.is_multigraph():
+        mst_all_edges = (
+            (u, v, min(G[u][v], key=lambda k: G[u][v][k][weight]))
+            for u, v in mst_all_edges
+        )
+
+    # Find the MST again, over this new set of edges
+    G_S = G.edge_subgraph(mst_all_edges)
+    T_S = nx.minimum_spanning_edges(G_S, weight="weight", data=False)
+
+    # Leaf nodes that are not terminal might still remain; remove them here
+    T_H = G.edge_subgraph(T_S).copy()
+    _remove_nonterminal_leaves(T_H, terminal_nodes)
+
+    return T_H.edges()
+
+
+def _remove_nonterminal_leaves(G, terminals):
+    terminals_set = set(terminals)
+    for n in list(G.nodes):
+        if n not in terminals_set and G.degree(n) == 1:
+            G.remove_node(n)
+
+
+ALGORITHMS = {
+    "kou": _kou_steiner_tree,
+    "mehlhorn": _mehlhorn_steiner_tree,
+}
+
+
+@not_implemented_for("directed")
 @nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
-def steiner_tree(G, terminal_nodes, weight='weight', method=None):
-    """Return an approximation to the minimum Steiner tree of a graph.
+def steiner_tree(G, terminal_nodes, weight="weight", method=None):
+    r"""Return an approximation to the minimum Steiner tree of a graph.

     The minimum Steiner tree of `G` w.r.t a set of `terminal_nodes` (also *S*)
     is a tree within `G` that spans those nodes and has minimum size (sum of
@@ -48,7 +145,7 @@ def steiner_tree(G, terminal_nodes, weight='weight', method=None):
       where the metric closure of *G* is the complete graph in which each edge is
       weighted by the shortest path distance between the nodes in *G*.

-    * ``"mehlhorn"`` [3]_ (runtime $O(|E|+|V|\\log|V|)$) modifies Kou et al.'s
+    * ``"mehlhorn"`` [3]_ (runtime $O(|E|+|V|\log|V|)$) modifies Kou et al.'s
       algorithm, beginning by finding the closest terminal node for each
       non-terminal. This data is used to create a complete graph containing only
       the terminal nodes, in which edge is weighted with the shortest path
@@ -105,4 +202,19 @@ def steiner_tree(G, terminal_nodes, weight='weight', method=None):
            Information Processing Letters 27 (3): 125–28.
            https://doi.org/10.1016/0020-0190(88)90066-X.
     """
-    pass
+    if method is None:
+        method = "mehlhorn"
+
+    try:
+        algo = ALGORITHMS[method]
+    except KeyError as e:
+        raise ValueError(f"{method} is not a valid choice for an algorithm.") from e
+
+    edges = algo(G, terminal_nodes, weight)
+    # For multigraph we should add the minimal weight edge keys
+    if G.is_multigraph():
+        edges = (
+            (u, v, min(G[u][v], key=lambda k: G[u][v][k][weight])) for u, v in edges
+        )
+    T = G.edge_subgraph(edges)
+    return T
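
Usage sketch on an unweighted grid (edge weights default to 1), with illustrative terminal nodes:

    import networkx as nx
    from networkx.algorithms import approximation as approx

    G = nx.grid_2d_graph(4, 4)
    terminals = [(0, 0), (3, 3), (0, 3)]
    T = approx.steiner_tree(G, terminals, method="mehlhorn")
    print(T.number_of_edges(), sorted(T.nodes()))
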
diff --git a/networkx/algorithms/approximation/traveling_salesman.py b/networkx/algorithms/approximation/traveling_salesman.py
index c18c99cb2..2a31b7281 100644
--- a/networkx/algorithms/approximation/traveling_salesman.py
+++ b/networkx/algorithms/approximation/traveling_salesman.py
@@ -34,11 +34,19 @@ important in operations research and theoretical computer science.
 http://en.wikipedia.org/wiki/Travelling_salesman_problem
 """
 import math
+
 import networkx as nx
 from networkx.algorithms.tree.mst import random_spanning_tree
 from networkx.utils import not_implemented_for, pairwise, py_random_state
-__all__ = ['traveling_salesman_problem', 'christofides', 'asadpour_atsp',
-    'greedy_tsp', 'simulated_annealing_tsp', 'threshold_accepting_tsp']
+
+__all__ = [
+    "traveling_salesman_problem",
+    "christofides",
+    "asadpour_atsp",
+    "greedy_tsp",
+    "simulated_annealing_tsp",
+    "threshold_accepting_tsp",
+]


 def swap_two_nodes(soln, seed):
@@ -71,7 +79,9 @@ def swap_two_nodes(soln, seed):
     --------
         move_one_node
     """
-    pass
+    a, b = seed.sample(range(1, len(soln) - 1), k=2)
+    soln[a], soln[b] = soln[b], soln[a]
+    return soln


 def move_one_node(soln, seed):
@@ -108,12 +118,14 @@ def move_one_node(soln, seed):
     --------
         swap_two_nodes
     """
-    pass
+    a, b = seed.sample(range(1, len(soln) - 1), k=2)
+    soln.insert(b, soln.pop(a))
+    return soln


-@not_implemented_for('directed')
-@nx._dispatchable(edge_attrs='weight')
-def christofides(G, weight='weight', tree=None):
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
+def christofides(G, weight="weight", tree=None):
     """Approximate a solution of the traveling salesman problem

     Compute a 3/2-approximation of the traveling salesman problem
@@ -145,17 +157,50 @@ def christofides(G, weight='weight', tree=None):
        the travelling salesman problem." No. RR-388. Carnegie-Mellon Univ
        Pittsburgh Pa Management Sciences Research Group, 1976.
     """
-    pass
+    # Remove selfloops if necessary
+    loop_nodes = nx.nodes_with_selfloops(G)
+    try:
+        node = next(loop_nodes)
+    except StopIteration:
+        pass
+    else:
+        G = G.copy()
+        G.remove_edge(node, node)
+        G.remove_edges_from((n, n) for n in loop_nodes)
+    # Check that G is a complete graph
+    N = len(G) - 1
+    # This check ignores selfloops which is what we want here.
+    if any(len(nbrdict) != N for n, nbrdict in G.adj.items()):
+        raise nx.NetworkXError("G must be a complete graph.")
+
+    if tree is None:
+        tree = nx.minimum_spanning_tree(G, weight=weight)
+    L = G.copy()
+    L.remove_nodes_from([v for v, degree in tree.degree if not (degree % 2)])
+    MG = nx.MultiGraph()
+    MG.add_edges_from(tree.edges)
+    edges = nx.min_weight_matching(L, weight=weight)
+    MG.add_edges_from(edges)
+    return _shortcutting(nx.eulerian_circuit(MG))


 def _shortcutting(circuit):
     """Remove duplicate nodes in the path"""
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
-def traveling_salesman_problem(G, weight='weight', nodes=None, cycle=True,
-    method=None, **kwargs):
+    nodes = []
+    for u, v in circuit:
+        if v in nodes:
+            continue
+        if not nodes:
+            nodes.append(u)
+        nodes.append(v)
+    nodes.append(nodes[0])
+    return nodes
+
+
+@nx._dispatchable(edge_attrs="weight")
+def traveling_salesman_problem(
+    G, weight="weight", nodes=None, cycle=True, method=None, **kwargs
+):
     """Find the shortest path in `G` connecting specified nodes

     This function allows approximate solution to the traveling salesman
@@ -263,13 +308,54 @@ def traveling_salesman_problem(G, weight='weight', nodes=None, cycle=True,
     >>> path in ([4, 3, 2, 1, 0, 8, 7, 6, 5], [5, 6, 7, 8, 0, 1, 2, 3, 4])
     True
     """
-    pass
-
-
-@not_implemented_for('undirected')
+    if method is None:
+        if G.is_directed():
+            method = asadpour_atsp
+        else:
+            method = christofides
+    if nodes is None:
+        nodes = list(G.nodes)
+
+    dist = {}
+    path = {}
+    for n, (d, p) in nx.all_pairs_dijkstra(G, weight=weight):
+        dist[n] = d
+        path[n] = p
+
+    if G.is_directed():
+        # If the graph is not strongly connected, raise an exception
+        if not nx.is_strongly_connected(G):
+            raise nx.NetworkXError("G is not strongly connected")
+        GG = nx.DiGraph()
+    else:
+        GG = nx.Graph()
+    for u in nodes:
+        for v in nodes:
+            if u == v:
+                continue
+            GG.add_edge(u, v, weight=dist[u][v])
+
+    best_GG = method(GG, weight=weight, **kwargs)
+
+    if not cycle:
+        # find and remove the biggest edge
+        (u, v) = max(pairwise(best_GG), key=lambda x: dist[x[0]][x[1]])
+        pos = best_GG.index(u) + 1
+        while best_GG[pos] != v:
+            pos = best_GG[pos:].index(u) + 1
+        best_GG = best_GG[pos:-1] + best_GG[:pos]
+
+    best_path = []
+    for u, v in pairwise(best_GG):
+        best_path.extend(path[u][v][:-1])
+    best_path.append(v)
+    return best_path
+
+
+@not_implemented_for("undirected")
 @py_random_state(2)
-@nx._dispatchable(edge_attrs='weight', mutates_input=True)
-def asadpour_atsp(G, weight='weight', seed=None, source=None):
+@nx._dispatchable(edge_attrs="weight", mutates_input=True)
+def asadpour_atsp(G, weight="weight", seed=None, source=None):
     """
     Returns an approximate solution to the traveling salesman problem.

@@ -342,11 +428,86 @@ def asadpour_atsp(G, weight='weight', seed=None, source=None):
     >>> tour
     [0, 2, 1, 0]
     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight', mutates_input=True, returns_graph=True)
-def held_karp_ascent(G, weight='weight'):
+    from math import ceil, exp
+    from math import log as ln
+
+    # Check that G is a complete graph
+    N = len(G) - 1
+    if N < 2:
+        raise nx.NetworkXError("G must have at least two nodes")
+    # This check ignores selfloops which is what we want here.
+    if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()):
+        raise nx.NetworkXError("G is not a complete DiGraph")
+    # Check that the source vertex, if given, is in the graph
+    if source is not None and source not in G.nodes:
+        raise nx.NetworkXError("Given source node not in G.")
+
+    opt_hk, z_star = held_karp_ascent(G, weight)
+
+    # Test to see if the ascent method found an integer solution or a fractional
+    # solution. If it is integral then z_star is a nx.Graph, otherwise it is
+    # a dict
+    if not isinstance(z_star, dict):
+        # Here we are using the shortcutting method to go from the list of edges
+        # returned from eulerian_circuit to a list of nodes
+        return _shortcutting(nx.eulerian_circuit(z_star, source=source))
+
+    # Create the undirected support of z_star
+    z_support = nx.MultiGraph()
+    for u, v in z_star:
+        if (u, v) not in z_support.edges:
+            edge_weight = min(G[u][v][weight], G[v][u][weight])
+            z_support.add_edge(u, v, **{weight: edge_weight})
+
+    # Create the exponential distribution of spanning trees
+    gamma = spanning_tree_distribution(z_support, z_star)
+
+    # Write the lambda values to the edges of z_support
+    z_support = nx.Graph(z_support)
+    lambda_dict = {(u, v): exp(gamma[(u, v)]) for u, v in z_support.edges()}
+    nx.set_edge_attributes(z_support, lambda_dict, "weight")
+    del gamma, lambda_dict
+
+    # Sample 2 * ceil( ln(n) ) spanning trees and record the minimum one
+    minimum_sampled_tree = None
+    minimum_sampled_tree_weight = math.inf
+    for _ in range(2 * ceil(ln(G.number_of_nodes()))):
+        sampled_tree = random_spanning_tree(z_support, "weight", seed=seed)
+        sampled_tree_weight = sampled_tree.size(weight)
+        if sampled_tree_weight < minimum_sampled_tree_weight:
+            minimum_sampled_tree = sampled_tree.copy()
+            minimum_sampled_tree_weight = sampled_tree_weight
+
+    # Orient the edges in that tree to keep the cost of the tree the same.
+    t_star = nx.MultiDiGraph()
+    for u, v, d in minimum_sampled_tree.edges(data=weight):
+        if d == G[u][v][weight]:
+            t_star.add_edge(u, v, **{weight: d})
+        else:
+            t_star.add_edge(v, u, **{weight: d})
+
+    # Find the node demands needed to neutralize the flow of t_star in G
+    node_demands = {n: t_star.out_degree(n) - t_star.in_degree(n) for n in t_star}
+    nx.set_node_attributes(G, node_demands, "demand")
+
+    # Find the min_cost_flow
+    flow_dict = nx.min_cost_flow(G, "demand")
+
+    # Build the flow into t_star
+    for source, values in flow_dict.items():
+        for target in values:
+            if (source, target) not in t_star.edges and values[target] > 0:
+                # If values[target] > 0 we have to add that many edges
+                for _ in range(values[target]):
+                    t_star.add_edge(source, target)
+
+    # Return the shortcut eulerian circuit
+    circuit = nx.eulerian_circuit(t_star, source=source)
+    return _shortcutting(circuit)
+
+
+@nx._dispatchable(edge_attrs="weight", mutates_input=True, returns_graph=True)
+def held_karp_ascent(G, weight="weight"):
     """
     Minimizes the Held-Karp relaxation of the TSP for `G`

@@ -391,7 +552,273 @@ def held_karp_ascent(G, weight='weight'):
            spanning trees, Operations Research, 1970-11-01, Vol. 18 (6),
            pp.1138-1162
     """
-    pass
+    import numpy as np
+    from scipy import optimize
+
+    def k_pi():
+        """
+        Find the set of minimum 1-Arborescences for G at point pi.
+
+        Returns
+        -------
+        Set
+            The set of minimum 1-Arborescences
+        """
+        # Create a copy of G without vertex 1.
+        G_1 = G.copy()
+        minimum_1_arborescences = set()
+        minimum_1_arborescence_weight = math.inf
+
+        # n is node '1' in the Held and Karp paper
+        n = next(iter(G))
+        G_1.remove_node(n)
+
+        # Iterate over the spanning arborescences of the graph until we know
+        # that we have found the minimum 1-arborescences. My proposed strategy
+        # is to find the most expensive root to connect to from 'node 1' and
+        # the least expensive one. We then iterate over arborescences until
+        # the cost of the basic arborescence is the cost of the minimum one
+        # plus the difference between the most and least expensive roots;
+        # past that point the cost of connecting 'node 1' can, by definition,
+        # no longer be minimal.
+        min_root = {"node": None, weight: math.inf}
+        max_root = {"node": None, weight: -math.inf}
+        for u, v, d in G.edges(n, data=True):
+            if d[weight] < min_root[weight]:
+                min_root = {"node": v, weight: d[weight]}
+            if d[weight] > max_root[weight]:
+                max_root = {"node": v, weight: d[weight]}
+
+        min_in_edge = min(G.in_edges(n, data=True), key=lambda x: x[2][weight])
+        min_root[weight] = min_root[weight] + min_in_edge[2][weight]
+        max_root[weight] = max_root[weight] + min_in_edge[2][weight]
+
+        min_arb_weight = math.inf
+        for arb in nx.ArborescenceIterator(G_1):
+            arb_weight = arb.size(weight)
+            if min_arb_weight == math.inf:
+                min_arb_weight = arb_weight
+            elif arb_weight > min_arb_weight + max_root[weight] - min_root[weight]:
+                break
+            # We connect 'node 1' to the root of the arborescence, as the
+            # root is the only node without an edge directed into it.
+            for N, deg in arb.in_degree:
+                if deg == 0:
+                    # root found
+                    arb.add_edge(n, N, **{weight: G[n][N][weight]})
+                    arb_weight += G[n][N][weight]
+                    break
+
+            # We can pick the minimum weight in-edge for node '1' (the edge
+            # that closes the cycle). If there are multiple edges with the
+            # same minimum weight, we need to add all of them.
+            #
+            # Delete the edge (N, n) so that we cannot pick it.
+            edge_data = G[N][n]
+            G.remove_edge(N, n)
+            min_weight = min(G.in_edges(n, data=weight), key=lambda x: x[2])[2]
+            min_edges = [
+                (u, v, d) for u, v, d in G.in_edges(n, data=weight) if d == min_weight
+            ]
+            for u, v, d in min_edges:
+                new_arb = arb.copy()
+                new_arb.add_edge(u, v, **{weight: d})
+                new_arb_weight = arb_weight + d
+                # Check the weight of the arborescence: if it is a new
+                # minimum, clear all of the old potential minimum
+                # 1-arborescences and add this as the only one. If its
+                # weight is above the known minimum, do not add it.
+                if new_arb_weight < minimum_1_arborescence_weight:
+                    minimum_1_arborescences.clear()
+                    minimum_1_arborescence_weight = new_arb_weight
+                # We have a 1-arborescence, add it to the set
+                if new_arb_weight == minimum_1_arborescence_weight:
+                    minimum_1_arborescences.add(new_arb)
+            G.add_edge(N, n, **edge_data)
+
+        return minimum_1_arborescences
+
+    def direction_of_ascent():
+        """
+        Find the direction of ascent at point pi.
+
+        See [1]_ for more information.
+
+        Returns
+        -------
+        dict
+            A mapping from the nodes of the graph which represents the direction
+            of ascent.
+
+        References
+        ----------
+        .. [1] M. Held, R. M. Karp, The traveling-salesman problem and minimum
+           spanning trees, Operations Research, 1970-11-01, Vol. 18 (6),
+           pp.1138-1162
+        """
+        # 1. Set d equal to the zero n-vector.
+        d = {}
+        for n in G:
+            d[n] = 0
+        del n
+        # 2. Find a 1-Arborescence T^k such that k is in K(pi, d).
+        minimum_1_arborescences = k_pi()
+        while True:
+            # Reduce K(pi) to K(pi, d)
+            # Find the arborescence in K(pi) which increases the least in
+            # direction d
+            min_k_d_weight = math.inf
+            min_k_d = None
+            for arborescence in minimum_1_arborescences:
+                weighted_cost = 0
+                for n, deg in arborescence.degree:
+                    weighted_cost += d[n] * (deg - 2)
+                if weighted_cost < min_k_d_weight:
+                    min_k_d_weight = weighted_cost
+                    min_k_d = arborescence
+
+            # 3. If sum of d_i * v_{i, k} is greater than zero, terminate
+            if min_k_d_weight > 0:
+                return d, min_k_d
+            # 4. d_i = d_i + v_{i, k}
+            for n, deg in min_k_d.degree:
+                d[n] += deg - 2
+            # Check that we do not need to terminate because the direction
+            # of ascent does not exist. This is done with linear
+            # programming.
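+            # Held and Karp show that pi is a maximum point exactly when the
+            # zero vector lies in the convex hull of the degree-deviation
+            # vectors v_k of the minimum 1-arborescences; the LP below
+            # searches for such convex multipliers.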
+            c = np.full(len(minimum_1_arborescences), -1, dtype=int)
+            a_eq = np.empty((len(G) + 1, len(minimum_1_arborescences)), dtype=int)
+            b_eq = np.zeros(len(G) + 1, dtype=int)
+            b_eq[len(G)] = 1
+            for arb_count, arborescence in enumerate(minimum_1_arborescences):
+                n_count = len(G) - 1
+                for n, deg in arborescence.degree:
+                    a_eq[n_count][arb_count] = deg - 2
+                    n_count -= 1
+                a_eq[len(G)][arb_count] = 1
+            program_result = optimize.linprog(
+                c, A_eq=a_eq, b_eq=b_eq, method="highs-ipm"
+            )
+            # If such multipliers exist, then the direction of ascent doesn't
+            if program_result.success:
+                # There is no direction of ascent
+                return None, minimum_1_arborescences
+
+            # 5. GO TO 2
+
+    def find_epsilon(k, d):
+        """
+        Given the direction of ascent at pi, find the maximum distance we can go
+        in that direction.
+
+        Parameters
+        ----------
+        k : nx.DiGraph
+            The 1-arborescence with the minimum rate of increase in the
+            direction of ascent
+
+        d : dict
+            The direction of ascent
+
+        Returns
+        -------
+        float
+            The distance we can travel in direction `d`
+        """
+        min_epsilon = math.inf
+        for e_u, e_v, e_w in G.edges(data=weight):
+            if (e_u, e_v) in k.edges:
+                continue
+            # Now, I have found a condition which MUST be true for the edges
+            # to be a valid substitute. The substitute edge in the graph is
+            # the one directed into the same head vertex. This can be checked
+            # rather simply.
+            #
+            # Find the edge within k which is the substitute. Because k is a
+            # 1-arborescence, we know that there is only one such edge
+            # leading into every vertex.
+            if len(k.in_edges(e_v, data=weight)) > 1:
+                raise nx.NetworkXError(
+                    "A 1-arborescence has at most one edge into each vertex"
+                )
+            sub_u, sub_v, sub_w = next(iter(k.in_edges(e_v, data=weight)))
+            k.add_edge(e_u, e_v, **{weight: e_w})
+            k.remove_edge(sub_u, sub_v)
+            if (
+                max(d for n, d in k.in_degree()) <= 1
+                and len(G) == k.number_of_edges()
+                and nx.is_weakly_connected(k)
+            ):
+                # Ascent method calculation
+                if d[sub_u] == d[e_u] or sub_w == e_w:
+                    # Revert to the original graph
+                    k.remove_edge(e_u, e_v)
+                    k.add_edge(sub_u, sub_v, **{weight: sub_w})
+                    continue
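+                # Derivation sketch: moving a distance eps along d raises the
+                # cost of an edge (u, v) at the rate d[u], so the candidate
+                # edge and its substitute tie when
+                #     e_w + eps * d[e_u] == sub_w + eps * d[sub_u],
+                # which gives the quotient below.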
+                epsilon = (sub_w - e_w) / (d[e_u] - d[sub_u])
+                if 0 < epsilon < min_epsilon:
+                    min_epsilon = epsilon
+            # Revert to the original graph
+            k.remove_edge(e_u, e_v)
+            k.add_edge(sub_u, sub_v, **{weight: sub_w})
+
+        return min_epsilon
+
+    # I have to know that the elements in pi correspond to the correct
+    # elements in the direction of ascent, even if the node labels are not
+    # integers. Thus, I will use dictionaries to make that mapping.
+    pi_dict = {}
+    for n in G:
+        pi_dict[n] = 0
+    del n
+    original_edge_weights = {}
+    for u, v, d in G.edges(data=True):
+        original_edge_weights[(u, v)] = d[weight]
+    dir_ascent, k_d = direction_of_ascent()
+    while dir_ascent is not None:
+        max_distance = find_epsilon(k_d, dir_ascent)
+        for n, v in dir_ascent.items():
+            pi_dict[n] += max_distance * v
+        for u, v, d in G.edges(data=True):
+            d[weight] = original_edge_weights[(u, v)] + pi_dict[u]
+        dir_ascent, k_d = direction_of_ascent()
+    nx._clear_cache(G)
+    # k_d is no longer an individual 1-arborescence but rather a set of
+    # minimal 1-arborescences at the maximum point of the polytope, so rename
+    # it to reflect that.
+    k_max = k_d
+
+    # Search for a tour within k_max. If a tour exists, return it as the
+    # solution.
+    for k in k_max:
+        if len([n for n in k if k.degree(n) == 2]) == G.order():
+            # Tour found
+            # TODO: this branch does not restore original_edge_weights of G!
+            return k.size(weight), k
+
+    # Write the original edge weights back to G and to every member of k_max
+    # at the maximum point. Also compute the fraction of the minimal
+    # 1-arborescences in which each edge appears.
+    x_star = {}
+    size_k_max = len(k_max)
+    for u, v, d in G.edges(data=True):
+        edge_count = 0
+        d[weight] = original_edge_weights[(u, v)]
+        for k in k_max:
+            if (u, v) in k.edges():
+                edge_count += 1
+                k[u][v][weight] = original_edge_weights[(u, v)]
+        x_star[(u, v)] = edge_count / size_k_max
+    # Now symmetrize the edges in x_star and scale them according to (5) in
+    # reference [1]
+    z_star = {}
+    scale_factor = (G.order() - 1) / G.order()
+    for u, v in x_star:
+        frequency = x_star[(u, v)] + x_star[(v, u)]
+        if frequency > 0:
+            z_star[(u, v)] = scale_factor * frequency
+    del x_star
+    # Return the optimal weight and the z dict
+    return next(iter(k_max)).size(weight), z_star


 @nx._dispatchable
@@ -423,11 +850,90 @@ def spanning_tree_distribution(G, z):
         The probability distribution which approximately preserves the marginal
         probabilities of `z`.
     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
-def greedy_tsp(G, weight='weight', source=None):
+    from math import exp
+    from math import log as ln
+
+    def q(e):
+        """
+        The value of q(e), as described in the Asadpour paper, is "the
+        probability that edge e will be included in a spanning tree T that is
+        chosen with probability proportional to exp(gamma(T))", which
+        basically means that it is the total probability of the edge appearing
+        across the whole distribution.
+
+        Parameters
+        ----------
+        e : tuple
+            The `(u, v)` tuple describing the edge we are interested in
+
+        Returns
+        -------
+        float
+            The probability that a spanning tree chosen according to the
+            current values of gamma will include edge `e`.
+        """
+        # Write the lambda weights to G and compute the weighted
+        # spanning-tree counts (the Kirchhoff values)
+        for u, v, d in G.edges(data=True):
+            d[lambda_key] = exp(gamma[(u, v)])
+        G_Kirchhoff = nx.total_spanning_tree_weight(G, lambda_key)
+        G_e = nx.contracted_edge(G, e, self_loops=False)
+        G_e_Kirchhoff = nx.total_spanning_tree_weight(G_e, lambda_key)
+
+        # Multiply by the weight of the contracted edge since it is not included
+        # in the total weight of the contracted graph.
+        return exp(gamma[(e[0], e[1])]) * G_e_Kirchhoff / G_Kirchhoff
+
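+    # Worked example of the Kirchhoff ratio above: on a triangle with all
+    # gamma = 0, every lambda_e = 1 and the weighted spanning-tree count is 3;
+    # contracting an edge leaves two parallel edges (count 2), so
+    # q(e) = 1 * 2 / 3, matching each edge lying in 2 of the 3 spanning trees.
+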
+    # initialize gamma to the zero dict
+    gamma = {}
+    for u, v, _ in G.edges:
+        gamma[(u, v)] = 0
+
+    # set epsilon
+    EPSILON = 0.2
+
+    # pick an edge attribute name that is unlikely to be in the graph
+    lambda_key = "spanning_tree_distribution's secret attribute name for lambda"
+
+    while True:
+        # We need to know that no value of q_e is greater than
+        # (1 + epsilon) * z_e; however, changing one gamma value can increase
+        # the value of a different q_e, so we have to complete the for loop
+        # without changing anything for the condition to be met.
+        in_range_count = 0
+        # Search for an edge with q_e > (1 + epsilon) * z_e
+        for u, v in gamma:
+            e = (u, v)
+            q_e = q(e)
+            z_e = z[e]
+            if q_e > (1 + EPSILON) * z_e:
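+                # delta is the closed-form solution of q'_e = (1 + EPSILON / 2) * z_e,
+                # where q'_e = q_e * exp(-delta) / (1 - q_e + q_e * exp(-delta))
+                # is q_e after lambda_e has been scaled by exp(-delta).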
+                delta = ln(
+                    (q_e * (1 - (1 + EPSILON / 2) * z_e))
+                    / ((1 - q_e) * (1 + EPSILON / 2) * z_e)
+                )
+                gamma[e] -= delta
+                # Check that delta had the desired effect
+                new_q_e = q(e)
+                desired_q_e = (1 + EPSILON / 2) * z_e
+                if round(new_q_e, 8) != round(desired_q_e, 8):
+                    raise nx.NetworkXError(
+                        f"Unable to modify probability for edge ({u}, {v})"
+                    )
+            else:
+                in_range_count += 1
+        # Check if the for loop terminated without changing any gamma
+        if in_range_count == len(gamma):
+            break
+
+    # Remove the new edge attributes
+    for _, _, d in G.edges(data=True):
+        if lambda_key in d:
+            del d[lambda_key]
+
+    return gamma
+
+
+@nx._dispatchable(edge_attrs="weight")
+def greedy_tsp(G, weight="weight", source=None):
     """Return a low cost cycle starting at `source` and its cost.

     This approximates a solution to the traveling salesman problem.
@@ -503,14 +1009,46 @@ def greedy_tsp(G, weight='weight', source=None):

     Time complexity: It has a running time $O(|V|^2)$
     """
-    pass
+    # Check that G is a complete graph
+    N = len(G) - 1
+    # This check ignores selfloops which is what we want here.
+    if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()):
+        raise nx.NetworkXError("G must be a complete graph.")
+
+    if source is None:
+        source = nx.utils.arbitrary_element(G)
+
+    if G.number_of_nodes() == 2:
+        neighbor = next(G.neighbors(source))
+        return [source, neighbor, source]
+
+    nodeset = set(G)
+    nodeset.remove(source)
+    cycle = [source]
+    next_node = source
+    while nodeset:
+        nbrdict = G[next_node]
+        next_node = min(nodeset, key=lambda n: nbrdict[n].get(weight, 1))
+        cycle.append(next_node)
+        nodeset.remove(next_node)
+    cycle.append(cycle[0])
+    return cycle
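+
+# A minimal usage sketch (hypothetical weights):
+#
+#     G = nx.complete_graph(4)
+#     for u, v in G.edges:
+#         G[u][v]["weight"] = abs(u - v) + 1
+#     cycle = greedy_tsp(G, source=0)  # a closed cycle starting and ending at 0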


 @py_random_state(9)
-@nx._dispatchable(edge_attrs='weight')
-def simulated_annealing_tsp(G, init_cycle, weight='weight', source=None,
-    temp=100, move='1-1', max_iterations=10, N_inner=100, alpha=0.01, seed=None
-    ):
+@nx._dispatchable(edge_attrs="weight")
+def simulated_annealing_tsp(
+    G,
+    init_cycle,
+    weight="weight",
+    source=None,
+    temp=100,
+    move="1-1",
+    max_iterations=10,
+    N_inner=100,
+    alpha=0.01,
+    seed=None,
+):
     """Returns an approximate solution to the traveling salesman problem.

     This function uses simulated annealing to approximate the minimal cost
@@ -663,14 +1201,84 @@ def simulated_annealing_tsp(G, init_cycle, weight='weight', source=None,
     For more information and how the algorithm is inspired see:
     http://en.wikipedia.org/wiki/Simulated_annealing
     """
-    pass
+    if move == "1-1":
+        move = swap_two_nodes
+    elif move == "1-0":
+        move = move_one_node
+    if init_cycle == "greedy":
+        # Construct an initial solution using a greedy algorithm.
+        cycle = greedy_tsp(G, weight=weight, source=source)
+        if G.number_of_nodes() == 2:
+            return cycle
+
+    else:
+        cycle = list(init_cycle)
+        if source is None:
+            source = cycle[0]
+        elif source != cycle[0]:
+            raise nx.NetworkXError("source must be first node in init_cycle")
+        if cycle[0] != cycle[-1]:
+            raise nx.NetworkXError("init_cycle must be a cycle. (return to start)")
+
+        if len(cycle) - 1 != len(G) or len(set(G.nbunch_iter(cycle))) != len(G):
+            raise nx.NetworkXError("init_cycle should be a cycle over all nodes in G.")
+
+        # Check that G is a complete graph
+        N = len(G) - 1
+        # This check ignores selfloops which is what we want here.
+        if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()):
+            raise nx.NetworkXError("G must be a complete graph.")
+
+        if G.number_of_nodes() == 2:
+            neighbor = next(G.neighbors(source))
+            return [source, neighbor, source]
+
+    # Find the cost of initial solution
+    cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(cycle))
+
+    count = 0
+    best_cycle = cycle.copy()
+    best_cost = cost
+    while count <= max_iterations and temp > 0:
+        count += 1
+        for i in range(N_inner):
+            adj_sol = move(cycle, seed)
+            adj_cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(adj_sol))
+            delta = adj_cost - cost
+            if delta <= 0:
+                # Set current solution the adjacent solution.
+                cycle = adj_sol
+                cost = adj_cost
+
+                if cost < best_cost:
+                    count = 0
+                    best_cycle = cycle.copy()
+                    best_cost = cost
+            else:
+                # Accept even a worse solution with probability p.
+                p = math.exp(-delta / temp)
+                if p >= seed.random():
+                    cycle = adj_sol
+                    cost = adj_cost
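+        # geometric cooling schedule: temp_{k+1} = (1 - alpha) * temp_k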
+        temp -= temp * alpha
+
+    return best_cycle


 @py_random_state(9)
-@nx._dispatchable(edge_attrs='weight')
-def threshold_accepting_tsp(G, init_cycle, weight='weight', source=None,
-    threshold=1, move='1-1', max_iterations=10, N_inner=100, alpha=0.1,
-    seed=None):
+@nx._dispatchable(edge_attrs="weight")
+def threshold_accepting_tsp(
+    G,
+    init_cycle,
+    weight="weight",
+    source=None,
+    threshold=1,
+    move="1-1",
+    max_iterations=10,
+    N_inner=100,
+    alpha=0.1,
+    seed=None,
+):
     """Returns an approximate solution to the traveling salesman problem.

     This function uses threshold accepting methods to approximate the minimal cost
@@ -828,4 +1436,63 @@ def threshold_accepting_tsp(G, init_cycle, weight='weight', source=None,
     simulated_annealing_tsp

     """
-    pass
+    if move == "1-1":
+        move = swap_two_nodes
+    elif move == "1-0":
+        move = move_one_node
+    if init_cycle == "greedy":
+        # Construct an initial solution using a greedy algorithm.
+        cycle = greedy_tsp(G, weight=weight, source=source)
+        if G.number_of_nodes() == 2:
+            return cycle
+
+    else:
+        cycle = list(init_cycle)
+        if source is None:
+            source = cycle[0]
+        elif source != cycle[0]:
+            raise nx.NetworkXError("source must be first node in init_cycle")
+        if cycle[0] != cycle[-1]:
+            raise nx.NetworkXError("init_cycle must be a cycle. (return to start)")
+
+        if len(cycle) - 1 != len(G) or len(set(G.nbunch_iter(cycle))) != len(G):
+            raise nx.NetworkXError("init_cycle is not all and only nodes.")
+
+        # Check that G is a complete graph
+        N = len(G) - 1
+        # This check ignores selfloops which is what we want here.
+        if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()):
+            raise nx.NetworkXError("G must be a complete graph.")
+
+        if G.number_of_nodes() == 2:
+            neighbor = next(G.neighbors(source))
+            return [source, neighbor, source]
+
+    # Find the cost of the initial solution
+    cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(cycle))
+
+    count = 0
+    best_cycle = cycle.copy()
+    best_cost = cost
+    while count <= max_iterations:
+        count += 1
+        accepted = False
+        for i in range(N_inner):
+            adj_sol = move(cycle, seed)
+            adj_cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(adj_sol))
+            delta = adj_cost - cost
+            if delta <= threshold:
+                accepted = True
+
+                # Set current solution the adjacent solution.
+                cycle = adj_sol
+                cost = adj_cost
+
+                if cost < best_cost:
+                    count = 0
+                    best_cycle = cycle.copy()
+                    best_cost = cost
+        if accepted:
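+            # shrink the threshold geometrically, but only after a sweep in
+            # which at least one candidate move was accepted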
+            threshold -= threshold * alpha
+
+    return best_cycle
diff --git a/networkx/algorithms/approximation/treewidth.py b/networkx/algorithms/approximation/treewidth.py
index 33ba4841d..31d73f636 100644
--- a/networkx/algorithms/approximation/treewidth.py
+++ b/networkx/algorithms/approximation/treewidth.py
@@ -28,16 +28,19 @@ There are two different functions for computing a tree decomposition:
       https://web.archive.org/web/20210507025929/http://web.eecs.utk.edu/~cphill25/cs594_spring2015_projects/treewidth.pdf

 """
+
 import itertools
 import sys
 from heapq import heapify, heappop, heappush
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['treewidth_min_degree', 'treewidth_min_fill_in']
+
+__all__ = ["treewidth_min_degree", "treewidth_min_fill_in"]


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable(returns_graph=True)
 def treewidth_min_degree(G):
     """Returns a treewidth decomposition using the Minimum Degree heuristic.
@@ -56,11 +59,12 @@ def treewidth_min_degree(G):
     Treewidth decomposition : (int, Graph) tuple
           2-tuple with treewidth and the corresponding decomposed tree.
     """
-    pass
+    deg_heuristic = MinDegreeHeuristic(G)
+    return treewidth_decomp(G, lambda graph: deg_heuristic.best_node(graph))


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable(returns_graph=True)
 def treewidth_min_fill_in(G):
     """Returns a treewidth decomposition using the Minimum Fill-in heuristic.
@@ -78,7 +82,7 @@ def treewidth_min_fill_in(G):
     Treewidth decomposition : (int, Graph) tuple
         2-tuple with treewidth and the corresponding decomposed tree.
     """
-    pass
+    return treewidth_decomp(G, min_fill_in_heuristic)


 class MinDegreeHeuristic:
@@ -92,13 +96,41 @@ class MinDegreeHeuristic:

     def __init__(self, graph):
         self._graph = graph
+
+        # nodes that have to be updated in the heap before each iteration
         self._update_nodes = []
-        self._degreeq = []
+
+        self._degreeq = []  # a heapq with 3-tuples (degree,unique_id,node)
         self.count = itertools.count()
+
+        # build heap with initial degrees
         for n in graph:
             self._degreeq.append((len(graph[n]), next(self.count), n))
         heapify(self._degreeq)

+    def best_node(self, graph):
+        # update nodes in self._update_nodes
+        for n in self._update_nodes:
+            # insert changed degrees into degreeq
+            heappush(self._degreeq, (len(graph[n]), next(self.count), n))
+
+        # get the next valid (minimum degree) node
+        while self._degreeq:
+            (min_degree, _, elim_node) = heappop(self._degreeq)
+            if elim_node not in graph or len(graph[elim_node]) != min_degree:
+                # outdated entry in degreeq
+                continue
+            elif min_degree == len(graph) - 1:
+                # fully connected: abort condition
+                return None
+
+            # remember to update nodes in the heap before getting the next node
+            self._update_nodes = graph[elim_node]
+            return elim_node
+
+        # the heap is empty: abort
+        return None
+

 def min_fill_in_heuristic(graph):
     """Implements the Minimum Degree heuristic.
@@ -108,7 +140,41 @@ def min_fill_in_heuristic(graph):
     possible. This algorithm chooses the nodes using the Minimum Fill-In
     heuristic. The running time of the algorithm is :math:`O(V^3)` and it uses
     additional constant memory."""
-    pass
+
+    if len(graph) == 0:
+        return None
+
+    min_fill_in_node = None
+
+    min_fill_in = sys.maxsize
+
+    # sort nodes by degree
+    nodes_by_degree = sorted(graph, key=lambda x: len(graph[x]))
+    min_degree = len(graph[nodes_by_degree[0]])
+
+    # abort condition (handle complete graph)
+    if min_degree == len(graph) - 1:
+        return None
+
+    for node in nodes_by_degree:
+        num_fill_in = 0
+        nbrs = graph[node]
+        for nbr in nbrs:
+            # count how many nodes in nbrs the current nbr is not connected
+            # to; subtract 1 for the node itself
+            num_fill_in += len(nbrs - graph[nbr]) - 1
+            if num_fill_in >= 2 * min_fill_in:
+                break
+
+        num_fill_in /= 2  # divide by 2 because of double counting
+
+        if num_fill_in < min_fill_in:  # update min-fill-in node
+            if num_fill_in == 0:
+                return node
+            min_fill_in = num_fill_in
+            min_fill_in_node = node
+
+    return min_fill_in_node


 @nx._dispatchable(returns_graph=True)
@@ -125,4 +191,62 @@ def treewidth_decomp(G, heuristic=min_fill_in_heuristic):
     Treewidth decomposition : (int, Graph) tuple
         2-tuple with treewidth and the corresponding decomposed tree.
     """
-    pass
+
+    # make dict-of-sets structure
+    graph = {n: set(G[n]) - {n} for n in G}
+
+    # stack containing nodes and neighbors in the order from the heuristic
+    node_stack = []
+
+    # get first node from heuristic
+    elim_node = heuristic(graph)
+    while elim_node is not None:
+        # connect all neighbors with each other
+        nbrs = graph[elim_node]
+        for u, v in itertools.permutations(nbrs, 2):
+            if v not in graph[u]:
+                graph[u].add(v)
+
+        # push node and its current neighbors on stack
+        node_stack.append((elim_node, nbrs))
+
+        # remove node from graph
+        for u in graph[elim_node]:
+            graph[u].remove(elim_node)
+
+        del graph[elim_node]
+        elim_node = heuristic(graph)
+
+    # the abort condition is met; put all remaining nodes into one bag
+    decomp = nx.Graph()
+    first_bag = frozenset(graph.keys())
+    decomp.add_node(first_bag)
+
+    treewidth = len(first_bag) - 1
+
+    while node_stack:
+        # get node and its neighbors from the stack
+        (curr_node, nbrs) = node_stack.pop()
+
+        # find a bag all neighbors are in
+        old_bag = None
+        for bag in decomp.nodes:
+            if nbrs <= bag:
+                old_bag = bag
+                break
+
+        if old_bag is None:
+            # no old_bag was found: just connect to the first_bag
+            old_bag = first_bag
+
+        # create new node for decomposition
+        nbrs.add(curr_node)
+        new_bag = frozenset(nbrs)
+
+        # update treewidth
+        treewidth = max(treewidth, len(new_bag) - 1)
+
+        # add edge to decomposition (implicitly also adds the new node)
+        decomp.add_edge(old_bag, new_bag)
+
+    return treewidth, decomp
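+
+# A minimal usage sketch (a 4-cycle has treewidth 2, which the heuristic
+# recovers):
+#
+#     tw, decomp = treewidth_min_degree(nx.cycle_graph(4))
+#     assert tw == 2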
diff --git a/networkx/algorithms/approximation/vertex_cover.py b/networkx/algorithms/approximation/vertex_cover.py
index 10b268303..c71399ebc 100644
--- a/networkx/algorithms/approximation/vertex_cover.py
+++ b/networkx/algorithms/approximation/vertex_cover.py
@@ -8,12 +8,13 @@ is incident to at least one node in the subset.

 """
 import networkx as nx
-__all__ = ['min_weighted_vertex_cover']

+__all__ = ["min_weighted_vertex_cover"]

-@nx._dispatchable(node_attrs='weight')
+
+@nx._dispatchable(node_attrs="weight")
 def min_weighted_vertex_cover(G, weight=None):
-    """Returns an approximate minimum weighted vertex cover.
+    r"""Returns an approximate minimum weighted vertex cover.

     The set of nodes returned by this function is guaranteed to be a
     vertex cover, and the total weight of the set is guaranteed to be at
@@ -22,7 +23,7 @@ def min_weighted_vertex_cover(G, weight=None):

     .. math::

-       w(S) \\leq 2 * w(S^*),
+       w(S) \leq 2 * w(S^*),

     where $S$ is the vertex cover returned by this function,
     $S^*$ is the vertex cover of minimum weight out of all vertex
@@ -54,7 +55,7 @@ def min_weighted_vertex_cover(G, weight=None):
     This is the local-ratio algorithm for computing an approximate
     vertex cover. The algorithm greedily reduces the costs over edges,
     iteratively building a cover. The worst-case runtime of this
-    implementation is $O(m \\log n)$, where $n$ is the number
+    implementation is $O(m \log n)$, where $n$ is the number
     of nodes and $m$ the number of edges in the graph.

     References
@@ -65,4 +66,17 @@ def min_weighted_vertex_cover(G, weight=None):
        <http://www.cs.technion.ac.il/~reuven/PDF/vc_lr.pdf>

     """
-    pass
+    cost = dict(G.nodes(data=weight, default=1))
+    # While there are uncovered edges, choose an uncovered edge and update
+    # the costs of its endpoints.
+    cover = set()
+    for u, v in G.edges():
+        if u in cover or v in cover:
+            continue
+        if cost[u] <= cost[v]:
+            cover.add(u)
+            cost[v] -= cost[u]
+        else:
+            cover.add(v)
+            cost[u] -= cost[v]
+    return cover
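+
+# Sketch of the local-ratio step on a single edge with cost[u] = 3 and
+# cost[v] = 1: v enters the cover and u's cost drops to 2, so the returned
+# cover {v} happens to be optimal here (in general only the 2-approximation
+# bound is guaranteed).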
diff --git a/networkx/algorithms/assortativity/connectivity.py b/networkx/algorithms/assortativity/connectivity.py
index d4dd9ca05..c3fde0da6 100644
--- a/networkx/algorithms/assortativity/connectivity.py
+++ b/networkx/algorithms/assortativity/connectivity.py
@@ -1,12 +1,15 @@
 from collections import defaultdict
+
 import networkx as nx
-__all__ = ['average_degree_connectivity']
+
+__all__ = ["average_degree_connectivity"]


-@nx._dispatchable(edge_attrs='weight')
-def average_degree_connectivity(G, source='in+out', target='in+out', nodes=
-    None, weight=None):
-    """Compute the average degree connectivity of graph.
+@nx._dispatchable(edge_attrs="weight")
+def average_degree_connectivity(
+    G, source="in+out", target="in+out", nodes=None, weight=None
+):
+    r"""Compute the average degree connectivity of graph.

     The average degree connectivity is the average nearest neighbor degree of
     nodes with degree k. For weighted graphs, an analogous measure can
@@ -15,7 +18,7 @@ def average_degree_connectivity(G, source='in+out', target='in+out', nodes=

     .. math::

-        k_{nn,i}^{w} = \\frac{1}{s_i} \\sum_{j \\in N(i)} w_{ij} k_j
+        k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j

     where `s_i` is the weighted degree of node `i`,
     `w_{ij}` is the weight of the edge that links `i` and `j`,
@@ -70,4 +73,50 @@ def average_degree_connectivity(G, source='in+out', target='in+out', nodes=
        "The architecture of complex weighted networks".
        PNAS 101 (11): 3747–3752 (2004).
     """
-    pass
+    # First, determine the type of neighbors and the type of degree to use.
+    if G.is_directed():
+        if source not in ("in", "out", "in+out"):
+            raise nx.NetworkXError('source must be one of "in", "out", or "in+out"')
+        if target not in ("in", "out", "in+out"):
+            raise nx.NetworkXError('target must be one of "in", "out", or "in+out"')
+        direction = {"out": G.out_degree, "in": G.in_degree, "in+out": G.degree}
+        neighbor_funcs = {
+            "out": G.successors,
+            "in": G.predecessors,
+            "in+out": G.neighbors,
+        }
+        source_degree = direction[source]
+        target_degree = direction[target]
+        neighbors = neighbor_funcs[source]
+        # `reverse` indicates whether to look at the in-edge when
+        # computing the weight of an edge.
+        reverse = source == "in"
+    else:
+        if source != "in+out" or target != "in+out":
+            raise nx.NetworkXError(
+                f"source and target arguments are only supported for directed graphs"
+            )
+        source_degree = G.degree
+        target_degree = G.degree
+        neighbors = G.neighbors
+        reverse = False
+    dsum = defaultdict(int)
+    dnorm = defaultdict(int)
+    # Check if `nodes` is actually a single node in the graph.
+    source_nodes = source_degree(nodes)
+    if nodes in G:
+        source_nodes = [(nodes, source_degree(nodes))]
+    for n, k in source_nodes:
+        nbrdeg = target_degree(neighbors(n))
+        if weight is None:
+            s = sum(d for n, d in nbrdeg)
+        else:  # weight nbr degree by weight of (n,nbr) edge
+            if reverse:
+                s = sum(G[nbr][n].get(weight, 1) * d for nbr, d in nbrdeg)
+            else:
+                s = sum(G[n][nbr].get(weight, 1) * d for nbr, d in nbrdeg)
+        dnorm[k] += source_degree(n, weight=weight)
+        dsum[k] += s
+
+    # normalize
+    return {k: avg if dnorm[k] == 0 else avg / dnorm[k] for k, avg in dsum.items()}
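+
+# Worked example (path graph 0-1-2-3): each degree-1 node has one neighbor
+# of degree 2, while each degree-2 node has neighbor-degree sum 3, so
+# average_degree_connectivity(nx.path_graph(4)) == {1: 2.0, 2: 1.5}.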
diff --git a/networkx/algorithms/assortativity/correlation.py b/networkx/algorithms/assortativity/correlation.py
index a6d207842..170d219a5 100644
--- a/networkx/algorithms/assortativity/correlation.py
+++ b/networkx/algorithms/assortativity/correlation.py
@@ -1,16 +1,22 @@
 """Node assortativity coefficients and correlation measures.
 """
 import networkx as nx
-from networkx.algorithms.assortativity.mixing import attribute_mixing_matrix, degree_mixing_matrix
+from networkx.algorithms.assortativity.mixing import (
+    attribute_mixing_matrix,
+    degree_mixing_matrix,
+)
 from networkx.algorithms.assortativity.pairs import node_degree_xy
-__all__ = ['degree_pearson_correlation_coefficient',
-    'degree_assortativity_coefficient',
-    'attribute_assortativity_coefficient', 'numeric_assortativity_coefficient']

+__all__ = [
+    "degree_pearson_correlation_coefficient",
+    "degree_assortativity_coefficient",
+    "attribute_assortativity_coefficient",
+    "numeric_assortativity_coefficient",
+]

-@nx._dispatchable(edge_attrs='weight')
-def degree_assortativity_coefficient(G, x='out', y='in', weight=None, nodes
-    =None):
+
+@nx._dispatchable(edge_attrs="weight")
+def degree_assortativity_coefficient(G, x="out", y="in", weight=None, nodes=None):
     """Compute degree assortativity of graph.

     Assortativity measures the similarity of connections
@@ -68,12 +74,34 @@ def degree_assortativity_coefficient(G, x='out', y='in', weight=None, nodes
     .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M.
        Edge direction and the structure of networks, PNAS 107, 10815-20 (2010).
     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
-def degree_pearson_correlation_coefficient(G, x='out', y='in', weight=None,
-    nodes=None):
+    if nodes is None:
+        nodes = G.nodes
+
+    degrees = None
+
+    if G.is_directed():
+        indeg = (
+            {d for _, d in G.in_degree(nodes, weight=weight)}
+            if "in" in (x, y)
+            else set()
+        )
+        outdeg = (
+            {d for _, d in G.out_degree(nodes, weight=weight)}
+            if "out" in (x, y)
+            else set()
+        )
+        degrees = set.union(indeg, outdeg)
+    else:
+        degrees = {d for _, d in G.degree(nodes, weight=weight)}
+
+    mapping = {d: i for i, d in enumerate(degrees)}
+    M = degree_mixing_matrix(G, x=x, y=y, nodes=nodes, weight=weight, mapping=mapping)
+
+    return _numeric_ac(M, mapping=mapping)
+
+
+@nx._dispatchable(edge_attrs="weight")
+def degree_pearson_correlation_coefficient(G, x="out", y="in", weight=None, nodes=None):
     """Compute degree assortativity of graph.

     Assortativity measures the similarity of connections
@@ -124,10 +152,14 @@ def degree_pearson_correlation_coefficient(G, x='out', y='in', weight=None,
     .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M.
        Edge direction and the structure of networks, PNAS 107, 10815-20 (2010).
     """
-    pass
+    import scipy as sp
+
+    xy = node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight)
+    x, y = zip(*xy)
+    return float(sp.stats.pearsonr(x, y)[0])


-@nx._dispatchable(node_attrs='attribute')
+@nx._dispatchable(node_attrs="attribute")
 def attribute_assortativity_coefficient(G, attribute, nodes=None):
     """Compute assortativity for node attributes.

@@ -170,10 +202,11 @@ def attribute_assortativity_coefficient(G, attribute, nodes=None):
     .. [1] M. E. J. Newman, Mixing patterns in networks,
        Physical Review E, 67 026126, 2003
     """
-    pass
+    M = attribute_mixing_matrix(G, attribute, nodes)
+    return attribute_ac(M)


-@nx._dispatchable(node_attrs='attribute')
+@nx._dispatchable(node_attrs="attribute")
 def numeric_assortativity_coefficient(G, attribute, nodes=None):
     """Compute assortativity for numerical node attributes.

@@ -215,7 +248,12 @@ def numeric_assortativity_coefficient(G, attribute, nodes=None):
     .. [1] M. E. J. Newman, Mixing patterns in networks
            Physical Review E, 67 026126, 2003
     """
-    pass
+    if nodes is None:
+        nodes = G.nodes
+    vals = {G.nodes[n][attribute] for n in nodes}
+    mapping = {d: i for i, d in enumerate(vals)}
+    M = attribute_mixing_matrix(G, attribute, nodes, mapping)
+    return _numeric_ac(M, mapping)


 def attribute_ac(M):
@@ -237,4 +275,28 @@ def attribute_ac(M):
     .. [1] M. E. J. Newman, Mixing patterns in networks,
        Physical Review E, 67 026126, 2003
     """
-    pass
+    if M.sum() != 1.0:
+        M = M / M.sum()
+    s = (M @ M).sum()
+    t = M.trace()
+    r = (t - s) / (1 - s)
+    return float(r)
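+
+# Sanity checks for the formula above: a purely diagonal M gives t = 1 and
+# hence r = 1 (perfect assortativity), while independent mixing (M equal to
+# the outer product of its marginals) gives t = s and r = 0.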
+
+
+def _numeric_ac(M, mapping):
+    # M is a 2D numpy array
+    # numeric assortativity coefficient, pearsonr
+    import numpy as np
+
+    if M.sum() != 1.0:
+        M = M / M.sum()
+    x = np.array(list(mapping.keys()))
+    y = x  # x and y have the same support
+    idx = list(mapping.values())
+    a = M.sum(axis=0)
+    b = M.sum(axis=1)
+    vara = (a[idx] * x**2).sum() - ((a[idx] * x).sum()) ** 2
+    varb = (b[idx] * y**2).sum() - ((b[idx] * y).sum()) ** 2
+    xy = np.outer(x, y)
+    ab = np.outer(a[idx], b[idx])
+    return float((xy * (M - ab)).sum() / np.sqrt(vara * varb))
diff --git a/networkx/algorithms/assortativity/mixing.py b/networkx/algorithms/assortativity/mixing.py
index 929c736b9..852ad82a4 100644
--- a/networkx/algorithms/assortativity/mixing.py
+++ b/networkx/algorithms/assortativity/mixing.py
@@ -4,11 +4,17 @@ Mixing matrices for node attributes and degree.
 import networkx as nx
 from networkx.algorithms.assortativity.pairs import node_attribute_xy, node_degree_xy
 from networkx.utils import dict_to_numpy_array
-__all__ = ['attribute_mixing_matrix', 'attribute_mixing_dict',
-    'degree_mixing_matrix', 'degree_mixing_dict', 'mixing_dict']

+__all__ = [
+    "attribute_mixing_matrix",
+    "attribute_mixing_dict",
+    "degree_mixing_matrix",
+    "degree_mixing_dict",
+    "mixing_dict",
+]

-@nx._dispatchable(node_attrs='attribute')
+
+@nx._dispatchable(node_attrs="attribute")
 def attribute_mixing_dict(G, attribute, nodes=None, normalized=False):
     """Returns dictionary representation of mixing matrix for attribute.

@@ -43,12 +49,12 @@ def attribute_mixing_dict(G, attribute, nodes=None, normalized=False):
     d : dictionary
        Counts or joint probability of occurrence of attribute pairs.
     """
-    pass
+    xy_iter = node_attribute_xy(G, attribute, nodes)
+    return mixing_dict(xy_iter, normalized=normalized)


-@nx._dispatchable(node_attrs='attribute')
-def attribute_mixing_matrix(G, attribute, nodes=None, mapping=None,
-    normalized=True):
+@nx._dispatchable(node_attrs="attribute")
+def attribute_mixing_matrix(G, attribute, nodes=None, mapping=None, normalized=True):
     """Returns mixing matrix for attribute.

     Parameters
@@ -100,12 +106,15 @@ def attribute_mixing_matrix(G, attribute, nodes=None, mapping=None,
     array([[0.  , 0.25],
            [0.25, 0.5 ]])
     """
-    pass
+    d = attribute_mixing_dict(G, attribute, nodes)
+    a = dict_to_numpy_array(d, mapping=mapping)
+    if normalized:
+        a = a / a.sum()
+    return a


-@nx._dispatchable(edge_attrs='weight')
-def degree_mixing_dict(G, x='out', y='in', weight=None, nodes=None,
-    normalized=False):
+@nx._dispatchable(edge_attrs="weight")
+def degree_mixing_dict(G, x="out", y="in", weight=None, nodes=None, normalized=False):
     """Returns dictionary representation of mixing matrix for degree.

     Parameters
@@ -132,12 +141,14 @@ def degree_mixing_dict(G, x='out', y='in', weight=None, nodes=None,
     d: dictionary
        Counts or joint probability of occurrence of degree pairs.
     """
-    pass
+    xy_iter = node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight)
+    return mixing_dict(xy_iter, normalized=normalized)


-@nx._dispatchable(edge_attrs='weight')
-def degree_mixing_matrix(G, x='out', y='in', weight=None, nodes=None,
-    normalized=True, mapping=None):
+@nx._dispatchable(edge_attrs="weight")
+def degree_mixing_matrix(
+    G, x="out", y="in", weight=None, nodes=None, normalized=True, mapping=None
+):
     """Returns mixing matrix for attribute.

     Parameters
@@ -199,7 +210,11 @@ def degree_mixing_matrix(G, x='out', y='in', weight=None, nodes=None,
            [0. , 0. , 0. , 0. ],
            [0. , 0.5, 0. , 0. ]])
     """
-    pass
+    d = degree_mixing_dict(G, x=x, y=y, nodes=nodes, weight=weight)
+    a = dict_to_numpy_array(d, mapping=mapping)
+    if normalized:
+        a = a / a.sum()
+    return a


 def mixing_dict(xy, normalized=False):
@@ -221,4 +236,19 @@ def mixing_dict(xy, normalized=False):
     d: dictionary
        Counts or Joint probability of occurrence of values in xy.
     """
-    pass
+    d = {}
+    psum = 0.0
+    for x, y in xy:
+        if x not in d:
+            d[x] = {}
+        if y not in d:
+            d[y] = {}
+        v = d[x].get(y, 0)
+        d[x][y] = v + 1
+        psum += 1
+
+    if normalized:
+        for _, jdict in d.items():
+            for j in jdict:
+                jdict[j] /= psum
+    return d
diff --git a/networkx/algorithms/assortativity/neighbor_degree.py b/networkx/algorithms/assortativity/neighbor_degree.py
index 75b0f6d85..6488d041a 100644
--- a/networkx/algorithms/assortativity/neighbor_degree.py
+++ b/networkx/algorithms/assortativity/neighbor_degree.py
@@ -1,11 +1,11 @@
 import networkx as nx
-__all__ = ['average_neighbor_degree']

+__all__ = ["average_neighbor_degree"]

-@nx._dispatchable(edge_attrs='weight')
-def average_neighbor_degree(G, source='out', target='out', nodes=None,
-    weight=None):
-    """Returns the average degree of the neighborhood of each node.
+
+@nx._dispatchable(edge_attrs="weight")
+def average_neighbor_degree(G, source="out", target="out", nodes=None, weight=None):
+    r"""Returns the average degree of the neighborhood of each node.

     In an undirected graph, the neighborhood `N(i)` of node `i` contains the
     nodes that are connected to `i` by an edge.
@@ -20,7 +20,7 @@ def average_neighbor_degree(G, source='out', target='out', nodes=None,

     .. math::

-        k_{nn,i} = \\frac{1}{|N(i)|} \\sum_{j \\in N(i)} k_j
+        k_{nn,i} = \frac{1}{|N(i)|} \sum_{j \in N(i)} k_j

     where `N(i)` are the neighbors of node `i` and `k_j` is
     the degree of node `j` which belongs to `N(i)`. For weighted
@@ -28,7 +28,7 @@ def average_neighbor_degree(G, source='out', target='out', nodes=None,

     .. math::

-        k_{nn,i}^{w} = \\frac{1}{s_i} \\sum_{j \\in N(i)} w_{ij} k_j
+        k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j

     where `s_i` is the weighted degree of node `i`, `w_{ij}`
     is the weight of the edge that links `i` and `j` and
@@ -94,4 +94,67 @@ def average_neighbor_degree(G, source='out', target='out', nodes=None,
        "The architecture of complex weighted networks".
        PNAS 101 (11): 3747–3752 (2004).
     """
-    pass
+    if G.is_directed():
+        if source == "in":
+            source_degree = G.in_degree
+        elif source == "out":
+            source_degree = G.out_degree
+        elif source == "in+out":
+            source_degree = G.degree
+        else:
+            raise nx.NetworkXError(
+                f"source argument {source} must be 'in', 'out' or 'in+out'"
+            )
+
+        if target == "in":
+            target_degree = G.in_degree
+        elif target == "out":
+            target_degree = G.out_degree
+        elif target == "in+out":
+            target_degree = G.degree
+        else:
+            raise nx.NetworkXError(
+                f"target argument {target} must be 'in', 'out' or 'in+out'"
+            )
+    else:
+        if source != "out" or target != "out":
+            raise nx.NetworkXError(
+                f"source and target arguments are only supported for directed graphs"
+            )
+        source_degree = target_degree = G.degree
+
+    # precompute target degrees -- should *not* be weighted degree
+    t_deg = dict(target_degree())
+
+    # Set up both predecessor and successor neighbor dicts leaving empty if not needed
+    G_P = G_S = {n: {} for n in G}
+    if G.is_directed():
+        # "in" or "in+out" cases: G_P contains predecessors
+        if "in" in source:
+            G_P = G.pred
+        # "out" or "in+out" cases: G_S contains successors
+        if "out" in source:
+            G_S = G.succ
+    else:
+        # undirected: leave G_P empty but use the adjacency for G_S
+        G_S = G.adj
+
+    # Main loop: Compute average degree of neighbors
+    avg = {}
+    for n, deg in source_degree(nodes, weight=weight):
+        # handle degree zero average
+        if deg == 0:
+            avg[n] = 0.0
+            continue
+
+        # we sum over both G_P and G_S, but one of the two is usually empty.
+        if weight is None:
+            avg[n] = (
+                sum(t_deg[nbr] for nbr in G_S[n]) + sum(t_deg[nbr] for nbr in G_P[n])
+            ) / deg
+        else:
+            avg[n] = (
+                sum(dd.get(weight, 1) * t_deg[nbr] for nbr, dd in G_S[n].items())
+                + sum(dd.get(weight, 1) * t_deg[nbr] for nbr, dd in G_P[n].items())
+            ) / deg
+    return avg
diff --git a/networkx/algorithms/assortativity/pairs.py b/networkx/algorithms/assortativity/pairs.py
index 65bf798ef..5a1d6f8e1 100644
--- a/networkx/algorithms/assortativity/pairs.py
+++ b/networkx/algorithms/assortativity/pairs.py
@@ -1,9 +1,10 @@
 """Generators of  x-y pairs of node data."""
 import networkx as nx
-__all__ = ['node_attribute_xy', 'node_degree_xy']

+__all__ = ["node_attribute_xy", "node_degree_xy"]

-@nx._dispatchable(node_attrs='attribute')
+
+@nx._dispatchable(node_attrs="attribute")
 def node_attribute_xy(G, attribute, nodes=None):
     """Returns iterator of node-attribute pairs for all edges in G.

@@ -38,11 +39,28 @@ def node_attribute_xy(G, attribute, nodes=None):
     representation (u, v) and (v, u), with the exception of self-loop edges
     which only appear once.
     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
-def node_degree_xy(G, x='out', y='in', weight=None, nodes=None):
+    if nodes is None:
+        nodes = set(G)
+    else:
+        nodes = set(nodes)
+    Gnodes = G.nodes
+    for u, nbrsdict in G.adjacency():
+        if u not in nodes:
+            continue
+        uattr = Gnodes[u].get(attribute, None)
+        if G.is_multigraph():
+            for v, keys in nbrsdict.items():
+                vattr = Gnodes[v].get(attribute, None)
+                for _ in keys:
+                    yield (uattr, vattr)
+        else:
+            for v in nbrsdict:
+                vattr = Gnodes[v].get(attribute, None)
+                yield (uattr, vattr)
+
+
+@nx._dispatchable(edge_attrs="weight")
+def node_degree_xy(G, x="out", y="in", weight=None, nodes=None):
     """Generate node degree-degree pairs for edges in G.

     Parameters
@@ -85,4 +103,16 @@ def node_degree_xy(G, x='out', y='in', weight=None, nodes=None):
     representation (u, v) and (v, u), with the exception of self-loop edges
     which only appear once.
     """
-    pass
+    nodes = set(G) if nodes is None else set(nodes)
+    if G.is_directed():
+        direction = {"out": G.out_degree, "in": G.in_degree}
+        xdeg = direction[x]
+        ydeg = direction[y]
+    else:
+        xdeg = ydeg = G.degree
+
+    for u, degu in xdeg(nodes, weight=weight):
+        # use G.edges to treat multigraphs correctly
+        neighbors = (nbr for _, nbr in G.edges(u) if nbr in nodes)
+        for _, degv in ydeg(neighbors, weight=weight):
+            yield degu, degv
diff --git a/networkx/algorithms/asteroidal.py b/networkx/algorithms/asteroidal.py
index 6242d3172..41e91390d 100644
--- a/networkx/algorithms/asteroidal.py
+++ b/networkx/algorithms/asteroidal.py
@@ -12,14 +12,15 @@ independent set and coloring.
 """
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['is_at_free', 'find_asteroidal_triple']

+__all__ = ["is_at_free", "find_asteroidal_triple"]

-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def find_asteroidal_triple(G):
-    """Find an asteroidal triple in the given graph.
+    r"""Find an asteroidal triple in the given graph.

     An asteroidal triple is a triple of non-adjacent vertices such that
     there exists a path between any two of them which avoids the closed
@@ -30,7 +31,7 @@ def find_asteroidal_triple(G):
     the same connected component when the closed neighborhood of a given vertex
     is removed from the graph. The algorithm used to check is the trivial
     one, outlined in [1]_, which has a runtime of
-    :math:`O(|V||\\overline{E} + |V||E|)`, where the second term is the
+    :math:`O(|V||\overline{E}| + |V||E|)`, where the second term is the
     creation of the component structure.

     Parameters
@@ -60,11 +61,36 @@ def find_asteroidal_triple(G):
        Journal of Discrete Algorithms 2, pages 439-452, 2004.
        https://www.sciencedirect.com/science/article/pii/S157086670400019X
     """
-    pass
-
-
-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+    V = set(G.nodes)
+
+    if len(V) < 6:
+        # An asteroidal triple cannot exist in a graph with 5 or fewer vertices.
+        return None
+
+    component_structure = create_component_structure(G)
+    E_complement = set(nx.complement(G).edges)
+
+    for u, v in E_complement:
+        u_neighborhood = set(G[u]).union([u])
+        v_neighborhood = set(G[v]).union([v])
+        union_of_neighborhoods = u_neighborhood.union(v_neighborhood)
+        for w in V - union_of_neighborhoods:
+            # Check for each pair of vertices whether they belong to the
+            # same connected component when the closed neighborhood of the
+            # third is removed.
+            if (
+                component_structure[u][v] == component_structure[u][w]
+                and component_structure[v][u] == component_structure[v][w]
+                and component_structure[w][u] == component_structure[w][v]
+            ):
+                return [u, v, w]
+    return None
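+
+# Example: in the 6-cycle 0-1-2-3-4-5-0 the triple (0, 2, 4) is asteroidal,
+# since e.g. the path 0-1-2 avoids N[4] = {3, 4, 5}; hence cycle graphs on
+# six or more vertices are not AT-free.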
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def is_at_free(G):
     """Check if a graph is AT-free.
@@ -94,21 +120,21 @@ def is_at_free(G):
     >>> nx.is_at_free(G)
     False
     """
-    pass
+    return find_asteroidal_triple(G) is None


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def create_component_structure(G):
-    """Create component structure for G.
+    r"""Create component structure for G.

    A *component structure* is an `n x n` array, denoted `c`, where `n` is
    the number of vertices and each row and column corresponds to a vertex.

     .. math::
-        c_{uv} = \\begin{cases} 0, if v \\in N[u] \\\\
-            k, if v \\in component k of G \\setminus N[u] \\end{cases}
+        c_{uv} = \begin{cases} 0, & \text{if } v \in N[u] \\
+            k, & \text{if } v \in \text{component } k \text{ of } G \setminus N[u] \end{cases}

     Where `k` is an arbitrary label for each component. The structure is used
     to simplify the detection of asteroidal triples.
@@ -124,4 +150,21 @@ def create_component_structure(G):
         A dictionary of dictionaries, keyed by pairs of vertices.

     """
-    pass
+    V = set(G.nodes)
+    component_structure = {}
+    for v in V:
+        label = 0
+        closed_neighborhood = set(G[v]).union({v})
+        row_dict = {}
+        for u in closed_neighborhood:
+            row_dict[u] = 0
+
+        G_reduced = G.subgraph(set(G.nodes) - closed_neighborhood)
+        for cc in nx.connected_components(G_reduced):
+            label += 1
+            for u in cc:
+                row_dict[u] = label
+
+        component_structure[v] = row_dict
+
+    return component_structure
diff --git a/networkx/algorithms/bipartite/basic.py b/networkx/algorithms/bipartite/basic.py
index 2db85cfba..d0a63a10f 100644
--- a/networkx/algorithms/bipartite/basic.py
+++ b/networkx/algorithms/bipartite/basic.py
@@ -6,8 +6,15 @@ Bipartite Graph Algorithms
 import networkx as nx
 from networkx.algorithms.components import connected_components
 from networkx.exception import AmbiguousSolution
-__all__ = ['is_bipartite', 'is_bipartite_node_set', 'color', 'sets',
-    'density', 'degrees']
+
+__all__ = [
+    "is_bipartite",
+    "is_bipartite_node_set",
+    "color",
+    "sets",
+    "density",
+    "degrees",
+]


 @nx._dispatchable
@@ -46,7 +53,34 @@ def color(G):
     >>> print(G.nodes[1]["bipartite"])
     0
     """
-    pass
+    if G.is_directed():
+        import itertools
+
+        def neighbors(v):
+            return itertools.chain.from_iterable([G.predecessors(v), G.successors(v)])
+
+    else:
+        neighbors = G.neighbors
+
+    color = {}
+    for n in G:  # handle disconnected graphs
+        if n in color or len(G[n]) == 0:  # skip isolates
+            continue
+        queue = [n]
+        color[n] = 1  # nodes seen with color (1 or 0)
+        while queue:
+            v = queue.pop()
+            c = 1 - color[v]  # opposite color of node v
+            for w in neighbors(v):
+                if w in color:
+                    if color[w] == color[v]:
+                        raise nx.NetworkXError("Graph is not bipartite.")
+                else:
+                    color[w] = c
+                    queue.append(w)
+    # color isolates with 0
+    color.update(dict.fromkeys(nx.isolates(G), 0))
+    return color
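+
+# A minimal sketch: color(nx.path_graph(3)) returns {0: 1, 1: 0, 2: 1},
+# alternating colors along the path; isolated nodes always receive color 0.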


 @nx._dispatchable
@@ -68,7 +102,11 @@ def is_bipartite(G):
     --------
     color, is_bipartite_node_set
     """
-    pass
+    try:
+        color(G)
+        return True
+    except nx.NetworkXError:
+        return False


 @nx._dispatchable
@@ -97,7 +135,23 @@ def is_bipartite_node_set(G, nodes):
     For connected graphs the bipartite sets are unique.  This function handles
     disconnected graphs.
     """
-    pass
+    S = set(nodes)
+
+    if len(S) < len(nodes):
+        # this should maybe just return False?
+        raise AmbiguousSolution(
+            "The input node set contains duplicates.\n"
+            "This may lead to incorrect results when using it in bipartite algorithms.\n"
+            "Consider using set(nodes) as the input"
+        )
+
+    for CC in (G.subgraph(c).copy() for c in connected_components(G)):
+        X, Y = sets(CC)
+        if not (
+            (X.issubset(S) and Y.isdisjoint(S)) or (Y.issubset(S) and X.isdisjoint(S))
+        ):
+            return False
+    return True


 @nx._dispatchable
@@ -150,10 +204,24 @@ def sets(G, top_nodes=None):
     color

     """
-    pass
-
-
-@nx._dispatchable(graphs='B')
+    if G.is_directed():
+        is_connected = nx.is_weakly_connected
+    else:
+        is_connected = nx.is_connected
+    if top_nodes is not None:
+        X = set(top_nodes)
+        Y = set(G) - X
+    else:
+        if not is_connected(G):
+            msg = "Disconnected graph: Ambiguous solution for bipartite sets."
+            raise nx.AmbiguousSolution(msg)
+        c = color(G)
+        X = {n for n, is_top in c.items() if is_top}
+        Y = {n for n, is_top in c.items() if not is_top}
+    return (X, Y)
+
+
+@nx._dispatchable(graphs="B")
 def density(B, nodes):
     """Returns density of bipartite graph B.

@@ -192,10 +260,21 @@ def density(B, nodes):
     --------
     color
     """
-    pass
-
-
-@nx._dispatchable(graphs='B', edge_attrs='weight')
+    n = len(B)
+    m = nx.number_of_edges(B)
+    nb = len(nodes)
+    nt = n - nb
+    if m == 0:  # includes cases n==0 and n==1
+        d = 0.0
+    else:
+        if B.is_directed():
+            d = m / (2 * nb * nt)
+        else:
+            d = m / (nb * nt)
+    return d
+
+
+@nx._dispatchable(graphs="B", edge_attrs="weight")
 def degrees(B, nodes, weight=None):
     """Returns the degrees of the two node sets in the bipartite graph B.

@@ -237,4 +316,6 @@ def degrees(B, nodes, weight=None):
     --------
     color, density
     """
-    pass
+    bottom = set(nodes)
+    top = set(B) - bottom
+    return (B.degree(top, weight), B.degree(bottom, weight))
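
The restored helpers in `basic.py` compose naturally; a short usage sketch (illustrative only, on a connected path graph so the bipartition is unambiguous):

    import networkx as nx
    from networkx.algorithms import bipartite

    G = nx.path_graph(4)  # 0-1-2-3, bipartite with parts {0, 2} and {1, 3}
    assert bipartite.is_bipartite(G)
    X, Y = bipartite.sets(G)                # ({0, 2}, {1, 3})
    print(bipartite.density(G, X))          # 3 edges / (2 * 2) = 0.75
    top_deg, bottom_deg = bipartite.degrees(G, Y)
    print(dict(top_deg), dict(bottom_deg))  # {0: 1, 2: 2} {1: 2, 3: 1}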
diff --git a/networkx/algorithms/bipartite/centrality.py b/networkx/algorithms/bipartite/centrality.py
index cca9a8892..42d7270ee 100644
--- a/networkx/algorithms/bipartite/centrality.py
+++ b/networkx/algorithms/bipartite/centrality.py
@@ -1,11 +1,11 @@
 import networkx as nx
-__all__ = ['degree_centrality', 'betweenness_centrality',
-    'closeness_centrality']

+__all__ = ["degree_centrality", "betweenness_centrality", "closeness_centrality"]

-@nx._dispatchable(name='bipartite_degree_centrality')
+
+@nx._dispatchable(name="bipartite_degree_centrality")
 def degree_centrality(G, nodes):
-    """Compute the degree centrality for nodes in a bipartite network.
+    r"""Compute the degree centrality for nodes in a bipartite network.

     The degree centrality for a node `v` is the fraction of nodes
     connected to it.
@@ -55,9 +55,9 @@ def degree_centrality(G, nodes):

     .. math::

-        d_{v} = \\frac{deg(v)}{m}, \\mbox{for} v \\in U ,
+        d_{v} = \frac{deg(v)}{m}, \mbox{for} v \in U ,

-        d_{v} = \\frac{deg(v)}{n}, \\mbox{for} v \\in V ,
+        d_{v} = \frac{deg(v)}{n}, \mbox{for} v \in V ,


     where `deg(v)` is the degree of node `v`.
@@ -69,12 +69,18 @@ def degree_centrality(G, nodes):
         of Social Network Analysis. Sage Publications.
         https://dx.doi.org/10.4135/9781446294413.n28
     """
-    pass
+    top = set(nodes)
+    bottom = set(G) - top
+    s = 1.0 / len(bottom)
+    centrality = {n: d * s for n, d in G.degree(top)}
+    s = 1.0 / len(top)
+    centrality.update({n: d * s for n, d in G.degree(bottom)})
+    return centrality


-@nx._dispatchable(name='bipartite_betweenness_centrality')
+@nx._dispatchable(name="bipartite_betweenness_centrality")
 def betweenness_centrality(G, nodes):
-    """Compute betweenness centrality for nodes in a bipartite network.
+    r"""Compute betweenness centrality for nodes in a bipartite network.

     Betweenness centrality of a node `v` is the sum of the
     fraction of all-pairs shortest paths that pass through `v`.
@@ -89,25 +95,25 @@ def betweenness_centrality(G, nodes):

     .. math::

-       \\frac{1}{2} [m^2 (s + 1)^2 + m (s + 1)(2t - s - 1) - t (2s - t + 3)] ,
+       \frac{1}{2} [m^2 (s + 1)^2 + m (s + 1)(2t - s - 1) - t (2s - t + 3)] ,

     where

     .. math::

-        s = (n - 1) \\div m , t = (n - 1) \\mod m ,
+        s = (n - 1) \div m , t = (n - 1) \mod m ,

     and nodes in `V` are normalized by dividing by

     .. math::

-        \\frac{1}{2} [n^2 (p + 1)^2 + n (p + 1)(2r - p - 1) - r (2p - r + 3)] ,
+        \frac{1}{2} [n^2 (p + 1)^2 + n (p + 1)(2r - p - 1) - r (2p - r + 3)] ,

     where,

     .. math::

-        p = (m - 1) \\div n , r = (m - 1) \\mod n .
+        p = (m - 1) \div n , r = (m - 1) \mod n .

     Parameters
     ----------
@@ -152,12 +158,33 @@ def betweenness_centrality(G, nodes):
         of Social Network Analysis. Sage Publications.
         https://dx.doi.org/10.4135/9781446294413.n28
     """
-    pass
-
-
-@nx._dispatchable(name='bipartite_closeness_centrality')
+    top = set(nodes)
+    bottom = set(G) - top
+    n = len(top)
+    m = len(bottom)
+    s, t = divmod(n - 1, m)
+    bet_max_top = (
+        ((m**2) * ((s + 1) ** 2))
+        + (m * (s + 1) * (2 * t - s - 1))
+        - (t * ((2 * s) - t + 3))
+    ) / 2.0
+    p, r = divmod(m - 1, n)
+    bet_max_bot = (
+        ((n**2) * ((p + 1) ** 2))
+        + (n * (p + 1) * (2 * r - p - 1))
+        - (r * ((2 * p) - r + 3))
+    ) / 2.0
+    betweenness = nx.betweenness_centrality(G, normalized=False, weight=None)
+    for node in top:
+        betweenness[node] /= bet_max_top
+    for node in bottom:
+        betweenness[node] /= bet_max_bot
+    return betweenness
+
+
+@nx._dispatchable(name="bipartite_closeness_centrality")
 def closeness_centrality(G, nodes, normalized=True):
-    """Compute the closeness centrality for nodes in a bipartite network.
+    r"""Compute the closeness centrality for nodes in a bipartite network.

     The closeness of a node is the distance to all other nodes in the
     graph or in the case that the graph is not connected to all other nodes
@@ -211,9 +238,9 @@ def closeness_centrality(G, nodes, normalized=True):

     .. math::

-        c_{v} = \\frac{m + 2(n - 1)}{d}, \\mbox{for} v \\in U,
+        c_{v} = \frac{m + 2(n - 1)}{d}, \mbox{for} v \in U,

-        c_{v} = \\frac{n + 2(m - 1)}{d}, \\mbox{for} v \\in V,
+        c_{v} = \frac{n + 2(m - 1)}{d}, \mbox{for} v \in V,

     where `d` is the sum of the distances from `v` to all
     other nodes.
@@ -234,4 +261,30 @@ def closeness_centrality(G, nodes, normalized=True):
         of Social Network Analysis. Sage Publications.
         https://dx.doi.org/10.4135/9781446294413.n28
     """
-    pass
+    closeness = {}
+    path_length = nx.single_source_shortest_path_length
+    top = set(nodes)
+    bottom = set(G) - top
+    n = len(top)
+    m = len(bottom)
+    for node in top:
+        sp = dict(path_length(G, node))
+        totsp = sum(sp.values())
+        if totsp > 0.0 and len(G) > 1:
+            closeness[node] = (m + 2 * (n - 1)) / totsp
+            if normalized:
+                s = (len(sp) - 1) / (len(G) - 1)
+                closeness[node] *= s
+        else:
+            closeness[node] = 0.0
+    for node in bottom:
+        sp = dict(path_length(G, node))
+        totsp = sum(sp.values())
+        if totsp > 0.0 and len(G) > 1:
+            closeness[node] = (n + 2 * (m - 1)) / totsp
+            if normalized:
+                s = (len(sp) - 1) / (len(G) - 1)
+                closeness[node] *= s
+        else:
+            closeness[node] = 0.0
+    return closeness
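
All three centrality routines normalize against the opposite node set, so on a complete bipartite graph every node is maximally central; a small sketch (illustrative, not part of the patch):

    import networkx as nx
    from networkx.algorithms import bipartite

    G = nx.complete_bipartite_graph(2, 3)  # top nodes {0, 1}, bottom {2, 3, 4}
    top = {0, 1}
    print(bipartite.degree_centrality(G, top))     # 1.0 for every node
    print(bipartite.closeness_centrality(G, top))  # also 1.0 everywhere here
    print(bipartite.betweenness_centrality(G, top))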
diff --git a/networkx/algorithms/bipartite/cluster.py b/networkx/algorithms/bipartite/cluster.py
index 56e5d1c32..d96115277 100644
--- a/networkx/algorithms/bipartite/cluster.py
+++ b/networkx/algorithms/bipartite/cluster.py
@@ -1,23 +1,44 @@
 """Functions for computing clustering of pairs

 """
+
 import itertools
+
 import networkx as nx
-__all__ = ['clustering', 'average_clustering', 'latapy_clustering',
-    'robins_alexander_clustering']
-modes = {'dot': cc_dot, 'min': cc_min, 'max': cc_max}
+
+__all__ = [
+    "clustering",
+    "average_clustering",
+    "latapy_clustering",
+    "robins_alexander_clustering",
+]
+
+
+def cc_dot(nu, nv):
+    return len(nu & nv) / len(nu | nv)
+
+
+def cc_max(nu, nv):
+    return len(nu & nv) / max(len(nu), len(nv))
+
+
+def cc_min(nu, nv):
+    return len(nu & nv) / min(len(nu), len(nv))
+
+
+modes = {"dot": cc_dot, "min": cc_min, "max": cc_max}


 @nx._dispatchable
-def latapy_clustering(G, nodes=None, mode='dot'):
-    """Compute a bipartite clustering coefficient for nodes.
+def latapy_clustering(G, nodes=None, mode="dot"):
+    r"""Compute a bipartite clustering coefficient for nodes.

     The bipartite clustering coefficient is a measure of local density
     of connections defined as [1]_:

     .. math::

-       c_u = \\frac{\\sum_{v \\in N(N(u))} c_{uv} }{|N(N(u))|}
+       c_u = \frac{\sum_{v \in N(N(u))} c_{uv} }{|N(N(u))|}

     where `N(N(u))` are the second order neighbors of `u` in `G` excluding `u`,
     and `c_{uv}` is the pairwise clustering coefficient between nodes
@@ -29,19 +50,19 @@ def latapy_clustering(G, nodes=None, mode='dot'):

     .. math::

-       c_{uv}=\\frac{|N(u)\\cap N(v)|}{|N(u) \\cup N(v)|}
+       c_{uv}=\frac{|N(u)\cap N(v)|}{|N(u) \cup N(v)|}

     `min`:

     .. math::

-       c_{uv}=\\frac{|N(u)\\cap N(v)|}{min(|N(u)|,|N(v)|)}
+       c_{uv}=\frac{|N(u)\cap N(v)|}{min(|N(u)|,|N(v)|)}

     `max`:

     .. math::

-       c_{uv}=\\frac{|N(u)\\cap N(v)|}{max(|N(u)|,|N(v)|)}
+       c_{uv}=\frac{|N(u)\cap N(v)|}{max(|N(u)|,|N(v)|)}


     Parameters
@@ -86,21 +107,42 @@ def latapy_clustering(G, nodes=None, mode='dot'):
        Basic notions for the analysis of large two-mode networks.
        Social Networks 30(1), 31--48.
     """
-    pass
+    if not nx.algorithms.bipartite.is_bipartite(G):
+        raise nx.NetworkXError("Graph is not bipartite")
+
+    try:
+        cc_func = modes[mode]
+    except KeyError as err:
+        raise nx.NetworkXError(
+            "Mode for bipartite clustering must be: dot, min or max"
+        ) from err
+
+    if nodes is None:
+        nodes = G
+    ccs = {}
+    for v in nodes:
+        cc = 0.0
+        nbrs2 = {u for nbr in G[v] for u in G[nbr]} - {v}
+        for u in nbrs2:
+            cc += cc_func(set(G[u]), set(G[v]))
+        if cc > 0.0:  # len(nbrs2)>0
+            cc /= len(nbrs2)
+        ccs[v] = cc
+    return ccs


 clustering = latapy_clustering


-@nx._dispatchable(name='bipartite_average_clustering')
-def average_clustering(G, nodes=None, mode='dot'):
-    """Compute the average bipartite clustering coefficient.
+@nx._dispatchable(name="bipartite_average_clustering")
+def average_clustering(G, nodes=None, mode="dot"):
+    r"""Compute the average bipartite clustering coefficient.

     A clustering coefficient for the whole graph is the average,

     .. math::

-       C = \\frac{1}{n}\\sum_{v \\in G} c_v,
+       C = \frac{1}{n}\sum_{v \in G} c_v,

     where `n` is the number of nodes in `G`.

@@ -108,7 +150,7 @@ def average_clustering(G, nodes=None, mode='dot'):

     .. math::

-       C_X = \\frac{1}{|X|}\\sum_{v \\in X} c_v,
+       C_X = \frac{1}{|X|}\sum_{v \in X} c_v,

     where `X` is a bipartite set of `G`.

@@ -163,12 +205,15 @@ def average_clustering(G, nodes=None, mode='dot'):
         Basic notions for the analysis of large two-mode networks.
         Social Networks 30(1), 31--48.
     """
-    pass
+    if nodes is None:
+        nodes = G
+    ccs = latapy_clustering(G, nodes=nodes, mode=mode)
+    return sum(ccs[v] for v in nodes) / len(nodes)


 @nx._dispatchable
 def robins_alexander_clustering(G):
-    """Compute the bipartite clustering of G.
+    r"""Compute the bipartite clustering of G.

     Robins and Alexander [1]_ defined bipartite clustering coefficient as
     four times the number of four cycles `C_4` divided by the number of
@@ -176,7 +221,7 @@ def robins_alexander_clustering(G):

     .. math::

-       CC_4 = \\frac{4 * C_4}{L_3}
+       CC_4 = \frac{4 * C_4}{L_3}

     Parameters
     ----------
@@ -207,4 +252,29 @@ def robins_alexander_clustering(G):
            Computational & Mathematical Organization Theory 10(1), 69–94.

     """
-    pass
+    if G.order() < 4 or G.size() < 3:
+        return 0
+    L_3 = _threepaths(G)
+    if L_3 == 0:
+        return 0
+    C_4 = _four_cycles(G)
+    return (4.0 * C_4) / L_3
+
+
+def _four_cycles(G):
+    cycles = 0
+    for v in G:
+        for u, w in itertools.combinations(G[v], 2):
+            cycles += len((set(G[u]) & set(G[w])) - {v})
+    return cycles / 4
+
+
+def _threepaths(G):
+    paths = 0
+    for v in G:
+        for u in G[v]:
+            for w in set(G[u]) - {v}:
+                paths += len(set(G[w]) - {v, u})
+    # Divide by two because we count each three-path twice,
+    # once for each possible starting point
+    return paths / 2
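
The clustering functions above share the Latapy machinery; a quick sketch on a path graph, where every pairwise coefficient works out to 1/2 and there are no four-cycles (illustrative only):

    import networkx as nx
    from networkx.algorithms import bipartite

    G = nx.path_graph(4)
    print(bipartite.clustering(G))                   # {0: 0.5, 1: 0.5, 2: 0.5, 3: 0.5}
    print(bipartite.average_clustering(G))           # 0.5
    print(bipartite.robins_alexander_clustering(G))  # 0.0: one three-path, no C_4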
diff --git a/networkx/algorithms/bipartite/covering.py b/networkx/algorithms/bipartite/covering.py
index 39dbf9ba6..720c63ac4 100644
--- a/networkx/algorithms/bipartite/covering.py
+++ b/networkx/algorithms/bipartite/covering.py
@@ -1,14 +1,16 @@
 """ Functions related to graph covers."""
+
 import networkx as nx
 from networkx.algorithms.bipartite.matching import hopcroft_karp_matching
 from networkx.algorithms.covering import min_edge_cover as _min_edge_cover
 from networkx.utils import not_implemented_for
-__all__ = ['min_edge_cover']
+
+__all__ = ["min_edge_cover"]


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
-@nx._dispatchable(name='bipartite_min_edge_cover')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable(name="bipartite_min_edge_cover")
 def min_edge_cover(G, matching_algorithm=None):
     """Returns a set of edges which constitutes
     the minimum edge cover of the graph.
@@ -48,4 +50,8 @@ def min_edge_cover(G, matching_algorithm=None):
     is bounded by the worst-case running time of the function
     ``matching_algorithm``.
     """
-    pass
+    if G.order() == 0:  # Special case for the empty graph
+        return set()
+    if matching_algorithm is None:
+        matching_algorithm = hopcroft_karp_matching
+    return _min_edge_cover(G, matching_algorithm=matching_algorithm)
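
Since `min_edge_cover` defers to the generic cover routine seeded with a bipartite matching, a perfect matching comes back whenever one exists; note the returned set stores both orientations of each undirected edge (illustrative sketch):

    import networkx as nx
    from networkx.algorithms import bipartite

    G = nx.complete_bipartite_graph(2, 2)
    cover = bipartite.min_edge_cover(G)
    print(len(cover))  # 4: two matching edges, each in both orientations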
diff --git a/networkx/algorithms/bipartite/edgelist.py b/networkx/algorithms/bipartite/edgelist.py
index 252a14b18..70631ea0e 100644
--- a/networkx/algorithms/bipartite/edgelist.py
+++ b/networkx/algorithms/bipartite/edgelist.py
@@ -22,15 +22,14 @@ Arbitrary data::

 For each edge (u, v) the node u is assigned to part 0 and the node v to part 1.
 """
-__all__ = ['generate_edgelist', 'write_edgelist', 'parse_edgelist',
-    'read_edgelist']
+__all__ = ["generate_edgelist", "write_edgelist", "parse_edgelist", "read_edgelist"]
+
 import networkx as nx
 from networkx.utils import not_implemented_for, open_file


-@open_file(1, mode='wb')
-def write_edgelist(G, path, comments='#', delimiter=' ', data=True,
-    encoding='utf-8'):
+@open_file(1, mode="wb")
+def write_edgelist(G, path, comments="#", delimiter=" ", data=True, encoding="utf-8"):
     """Write a bipartite graph as a list of edges.

     Parameters
@@ -74,11 +73,13 @@ def write_edgelist(G, path, comments='#', delimiter=' ', data=True,
     write_edgelist
     generate_edgelist
     """
-    pass
+    for line in generate_edgelist(G, delimiter, data):
+        line += "\n"
+        path.write(line.encode(encoding))


-@not_implemented_for('directed')
-def generate_edgelist(G, delimiter=' ', data=True):
+@not_implemented_for("directed")
+def generate_edgelist(G, delimiter=" ", data=True):
     """Generate a single line of the bipartite graph G in edge list format.

     Parameters
@@ -126,13 +127,29 @@ def generate_edgelist(G, delimiter=' ', data=True):
     2 1 3
     2 3
     """
-    pass
-
-
-@nx._dispatchable(name='bipartite_parse_edgelist', graphs=None,
-    returns_graph=True)
-def parse_edgelist(lines, comments='#', delimiter=None, create_using=None,
-    nodetype=None, data=True):
+    try:
+        part0 = [n for n, d in G.nodes.items() if d["bipartite"] == 0]
+    except BaseException as err:
+        raise AttributeError("Missing node attribute `bipartite`") from err
+    if data is True or data is False:
+        for n in part0:
+            for edge in G.edges(n, data=data):
+                yield delimiter.join(map(str, edge))
+    else:
+        for n in part0:
+            for u, v, d in G.edges(n, data=True):
+                edge = [u, v]
+                try:
+                    edge.extend(d[k] for k in data)
+                except KeyError:
+                    pass  # missing data for this edge, should warn?
+                yield delimiter.join(map(str, edge))
+
+
+@nx._dispatchable(name="bipartite_parse_edgelist", graphs=None, returns_graph=True)
+def parse_edgelist(
+    lines, comments="#", delimiter=None, create_using=None, nodetype=None, data=True
+):
     """Parse lines of an edge list representation of a bipartite graph.

     Parameters
@@ -192,14 +209,76 @@ def parse_edgelist(lines, comments='#', delimiter=None, create_using=None,
     See Also
     --------
     """
-    pass
-
-
-@open_file(0, mode='rb')
-@nx._dispatchable(name='bipartite_read_edgelist', graphs=None,
-    returns_graph=True)
-def read_edgelist(path, comments='#', delimiter=None, create_using=None,
-    nodetype=None, data=True, edgetype=None, encoding='utf-8'):
+    from ast import literal_eval
+
+    G = nx.empty_graph(0, create_using)
+    for line in lines:
+        p = line.find(comments)
+        if p >= 0:
+            line = line[:p]
+        if not len(line):
+            continue
+        # split line, should have 2 or more
+        s = line.strip().split(delimiter)
+        if len(s) < 2:
+            continue
+        u = s.pop(0)
+        v = s.pop(0)
+        d = s
+        if nodetype is not None:
+            try:
+                u = nodetype(u)
+                v = nodetype(v)
+            except BaseException as err:
+                raise TypeError(
+                    f"Failed to convert nodes {u},{v} to type {nodetype}."
+                ) from err
+
+        if len(d) == 0 or data is False:
+            # no data or data type specified
+            edgedata = {}
+        elif data is True:
+            # no edge types specified
+            try:  # try to evaluate as dictionary
+                edgedata = dict(literal_eval(" ".join(d)))
+            except BaseException as err:
+                raise TypeError(
+                    f"Failed to convert edge data ({d}) to dictionary."
+                ) from err
+        else:
+            # convert edge data to dictionary with specified keys and type
+            if len(d) != len(data):
+                raise IndexError(
+                    f"Edge data {d} and data_keys {data} are not the same length"
+                )
+            edgedata = {}
+            for (edge_key, edge_type), edge_value in zip(data, d):
+                try:
+                    edge_value = edge_type(edge_value)
+                except BaseException as err:
+                    raise TypeError(
+                        f"Failed to convert {edge_key} data "
+                        f"{edge_value} to type {edge_type}."
+                    ) from err
+                edgedata.update({edge_key: edge_value})
+        G.add_node(u, bipartite=0)
+        G.add_node(v, bipartite=1)
+        G.add_edge(u, v, **edgedata)
+    return G
+
+
+@open_file(0, mode="rb")
+@nx._dispatchable(name="bipartite_read_edgelist", graphs=None, returns_graph=True)
+def read_edgelist(
+    path,
+    comments="#",
+    delimiter=None,
+    create_using=None,
+    nodetype=None,
+    data=True,
+    edgetype=None,
+    encoding="utf-8",
+):
     """Read a bipartite graph from a list of edges.

     Parameters
@@ -269,4 +348,12 @@ def read_edgelist(path, comments='#', delimiter=None, create_using=None,
     Since nodes must be hashable, the function nodetype must return hashable
     types (e.g. int, float, str, frozenset - or tuples of those, etc.)
     """
-    pass
+    lines = (line.decode(encoding) for line in path)
+    return parse_edgelist(
+        lines,
+        comments=comments,
+        delimiter=delimiter,
+        create_using=create_using,
+        nodetype=nodetype,
+        data=data,
+    )
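
A round-trip through `generate_edgelist` and `parse_edgelist` (illustrative only; `generate_edgelist` requires every node to carry the `bipartite` attribute):

    import networkx as nx
    from networkx.algorithms import bipartite

    G = nx.path_graph(4)
    nx.set_node_attributes(G, {n: n % 2 for n in G}, "bipartite")
    lines = list(bipartite.generate_edgelist(G, data=False))
    print(lines)  # ['0 1', '2 1', '2 3']
    H = bipartite.parse_edgelist(lines, nodetype=int, data=False)
    print(sorted(H.edges()))  # [(0, 1), (1, 2), (2, 3)]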
diff --git a/networkx/algorithms/bipartite/extendability.py b/networkx/algorithms/bipartite/extendability.py
index 84e5c3c5c..0764997ad 100644
--- a/networkx/algorithms/bipartite/extendability.py
+++ b/networkx/algorithms/bipartite/extendability.py
@@ -1,12 +1,15 @@
 """ Provides a function for computing the extendability of a graph which is
 undirected, simple, connected and bipartite and contains at least one perfect matching."""
+
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['maximal_extendability']
+
+__all__ = ["maximal_extendability"]


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def maximal_extendability(G):
     """Computes the extendability of a graph.
@@ -63,4 +66,41 @@ def maximal_extendability(G):
           https://doi.org/10.1016/0012-365X(80)90037-0

     """
-    pass
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph G is not connected")
+
+    if not nx.bipartite.is_bipartite(G):
+        raise nx.NetworkXError("Graph G is not bipartite")
+
+    U, V = nx.bipartite.sets(G)
+
+    maximum_matching = nx.bipartite.hopcroft_karp_matching(G)
+
+    if not nx.is_perfect_matching(G, maximum_matching):
+        raise nx.NetworkXError("Graph G does not contain a perfect matching")
+
+    # list of edges in perfect matching, directed from V to U
+    pm = [(node, maximum_matching[node]) for node in V & maximum_matching.keys()]
+
+    # Direct all the edges of G, from V to U if in matching, else from U to V
+    directed_edges = [
+        (x, y) if (x in V and (x, y) in pm) or (x in U and (y, x) not in pm) else (y, x)
+        for x, y in G.edges
+    ]
+
+    # Construct the residual graph of G
+    residual_G = nx.DiGraph()
+    residual_G.add_nodes_from(G)
+    residual_G.add_edges_from(directed_edges)
+
+    if not nx.is_strongly_connected(residual_G):
+        raise nx.NetworkXError("The residual graph of G is not strongly connected")
+
+    # For node-pairs between V & U, keep min of max number of node-disjoint paths
+    # Variable $k$ stands for the extendability of graph G
+    k = float("inf")
+    for u in U:
+        for v in V:
+            num_paths = sum(1 for _ in nx.node_disjoint_paths(residual_G, u, v))
+            k = k if k < num_paths else num_paths
+    return k
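
A small sanity check for `maximal_extendability` (illustrative; the even cycle C_6 is connected, bipartite, and perfectly matchable, and its residual graph is a single directed cycle, so each node pair is joined by exactly one node-disjoint path):

    import networkx as nx

    G = nx.cycle_graph(6)
    print(nx.bipartite.maximal_extendability(G))  # 1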
diff --git a/networkx/algorithms/bipartite/generators.py b/networkx/algorithms/bipartite/generators.py
index f5d313d58..de6f07972 100644
--- a/networkx/algorithms/bipartite/generators.py
+++ b/networkx/algorithms/bipartite/generators.py
@@ -4,12 +4,20 @@ Generators and functions for bipartite graphs.
 import math
 import numbers
 from functools import reduce
+
 import networkx as nx
 from networkx.utils import nodes_or_number, py_random_state
-__all__ = ['configuration_model', 'havel_hakimi_graph',
-    'reverse_havel_hakimi_graph', 'alternating_havel_hakimi_graph',
-    'preferential_attachment_graph', 'random_graph', 'gnmk_random_graph',
-    'complete_bipartite_graph']
+
+__all__ = [
+    "configuration_model",
+    "havel_hakimi_graph",
+    "reverse_havel_hakimi_graph",
+    "alternating_havel_hakimi_graph",
+    "preferential_attachment_graph",
+    "random_graph",
+    "gnmk_random_graph",
+    "complete_bipartite_graph",
+]


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -41,12 +49,25 @@ def complete_bipartite_graph(n1, n2, create_using=None):
     This function is not imported in the main namespace.
     To use it use nx.bipartite.complete_bipartite_graph
     """
-    pass
+    G = nx.empty_graph(0, create_using)
+    if G.is_directed():
+        raise nx.NetworkXError("Directed Graph not supported")
+
+    n1, top = n1
+    n2, bottom = n2
+    if isinstance(n1, numbers.Integral) and isinstance(n2, numbers.Integral):
+        bottom = [n1 + i for i in bottom]
+    G.add_nodes_from(top, bipartite=0)
+    G.add_nodes_from(bottom, bipartite=1)
+    if len(G) != len(top) + len(bottom):
+        raise nx.NetworkXError("Inputs n1 and n2 must contain distinct nodes")
+    G.add_edges_from((u, v) for u in top for v in bottom)
+    G.graph["name"] = f"complete_bipartite_graph({n1}, {n2})"
+    return G


 @py_random_state(3)
-@nx._dispatchable(name='bipartite_configuration_model', graphs=None,
-    returns_graph=True)
+@nx._dispatchable(name="bipartite_configuration_model", graphs=None, returns_graph=True)
 def configuration_model(aseq, bseq, create_using=None, seed=None):
     """Returns a random bipartite graph from two given degree sequences.

@@ -80,11 +101,44 @@ def configuration_model(aseq, bseq, create_using=None, seed=None):
     This function is not imported in the main namespace.
     To use it use nx.bipartite.configuration_model
     """
-    pass
+    G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
+    if G.is_directed():
+        raise nx.NetworkXError("Directed Graph not supported")
+
+    # length and sum of each sequence
+    lena = len(aseq)
+    lenb = len(bseq)
+    suma = sum(aseq)
+    sumb = sum(bseq)
+
+    if not suma == sumb:
+        raise nx.NetworkXError(
+            f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}"
+        )
+
+    G = _add_nodes_with_bipartite_label(G, lena, lenb)
+
+    if len(aseq) == 0 or max(aseq) == 0:
+        return G  # done if no edges
+
+    # build lists of degree-repeated vertex numbers
+    stubs = [[v] * aseq[v] for v in range(lena)]
+    astubs = [x for subseq in stubs for x in subseq]
+
+    stubs = [[v] * bseq[v - lena] for v in range(lena, lena + lenb)]
+    bstubs = [x for subseq in stubs for x in subseq]
+
+    # shuffle lists
+    seed.shuffle(astubs)
+    seed.shuffle(bstubs)
+
+    G.add_edges_from([astubs[i], bstubs[i]] for i in range(suma))
+
+    G.name = "bipartite_configuration_model"
+    return G


-@nx._dispatchable(name='bipartite_havel_hakimi_graph', graphs=None,
-    returns_graph=True)
+@nx._dispatchable(name="bipartite_havel_hakimi_graph", graphs=None, returns_graph=True)
 def havel_hakimi_graph(aseq, bseq, create_using=None):
     """Returns a bipartite graph from two given degree sequences using a
     Havel-Hakimi style construction.
@@ -117,7 +171,46 @@ def havel_hakimi_graph(aseq, bseq, create_using=None):
     This function is not imported in the main namespace.
     To use it use nx.bipartite.havel_hakimi_graph
     """
-    pass
+    G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
+    if G.is_directed():
+        raise nx.NetworkXError("Directed Graph not supported")
+
+    # length of each sequence
+    naseq = len(aseq)
+    nbseq = len(bseq)
+
+    suma = sum(aseq)
+    sumb = sum(bseq)
+
+    if not suma == sumb:
+        raise nx.NetworkXError(
+            f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}"
+        )
+
+    G = _add_nodes_with_bipartite_label(G, naseq, nbseq)
+
+    if len(aseq) == 0 or max(aseq) == 0:
+        return G  # done if no edges
+
+    # build list of degree-repeated vertex numbers
+    astubs = [[aseq[v], v] for v in range(naseq)]
+    bstubs = [[bseq[v - naseq], v] for v in range(naseq, naseq + nbseq)]
+    astubs.sort()
+    while astubs:
+        (degree, u) = astubs.pop()  # take off the largest degree node in the a set
+        if degree == 0:
+            break  # done, all are zero
+        # connect the source to the largest degree nodes in the b set
+        bstubs.sort()
+        for target in bstubs[-degree:]:
+            v = target[1]
+            G.add_edge(u, v)
+            target[0] -= 1  # note this updates bstubs too.
+            if target[0] == 0:
+                bstubs.remove(target)
+
+    G.name = "bipartite_havel_hakimi_graph"
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -153,7 +246,45 @@ def reverse_havel_hakimi_graph(aseq, bseq, create_using=None):
     This function is not imported in the main namespace.
     To use it use nx.bipartite.reverse_havel_hakimi_graph
     """
-    pass
+    G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
+    if G.is_directed():
+        raise nx.NetworkXError("Directed Graph not supported")
+
+    # length of each sequence
+    lena = len(aseq)
+    lenb = len(bseq)
+    suma = sum(aseq)
+    sumb = sum(bseq)
+
+    if not suma == sumb:
+        raise nx.NetworkXError(
+            f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}"
+        )
+
+    G = _add_nodes_with_bipartite_label(G, lena, lenb)
+
+    if len(aseq) == 0 or max(aseq) == 0:
+        return G  # done if no edges
+
+    # build list of degree-repeated vertex numbers
+    astubs = [[aseq[v], v] for v in range(lena)]
+    bstubs = [[bseq[v - lena], v] for v in range(lena, lena + lenb)]
+    astubs.sort()
+    bstubs.sort()
+    while astubs:
+        (degree, u) = astubs.pop()  # take off the largest degree node in the a set
+        if degree == 0:
+            break  # done, all are zero
+        # connect the source to the smallest degree nodes in the b set
+        for target in bstubs[0:degree]:
+            v = target[1]
+            G.add_edge(u, v)
+            target[0] -= 1  # note this updates bstubs too.
+            if target[0] == 0:
+                bstubs.remove(target)
+
+    G.name = "bipartite_reverse_havel_hakimi_graph"
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -190,7 +321,48 @@ def alternating_havel_hakimi_graph(aseq, bseq, create_using=None):
     This function is not imported in the main namespace.
     To use it use nx.bipartite.alternating_havel_hakimi_graph
     """
-    pass
+    G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
+    if G.is_directed():
+        raise nx.NetworkXError("Directed Graph not supported")
+
+    # length of each sequence
+    naseq = len(aseq)
+    nbseq = len(bseq)
+    suma = sum(aseq)
+    sumb = sum(bseq)
+
+    if not suma == sumb:
+        raise nx.NetworkXError(
+            f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}"
+        )
+
+    G = _add_nodes_with_bipartite_label(G, naseq, nbseq)
+
+    if len(aseq) == 0 or max(aseq) == 0:
+        return G  # done if no edges
+    # build list of degree-repeated vertex numbers
+    astubs = [[aseq[v], v] for v in range(naseq)]
+    bstubs = [[bseq[v - naseq], v] for v in range(naseq, naseq + nbseq)]
+    while astubs:
+        astubs.sort()
+        (degree, u) = astubs.pop()  # take off the largest degree node in the a set
+        if degree == 0:
+            break  # done, all are zero
+        bstubs.sort()
+        small = bstubs[0 : degree // 2]  # add these low degree targets
+        large = bstubs[(-degree + degree // 2) :]  # now high degree targets
+        stubs = [x for z in zip(large, small) for x in z]  # interleave high and low degree targets
+        if len(stubs) < len(small) + len(large):  # check for zip truncation
+            stubs.append(large.pop())
+        for target in stubs:
+            v = target[1]
+            G.add_edge(u, v)
+            target[0] -= 1  # note this updates bstubs too.
+            if target[0] == 0:
+                bstubs.remove(target)
+
+    G.name = "bipartite_alternating_havel_hakimi_graph"
+    return G


 @py_random_state(3)
@@ -234,7 +406,35 @@ def preferential_attachment_graph(aseq, p, create_using=None, seed=None):
     This function is not imported in the main namespace.
     To use it use nx.bipartite.preferential_attachment_graph
     """
-    pass
+    G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
+    if G.is_directed():
+        raise nx.NetworkXError("Directed Graph not supported")
+
+    if p > 1:
+        raise nx.NetworkXError(f"probability {p} > 1")
+
+    naseq = len(aseq)
+    G = _add_nodes_with_bipartite_label(G, naseq, 0)
+    vv = [[v] * aseq[v] for v in range(naseq)]
+    while vv:
+        while vv[0]:
+            source = vv[0][0]
+            vv[0].remove(source)
+            if seed.random() < p or len(G) == naseq:
+                target = len(G)
+                G.add_node(target, bipartite=1)
+                G.add_edge(source, target)
+            else:
+                bb = [[b] * G.degree(b) for b in range(naseq, len(G))]
+                # flatten the list of lists into a list.
+                bbstubs = reduce(lambda x, y: x + y, bb)
+                # choose preferentially a bottom node.
+                target = seed.choice(bbstubs)
+                G.add_node(target, bipartite=1)
+                G.add_edge(source, target)
+        vv.remove(vv[0])
+    G.name = "bipartite_preferential_attachment_model"
+    return G


 @py_random_state(3)
@@ -283,7 +483,45 @@ def random_graph(n, m, p, seed=None, directed=False):
        "Efficient generation of large random networks",
        Phys. Rev. E, 71, 036113, 2005.
     """
-    pass
+    G = nx.Graph()
+    G = _add_nodes_with_bipartite_label(G, n, m)
+    if directed:
+        G = nx.DiGraph(G)
+    G.name = f"fast_gnp_random_graph({n},{m},{p})"
+
+    if p <= 0:
+        return G
+    if p >= 1:
+        return nx.complete_bipartite_graph(n, m)
+
+    lp = math.log(1.0 - p)
+
+    v = 0
+    w = -1
+    while v < n:
+        lr = math.log(1.0 - seed.random())
+        w = w + 1 + int(lr / lp)
+        while w >= m and v < n:
+            w = w - m
+            v = v + 1
+        if v < n:
+            G.add_edge(v, n + w)
+
+    if directed:
+        # use the same algorithm to
+        # add edges from the "m" to "n" set
+        v = 0
+        w = -1
+        while v < n:
+            lr = math.log(1.0 - seed.random())
+            w = w + 1 + int(lr / lp)
+            while w >= m and v < n:
+                w = w - m
+                v = v + 1
+            if v < n:
+                G.add_edge(n + w, v)
+
+    return G


 @py_random_state(3)
@@ -331,4 +569,35 @@ def gnmk_random_graph(n, m, k, seed=None, directed=False):
     This function is not imported in the main namespace.
     To use it use nx.bipartite.gnmk_random_graph
     """
-    pass
+    G = nx.Graph()
+    G = _add_nodes_with_bipartite_label(G, n, m)
+    if directed:
+        G = nx.DiGraph(G)
+    G.name = f"bipartite_gnm_random_graph({n},{m},{k})"
+    if n == 1 or m == 1:
+        return G
+    max_edges = n * m  # max_edges for bipartite networks
+    if k >= max_edges:  # Maybe we should raise an exception here
+        return nx.complete_bipartite_graph(n, m, create_using=G)
+
+    top = [n for n, d in G.nodes(data=True) if d["bipartite"] == 0]
+    bottom = list(set(G) - set(top))
+    edge_count = 0
+    while edge_count < k:
+        # generate random edge,u,v
+        u = seed.choice(top)
+        v = seed.choice(bottom)
+        if v in G[u]:
+            continue
+        else:
+            G.add_edge(u, v)
+            edge_count += 1
+    return G
+
+
+def _add_nodes_with_bipartite_label(G, lena, lenb):
+    G.add_nodes_from(range(lena + lenb))
+    b = dict(zip(range(lena), [0] * lena))
+    b.update(dict(zip(range(lena, lena + lenb), [1] * lenb)))
+    nx.set_node_attributes(G, b, "bipartite")
+    return G
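
The generators all label parts via `_add_nodes_with_bipartite_label`; a sketch of two of them (illustrative only; which edges appear depends on the seed, so only the guaranteed counts are shown):

    import networkx as nx
    from networkx.algorithms import bipartite

    G = bipartite.gnmk_random_graph(3, 4, 6, seed=42)
    top = {n for n, d in G.nodes(data=True) if d["bipartite"] == 0}
    print(sorted(top), G.number_of_edges())  # [0, 1, 2] 6

    H = bipartite.configuration_model([2, 2], [1, 1, 2], seed=42)
    print(sorted(d for _, d in H.degree()))  # [1, 1, 2, 2, 2]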
diff --git a/networkx/algorithms/bipartite/matching.py b/networkx/algorithms/bipartite/matching.py
index 931b37b2f..48149ab9e 100644
--- a/networkx/algorithms/bipartite/matching.py
+++ b/networkx/algorithms/bipartite/matching.py
@@ -1,3 +1,13 @@
+# This module uses material from the Wikipedia article Hopcroft--Karp algorithm
+# <https://en.wikipedia.org/wiki/Hopcroft%E2%80%93Karp_algorithm>, accessed on
+# January 3, 2015, which is released under the Creative Commons
+# Attribution-Share-Alike License 3.0
+# <http://creativecommons.org/licenses/by-sa/3.0/>. That article includes
+# pseudocode, which has been translated into the corresponding Python code.
+#
+# Portions of this module use code from David Eppstein's Python Algorithms and
+# Data Structures (PADS) library, which is dedicated to the public domain (for
+# proof, see <http://www.ics.uci.edu/~eppstein/PADS/ABOUT-PADS.txt>).
 """Provides functions for computing maximum cardinality matchings and minimum
 weight full matchings in a bipartite graph.

@@ -28,12 +38,20 @@ edges included in the matching is minimal.
 """
 import collections
 import itertools
+
 import networkx as nx
 from networkx.algorithms.bipartite import sets as bipartite_sets
 from networkx.algorithms.bipartite.matrix import biadjacency_matrix
-__all__ = ['maximum_matching', 'hopcroft_karp_matching',
-    'eppstein_matching', 'to_vertex_cover', 'minimum_weight_full_matching']
-INFINITY = float('inf')
+
+__all__ = [
+    "maximum_matching",
+    "hopcroft_karp_matching",
+    "eppstein_matching",
+    "to_vertex_cover",
+    "minimum_weight_full_matching",
+]
+
+INFINITY = float("inf")


 @nx._dispatchable
@@ -98,7 +116,69 @@ def hopcroft_karp_matching(G, top_nodes=None):
        2.4 (1973), pp. 225--231. <https://doi.org/10.1137/0202019>.

     """
-    pass
+
+    # First we define some auxiliary search functions.
+    #
+    # If you are a human reading these auxiliary search functions, the "global"
+    # variables `leftmatches`, `rightmatches`, `distances`, etc. are defined
+    # below the functions, so that they are initialized close to the initial
+    # invocation of the search functions.
+    def breadth_first_search():
+        for v in left:
+            if leftmatches[v] is None:
+                distances[v] = 0
+                queue.append(v)
+            else:
+                distances[v] = INFINITY
+        distances[None] = INFINITY
+        while queue:
+            v = queue.popleft()
+            if distances[v] < distances[None]:
+                for u in G[v]:
+                    if distances[rightmatches[u]] is INFINITY:
+                        distances[rightmatches[u]] = distances[v] + 1
+                        queue.append(rightmatches[u])
+        return distances[None] is not INFINITY
+
+    def depth_first_search(v):
+        if v is not None:
+            for u in G[v]:
+                if distances[rightmatches[u]] == distances[v] + 1:
+                    if depth_first_search(rightmatches[u]):
+                        rightmatches[u] = v
+                        leftmatches[v] = u
+                        return True
+            distances[v] = INFINITY
+            return False
+        return True
+
+    # Initialize the "global" variables that maintain state during the search.
+    left, right = bipartite_sets(G, top_nodes)
+    leftmatches = {v: None for v in left}
+    rightmatches = {v: None for v in right}
+    distances = {}
+    queue = collections.deque()
+
+    # Implementation note: this counter is incremented as pairs are matched but
+    # it is currently not used elsewhere in the computation.
+    num_matched_pairs = 0
+    while breadth_first_search():
+        for v in left:
+            if leftmatches[v] is None:
+                if depth_first_search(v):
+                    num_matched_pairs += 1
+
+    # Strip the entries matched to `None`.
+    leftmatches = {k: v for k, v in leftmatches.items() if v is not None}
+    rightmatches = {k: v for k, v in rightmatches.items() if v is not None}
+
+    # At this point, the left matches and the right matches are inverses of one
+    # another. In other words,
+    #
+    #     leftmatches == {v: k for k, v in rightmatches.items()}
+    #
+    # Finally, we combine both the left matches and right matches.
+    return dict(itertools.chain(leftmatches.items(), rightmatches.items()))


 @nx._dispatchable
@@ -149,11 +229,98 @@ def eppstein_matching(G, top_nodes=None):
     hopcroft_karp_matching

     """
-    pass
-
-
-def _is_connected_by_alternating_path(G, v, matched_edges, unmatched_edges,
-    targets):
+    # Due to its original implementation, a directed graph is needed
+    # so that the two sets of bipartite nodes can be distinguished
+    left, right = bipartite_sets(G, top_nodes)
+    G = nx.DiGraph(G.edges(left))
+    # initialize greedy matching (redundant, but faster than full search)
+    matching = {}
+    for u in G:
+        for v in G[u]:
+            if v not in matching:
+                matching[v] = u
+                break
+    while True:
+        # structure residual graph into layers
+        # pred[u] gives the neighbor in the previous layer for u in U
+        # preds[v] gives a list of neighbors in the previous layer for v in V
+        # unmatched gives a list of unmatched vertices in final layer of V,
+        # and is also used as a flag value for pred[u] when u is in the first
+        # layer
+        preds = {}
+        unmatched = []
+        pred = {u: unmatched for u in G}
+        for v in matching:
+            del pred[matching[v]]
+        layer = list(pred)
+
+        # repeatedly extend layering structure by another pair of layers
+        while layer and not unmatched:
+            newLayer = {}
+            for u in layer:
+                for v in G[u]:
+                    if v not in preds:
+                        newLayer.setdefault(v, []).append(u)
+            layer = []
+            for v in newLayer:
+                preds[v] = newLayer[v]
+                if v in matching:
+                    layer.append(matching[v])
+                    pred[matching[v]] = v
+                else:
+                    unmatched.append(v)
+
+        # did we finish layering without finding any alternating paths?
+        if not unmatched:
+            # TODO - The lines between --- were unused and were thus commented
+            # out. This whole commented chunk should be reviewed to determine
+            # whether it should be built upon or completely removed.
+            # ---
+            # unlayered = {}
+            # for u in G:
+            #     # TODO Why is extra inner loop necessary?
+            #     for v in G[u]:
+            #         if v not in preds:
+            #             unlayered[v] = None
+            # ---
+            # TODO Originally, this function returned a three-tuple:
+            #
+            #     return (matching, list(pred), list(unlayered))
+            #
+            # For some reason, the documentation for this function
+            # indicated that the second and third elements of the returned
+            # three-tuple would be the vertices in the left and right vertex
+            # sets, respectively, that are also in the maximum independent set.
+            # However, what I think the author meant was that the second
+            # element is the list of vertices that were unmatched and the third
+            # element was the list of vertices that were matched. Since that
+            # seems to be the case, they don't really need to be returned,
+            # since that information can be inferred from the matching
+            # dictionary.
+
+            # All the matched nodes must be a key in the dictionary
+            for key in matching.copy():
+                matching[matching[key]] = key
+            return matching
+
+        # recursively search backward through layers to find alternating paths
+        # recursion returns true if found path, false otherwise
+        def recurse(v):
+            if v in preds:
+                L = preds.pop(v)
+                for u in L:
+                    if u in pred:
+                        pu = pred.pop(u)
+                        if pu is unmatched or recurse(pu):
+                            matching[v] = u
+                            return True
+            return False
+
+        for v in unmatched:
+            recurse(v)
+
+
+def _is_connected_by_alternating_path(G, v, matched_edges, unmatched_edges, targets):
     """Returns True if and only if the vertex `v` is connected to one of
     the target vertices by an alternating path in `G`.

@@ -175,7 +342,44 @@ def _is_connected_by_alternating_path(G, v, matched_edges, unmatched_edges,
     `targets` is a set of vertices.

     """
-    pass
+
+    def _alternating_dfs(u, along_matched=True):
+        """Returns True if and only if `u` is connected to one of the
+        targets by an alternating path.
+
+        `u` is a vertex in the graph `G`.
+
+        If `along_matched` is True, this step of the depth-first search
+        will continue only through edges in the given matching. Otherwise, it
+        will continue only through edges *not* in the given matching.
+
+        """
+        visited = set()
+        # Follow matched edges when depth is even,
+        # and follow unmatched edges when depth is odd.
+        initial_depth = 0 if along_matched else 1
+        stack = [(u, iter(G[u]), initial_depth)]
+        while stack:
+            parent, children, depth = stack[-1]
+            valid_edges = matched_edges if depth % 2 else unmatched_edges
+            try:
+                child = next(children)
+                if child not in visited:
+                    if (parent, child) in valid_edges or (child, parent) in valid_edges:
+                        if child in targets:
+                            return True
+                        visited.add(child)
+                        stack.append((child, iter(G[child]), depth + 1))
+            except StopIteration:
+                stack.pop()
+        return False
+
+    # Check for alternating paths starting with edges in the matching, then
+    # check for alternating paths starting with edges not in the
+    # matching.
+    return _alternating_dfs(v, along_matched=True) or _alternating_dfs(
+        v, along_matched=False
+    )


 def _connected_by_alternating_paths(G, matching, targets):
@@ -196,7 +400,24 @@ def _connected_by_alternating_paths(G, matching, targets):
     `targets` is a set of vertices.

     """
-    pass
+    # Get the set of matched edges and the set of unmatched edges. Only include
+    # one version of each undirected edge (for example, include edge (1, 2) but
+    # not edge (2, 1)). Using frozensets as an intermediary step we do not
+    # require nodes to be orderable.
+    edge_sets = {frozenset((u, v)) for u, v in matching.items()}
+    matched_edges = {tuple(edge) for edge in edge_sets}
+    unmatched_edges = {
+        (u, v) for (u, v) in G.edges() if frozenset((u, v)) not in edge_sets
+    }
+
+    return {
+        v
+        for v in G
+        if v in targets
+        or _is_connected_by_alternating_path(
+            G, v, matched_edges, unmatched_edges, targets
+        )
+    }


 @nx._dispatchable
@@ -260,30 +481,44 @@ def to_vertex_cover(G, matching, top_nodes=None):
     for further details on how bipartite graphs are handled in NetworkX.

     """
-    pass
-
-
+    # This is a Python implementation of the algorithm described at
+    # <https://en.wikipedia.org/wiki/K%C3%B6nig%27s_theorem_%28graph_theory%29#Proof>.
+    L, R = bipartite_sets(G, top_nodes)
+    # Let U be the set of unmatched vertices in the left vertex set.
+    unmatched_vertices = set(G) - set(matching)
+    U = unmatched_vertices & L
+    # Let Z be the set of vertices that are either in U or are connected to U
+    # by alternating paths.
+    Z = _connected_by_alternating_paths(G, matching, U)
+    # At this point, every edge either has a right endpoint in Z or a left
+    # endpoint not in Z. This gives us the vertex cover.
+    return (L - Z) | (R & Z)
+
+
+#: Returns the maximum cardinality matching in the given bipartite graph.
+#:
+#: This function is simply an alias for :func:`hopcroft_karp_matching`.
 maximum_matching = hopcroft_karp_matching


-@nx._dispatchable(edge_attrs='weight')
-def minimum_weight_full_matching(G, top_nodes=None, weight='weight'):
-    """Returns a minimum weight full matching of the bipartite graph `G`.
+@nx._dispatchable(edge_attrs="weight")
+def minimum_weight_full_matching(G, top_nodes=None, weight="weight"):
+    r"""Returns a minimum weight full matching of the bipartite graph `G`.

     Let :math:`G = ((U, V), E)` be a weighted bipartite graph with real weights
-    :math:`w : E \\to \\mathbb{R}`. This function then produces a matching
-    :math:`M \\subseteq E` with cardinality
+    :math:`w : E \to \mathbb{R}`. This function then produces a matching
+    :math:`M \subseteq E` with cardinality

     .. math::
-       \\lvert M \\rvert = \\min(\\lvert U \\rvert, \\lvert V \\rvert),
+       \lvert M \rvert = \min(\lvert U \rvert, \lvert V \rvert),

     which minimizes the sum of the weights of the edges included in the
-    matching, :math:`\\sum_{e \\in M} w(e)`, or raises an error if no such
+    matching, :math:`\sum_{e \in M} w(e)`, or raises an error if no such
     matching exists.

-    When :math:`\\lvert U \\rvert = \\lvert V \\rvert`, this is commonly
+    When :math:`\lvert U \rvert = \lvert V \rvert`, this is commonly
     referred to as a perfect matching; here, since we allow
-    :math:`\\lvert U \\rvert` and :math:`\\lvert V \\rvert` to differ, we
+    :math:`\lvert U \rvert` and :math:`\lvert V \rvert` to differ, we
     follow Karp [1]_ and refer to the matching as *full*.

     Parameters
@@ -332,4 +567,23 @@ def minimum_weight_full_matching(G, top_nodes=None, weight='weight'):
        Networks, 10(2):143–152, 1980.

     """
-    pass
+    import numpy as np
+    import scipy as sp
+
+    left, right = nx.bipartite.sets(G, top_nodes)
+    U = list(left)
+    V = list(right)
+    # We explicitly create the biadjacency matrix having infinities
+    # where edges are missing (as opposed to zeros, which is what one would
+    # get by using toarray on the sparse matrix).
+    weights_sparse = biadjacency_matrix(
+        G, row_order=U, column_order=V, weight=weight, format="coo"
+    )
+    weights = np.full(weights_sparse.shape, np.inf)
+    weights[weights_sparse.row, weights_sparse.col] = weights_sparse.data
+    left_matches = sp.optimize.linear_sum_assignment(weights)
+    d = {U[u]: V[v] for u, v in zip(*left_matches)}
+    # d will contain the matching from edges in left to right; we need to
+    # add the ones from right to left as well.
+    d.update({v: u for u, v in d.items()})
+    return d
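
The matching stack ties together: `hopcroft_karp_matching` feeds `to_vertex_cover` (König's theorem gives |cover| = |matching|), and `minimum_weight_full_matching` additionally assumes SciPy is installed. A sketch (illustrative, not part of the patch):

    import networkx as nx
    from networkx.algorithms import bipartite

    G = nx.complete_bipartite_graph(3, 3)
    matching = bipartite.hopcroft_karp_matching(G, top_nodes={0, 1, 2})
    print(len(matching) // 2)  # 3 matched pairs (both directions are stored)
    cover = bipartite.to_vertex_cover(G, matching, top_nodes={0, 1, 2})
    print(len(cover))          # 3, matching König's bound

    H = nx.Graph()
    H.add_weighted_edges_from([("a", "x", 2), ("a", "y", 1), ("b", "x", 1)])
    print(bipartite.minimum_weight_full_matching(H, top_nodes=["a", "b"]))
    # {'a': 'y', 'y': 'a', 'b': 'x', 'x': 'b'} (key order may vary)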
diff --git a/networkx/algorithms/bipartite/matrix.py b/networkx/algorithms/bipartite/matrix.py
index dc5ad9920..462ef8a13 100644
--- a/networkx/algorithms/bipartite/matrix.py
+++ b/networkx/algorithms/bipartite/matrix.py
@@ -4,20 +4,23 @@ Biadjacency matrices
 ====================
 """
 import itertools
+
 import networkx as nx
 from networkx.convert_matrix import _generate_weighted_edges
-__all__ = ['biadjacency_matrix', 'from_biadjacency_matrix']
+
+__all__ = ["biadjacency_matrix", "from_biadjacency_matrix"]


-@nx._dispatchable(edge_attrs='weight')
-def biadjacency_matrix(G, row_order, column_order=None, dtype=None, weight=
-    'weight', format='csr'):
-    """Returns the biadjacency matrix of the bipartite graph G.
+@nx._dispatchable(edge_attrs="weight")
+def biadjacency_matrix(
+    G, row_order, column_order=None, dtype=None, weight="weight", format="csr"
+):
+    r"""Returns the biadjacency matrix of the bipartite graph G.

     Let `G = (U, V, E)` be a bipartite graph with node sets
     `U = u_{1},...,u_{r}` and `V = v_{1},...,v_{s}`. The biadjacency
     matrix [1]_ is the `r` x `s` matrix `B` in which `b_{i,j} = 1`
-    if, and only if, `(u_i, v_j) \\in E`. If the parameter `weight` is
+    if, and only if, `(u_i, v_j) \in E`. If the parameter `weight` is
     not `None` and matches the name of an edge attribute, its value is
     used instead of 1.

@@ -72,12 +75,44 @@ def biadjacency_matrix(G, row_order, column_order=None, dtype=None, weight=
     .. [2] Scipy Dev. References, "Sparse Matrices",
        https://docs.scipy.org/doc/scipy/reference/sparse.html
     """
-    pass
+    import scipy as sp
+
+    nlen = len(row_order)
+    if nlen == 0:
+        raise nx.NetworkXError("row_order is empty list")
+    if len(row_order) != len(set(row_order)):
+        msg = "Ambiguous ordering: `row_order` contained duplicates."
+        raise nx.NetworkXError(msg)
+    if column_order is None:
+        column_order = list(set(G) - set(row_order))
+    mlen = len(column_order)
+    if len(column_order) != len(set(column_order)):
+        msg = "Ambiguous ordering: `column_order` contained duplicates."
+        raise nx.NetworkXError(msg)
+
+    row_index = dict(zip(row_order, itertools.count()))
+    col_index = dict(zip(column_order, itertools.count()))
+
+    if G.number_of_edges() == 0:
+        row, col, data = [], [], []
+    else:
+        row, col, data = zip(
+            *(
+                (row_index[u], col_index[v], d.get(weight, 1))
+                for u, v, d in G.edges(row_order, data=True)
+                if u in row_index and v in col_index
+            )
+        )
+    A = sp.sparse.coo_array((data, (row, col)), shape=(nlen, mlen), dtype=dtype)
+    try:
+        return A.asformat(format)
+    except ValueError as err:
+        raise nx.NetworkXError(f"Unknown sparse array format: {format}") from err


 @nx._dispatchable(graphs=None, returns_graph=True)
-def from_biadjacency_matrix(A, create_using=None, edge_attribute='weight'):
-    """Creates a new bipartite graph from a biadjacency matrix given as a
+def from_biadjacency_matrix(A, create_using=None, edge_attribute="weight"):
+    r"""Creates a new bipartite graph from a biadjacency matrix given as a
     SciPy sparse array.

     Parameters
@@ -112,4 +147,21 @@ def from_biadjacency_matrix(A, create_using=None, edge_attribute='weight'):
     ----------
     [1] https://en.wikipedia.org/wiki/Adjacency_matrix#Adjacency_matrix_of_a_bipartite_graph
     """
-    pass
+    G = nx.empty_graph(0, create_using)
+    n, m = A.shape
+    # Make sure we get even the isolated nodes of the graph.
+    G.add_nodes_from(range(n), bipartite=0)
+    G.add_nodes_from(range(n, n + m), bipartite=1)
+    # Create an iterable over (u, v, w) triples and for each triple, add an
+    # edge from u to v with weight w.
+    triples = ((u, n + v, d) for (u, v, d) in _generate_weighted_edges(A))
+    # If the entries in the adjacency matrix are integers and the graph is a
+    # multigraph, then create parallel edges, each with weight 1, for each
+    # entry in the adjacency matrix. Otherwise, create one edge for each
+    # positive entry in the adjacency matrix and set the weight of that edge to
+    # be the entry in the matrix.
+    if A.dtype.kind in ("i", "u") and G.is_multigraph():
+        chain = itertools.chain.from_iterable
+        triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples)
+    G.add_weighted_edges_from(triples, weight=edge_attribute)
+    return G
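
And a biadjacency round-trip (illustrative sketch; assumes SciPy is available, since `biadjacency_matrix` builds a sparse array):

    import networkx as nx
    from networkx.algorithms import bipartite

    G = nx.complete_bipartite_graph(2, 3)
    B = bipartite.biadjacency_matrix(G, row_order=[0, 1])
    print(B.toarray())  # 2 x 3 array of ones
    H = bipartite.from_biadjacency_matrix(B)
    print(nx.is_isomorphic(G, H))  # True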
diff --git a/networkx/algorithms/bipartite/projection.py b/networkx/algorithms/bipartite/projection.py
index d45be2a76..1eb71fa52 100644
--- a/networkx/algorithms/bipartite/projection.py
+++ b/networkx/algorithms/bipartite/projection.py
@@ -2,15 +2,21 @@
 import networkx as nx
 from networkx.exception import NetworkXAlgorithmError
 from networkx.utils import not_implemented_for
-__all__ = ['projected_graph', 'weighted_projected_graph',
-    'collaboration_weighted_projected_graph',
-    'overlap_weighted_projected_graph', 'generic_weighted_projected_graph']

+__all__ = [
+    "projected_graph",
+    "weighted_projected_graph",
+    "collaboration_weighted_projected_graph",
+    "overlap_weighted_projected_graph",
+    "generic_weighted_projected_graph",
+]

-@nx._dispatchable(graphs='B', preserve_node_attrs=True,
-    preserve_graph_attrs=True, returns_graph=True)
+
+@nx._dispatchable(
+    graphs="B", preserve_node_attrs=True, preserve_graph_attrs=True, returns_graph=True
+)
 def projected_graph(B, nodes, multigraph=False):
-    """Returns the projection of B onto one of its node sets.
+    r"""Returns the projection of B onto one of its node sets.

     Returns the graph G that is the projection of the bipartite graph B
     onto the specified nodes. They retain their attributes and are connected
@@ -80,13 +86,42 @@ def projected_graph(B, nodes, multigraph=False):
     overlap_weighted_projected_graph,
     generic_weighted_projected_graph
     """
-    pass
-
-
-@not_implemented_for('multigraph')
-@nx._dispatchable(graphs='B', returns_graph=True)
+    if B.is_multigraph():
+        raise nx.NetworkXError("not defined for multigraphs")
+    if B.is_directed():
+        directed = True
+        if multigraph:
+            G = nx.MultiDiGraph()
+        else:
+            G = nx.DiGraph()
+    else:
+        directed = False
+        if multigraph:
+            G = nx.MultiGraph()
+        else:
+            G = nx.Graph()
+    G.graph.update(B.graph)
+    G.add_nodes_from((n, B.nodes[n]) for n in nodes)
+    for u in nodes:
+        nbrs2 = {v for nbr in B[u] for v in B[nbr] if v != u}
+        if multigraph:
+            for n in nbrs2:
+                if directed:
+                    links = set(B[u]) & set(B.pred[n])
+                else:
+                    links = set(B[u]) & set(B[n])
+                for l in links:
+                    if not G.has_edge(u, n, l):
+                        G.add_edge(u, n, key=l)
+        else:
+            G.add_edges_from((u, n) for n in nbrs2)
+    return G
+
+
+@not_implemented_for("multigraph")
+@nx._dispatchable(graphs="B", returns_graph=True)
 def weighted_projected_graph(B, nodes, ratio=False):
-    """Returns a weighted projection of B onto one of its node sets.
+    r"""Returns a weighted projection of B onto one of its node sets.

     The weighted projected graph is the projection of the bipartite
     network B onto the specified nodes with weights representing the
@@ -154,13 +189,40 @@ def weighted_projected_graph(B, nodes, ratio=False):
         Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
         of Social Network Analysis. Sage Publications.
     """
-    pass
-
-
-@not_implemented_for('multigraph')
-@nx._dispatchable(graphs='B', returns_graph=True)
+    if B.is_directed():
+        pred = B.pred
+        G = nx.DiGraph()
+    else:
+        pred = B.adj
+        G = nx.Graph()
+    G.graph.update(B.graph)
+    G.add_nodes_from((n, B.nodes[n]) for n in nodes)
+    n_top = len(B) - len(nodes)
+
+    if n_top < 1:
+        raise NetworkXAlgorithmError(
+            f"the size of the nodes to project onto ({len(nodes)}) is >= the graph size ({len(B)}).\n"
+            "They are either not a valid bipartite partition or contain duplicates"
+        )
+
+    for u in nodes:
+        unbrs = set(B[u])
+        nbrs2 = {n for nbr in unbrs for n in B[nbr]} - {u}
+        for v in nbrs2:
+            vnbrs = set(pred[v])
+            common = unbrs & vnbrs
+            if not ratio:
+                weight = len(common)
+            else:
+                weight = len(common) / n_top
+            G.add_edge(u, v, weight=weight)
+    return G
+
+
+@not_implemented_for("multigraph")
+@nx._dispatchable(graphs="B", returns_graph=True)
 def collaboration_weighted_projected_graph(B, nodes):
-    """Newman's weighted projection of B onto one of its node sets.
+    r"""Newman's weighted projection of B onto one of its node sets.

     The collaboration weighted projection is the projection of the
     bipartite network B onto the specified nodes with weights assigned
@@ -168,12 +230,12 @@ def collaboration_weighted_projected_graph(B, nodes):

     .. math::

-        w_{u, v} = \\sum_k \\frac{\\delta_{u}^{k} \\delta_{v}^{k}}{d_k - 1}
+        w_{u, v} = \sum_k \frac{\delta_{u}^{k} \delta_{v}^{k}}{d_k - 1}

     where `u` and `v` are nodes from the bottom bipartite node set,
     and `k` is a node of the top node set.
     The value `d_k` is the degree of node `k` in the bipartite
-    network and `\\delta_{u}^{k}` is 1 if node `u` is
+    network and `\delta_{u}^{k}` is 1 if node `u` is
     linked to node `k` in the original bipartite graph or 0 otherwise.

     The nodes retain their attributes and are connected in the resulting
@@ -232,13 +294,29 @@ def collaboration_weighted_projected_graph(B, nodes):
         Shortest paths, weighted networks, and centrality,
         M. E. J. Newman, Phys. Rev. E 64, 016132 (2001).
     """
-    pass
-
-
-@not_implemented_for('multigraph')
-@nx._dispatchable(graphs='B', returns_graph=True)
+    if B.is_directed():
+        pred = B.pred
+        G = nx.DiGraph()
+    else:
+        pred = B.adj
+        G = nx.Graph()
+    G.graph.update(B.graph)
+    G.add_nodes_from((n, B.nodes[n]) for n in nodes)
+    for u in nodes:
+        unbrs = set(B[u])
+        nbrs2 = {n for nbr in unbrs for n in B[nbr] if n != u}
+        for v in nbrs2:
+            vnbrs = set(pred[v])
+            common_degree = (len(B[n]) for n in unbrs & vnbrs)
+            weight = sum(1.0 / (deg - 1) for deg in common_degree if deg > 1)
+            G.add_edge(u, v, weight=weight)
+    return G
+
+
+@not_implemented_for("multigraph")
+@nx._dispatchable(graphs="B", returns_graph=True)
 def overlap_weighted_projected_graph(B, nodes, jaccard=True):
-    """Overlap weighted projection of B onto one of its node sets.
+    r"""Overlap weighted projection of B onto one of its node sets.

     The overlap weighted projection is the projection of the bipartite
     network B onto the specified nodes with weights representing
@@ -247,7 +325,7 @@ def overlap_weighted_projected_graph(B, nodes, jaccard=True):

     .. math::

-        w_{v, u} = \\frac{|N(u) \\cap N(v)|}{|N(u) \\cup N(v)|}
+        w_{v, u} = \frac{|N(u) \cap N(v)|}{|N(u) \cup N(v)|}

     or, if the parameter 'jaccard' is False, the fraction of common
     neighbors divided by the minimum degree of the two nodes in the original
@@ -255,7 +333,7 @@ def overlap_weighted_projected_graph(B, nodes, jaccard=True):

     .. math::

-        w_{v, u} = \\frac{|N(u) \\cap N(v)|}{min(|N(u)|, |N(v)|)}
+        w_{v, u} = \frac{|N(u) \cap N(v)|}{min(|N(u)|, |N(v)|)}

     The nodes retain their attributes and are connected in the resulting
     graph if they have an edge to a common node in the original bipartite graph.
@@ -314,13 +392,31 @@ def overlap_weighted_projected_graph(B, nodes, jaccard=True):
         of Social Network Analysis. Sage Publications.

     """
-    pass
-
-
-@not_implemented_for('multigraph')
-@nx._dispatchable(graphs='B', preserve_all_attrs=True, returns_graph=True)
+    if B.is_directed():
+        pred = B.pred
+        G = nx.DiGraph()
+    else:
+        pred = B.adj
+        G = nx.Graph()
+    G.graph.update(B.graph)
+    G.add_nodes_from((n, B.nodes[n]) for n in nodes)
+    for u in nodes:
+        unbrs = set(B[u])
+        nbrs2 = {n for nbr in unbrs for n in B[nbr]} - {u}
+        for v in nbrs2:
+            vnbrs = set(pred[v])
+            if jaccard:
+                wt = len(unbrs & vnbrs) / len(unbrs | vnbrs)
+            else:
+                wt = len(unbrs & vnbrs) / min(len(unbrs), len(vnbrs))
+            G.add_edge(u, v, weight=wt)
+    return G
+
+
+@not_implemented_for("multigraph")
+@nx._dispatchable(graphs="B", preserve_all_attrs=True, returns_graph=True)
 def generic_weighted_projected_graph(B, nodes, weight_function=None):
-    """Weighted projection of B with a user-specified weight function.
+    r"""Weighted projection of B with a user-specified weight function.

     The bipartite network B is projected on to the specified nodes
     with weights computed by a user-specified function.  This function
@@ -403,4 +499,23 @@ def generic_weighted_projected_graph(B, nodes, weight_function=None):
     projected_graph

     """
-    pass
+    if B.is_directed():
+        pred = B.pred
+        G = nx.DiGraph()
+    else:
+        pred = B.adj
+        G = nx.Graph()
+    if weight_function is None:
+
+        def weight_function(G, u, v):
+            # Notice that we use set(pred[v]) for handling the directed case.
+            return len(set(G[u]) & set(pred[v]))
+
+    G.graph.update(B.graph)
+    G.add_nodes_from((n, B.nodes[n]) for n in nodes)
+    for u in nodes:
+        nbrs2 = {n for nbr in set(B[u]) for n in B[nbr]} - {u}
+        for v in nbrs2:
+            weight = weight_function(B, u, v)
+            G.add_edge(u, v, weight=weight)
+    return G
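
Doctest-style sketches exercising the restored projection functions on tiny graphs (illustrative only, not part of the patch; the expected values follow directly from the definitions above):

    >>> import networkx as nx
    >>> B = nx.path_graph(4)  # bipartite sets {0, 2} and {1, 3}
    >>> sorted(nx.bipartite.projected_graph(B, [1, 3]).edges())
    [(1, 3)]
    >>> B = nx.complete_bipartite_graph(2, 2)  # bottom {0, 1}, top {2, 3}
    >>> nx.bipartite.weighted_projected_graph(B, [0, 1])[0][1]["weight"]
    2
    >>> nx.bipartite.weighted_projected_graph(B, [0, 1], ratio=True)[0][1]["weight"]
    1.0
    >>> B = nx.path_graph(5)
    >>> sorted(nx.bipartite.collaboration_weighted_projected_graph(B, [0, 2, 4]).edges(data=True))
    [(0, 2, {'weight': 1.0}), (2, 4, {'weight': 1.0})]
    >>> nx.bipartite.overlap_weighted_projected_graph(B, [0, 2, 4])[0][2]["weight"]
    0.5
    >>> shared = lambda G, u, v: len(set(G[u]) & set(G[v]))
    >>> nx.bipartite.generic_weighted_projected_graph(B, [0, 2, 4], weight_function=shared)[0][2]["weight"]
    1
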
diff --git a/networkx/algorithms/bipartite/redundancy.py b/networkx/algorithms/bipartite/redundancy.py
index 6d8f53973..7a44d2128 100644
--- a/networkx/algorithms/bipartite/redundancy.py
+++ b/networkx/algorithms/bipartite/redundancy.py
@@ -1,13 +1,15 @@
 """Node redundancy for bipartite graphs."""
 from itertools import combinations
+
 import networkx as nx
 from networkx import NetworkXError
-__all__ = ['node_redundancy']
+
+__all__ = ["node_redundancy"]


 @nx._dispatchable
 def node_redundancy(G, nodes=None):
-    """Computes the node redundancy coefficients for the nodes in the bipartite
+    r"""Computes the node redundancy coefficients for the nodes in the bipartite
     graph `G`.

     The redundancy coefficient of a node `v` is the fraction of pairs of
@@ -20,9 +22,9 @@ def node_redundancy(G, nodes=None):

     .. math::

-        rc(v) = \\frac{|\\{\\{u, w\\} \\subseteq N(v),
-        \\: \\exists v' \\neq  v,\\: (v',u) \\in E\\:
-        \\mathrm{and}\\: (v',w) \\in E\\}|}{ \\frac{|N(v)|(|N(v)|-1)}{2}},
+        rc(v) = \frac{|\{\{u, w\} \subseteq N(v),
+        \: \exists v' \neq  v,\: (v',u) \in E\:
+        \mathrm{and}\: (v',w) \in E\}|}{ \frac{|N(v)|(|N(v)|-1)}{2}},

     where `N(v)` is the set of neighbors of `v` in `G`.

@@ -80,7 +82,15 @@ def node_redundancy(G, nodes=None):
        Social Networks 30(1), 31--48.

     """
-    pass
+    if nodes is None:
+        nodes = G
+    if any(len(G[v]) < 2 for v in nodes):
+        raise NetworkXError(
+            "Cannot compute redundancy coefficient for a node"
+            " that has fewer than two neighbors."
+        )
+    # TODO This can be trivially parallelized.
+    return {v: _node_redundancy(G, v) for v in nodes}


 def _node_redundancy(G, v):
@@ -94,4 +104,8 @@ def _node_redundancy(G, v):
     `v` must have at least two neighbors in `G`.

     """
-    pass
+    n = len(G[v])
+    overlap = sum(
+        1 for (u, w) in combinations(G[v], 2) if (set(G[u]) & set(G[w])) - {v}
+    )
+    return (2 * overlap) / (n * (n - 1))
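
A small doctest-style check of the restored `node_redundancy` (a sketch, not part of the patch): in a 4-cycle, the two neighbors of each node are also joined through the opposite node, so every coefficient is 1.

    >>> import networkx as nx
    >>> G = nx.cycle_graph(4)
    >>> nx.bipartite.node_redundancy(G)
    {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0}
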
diff --git a/networkx/algorithms/bipartite/spectral.py b/networkx/algorithms/bipartite/spectral.py
index fe6188e5c..61a56dd2c 100644
--- a/networkx/algorithms/bipartite/spectral.py
+++ b/networkx/algorithms/bipartite/spectral.py
@@ -2,11 +2,12 @@
 Spectral bipartivity measure.
 """
 import networkx as nx
-__all__ = ['spectral_bipartivity']

+__all__ = ["spectral_bipartivity"]

-@nx._dispatchable(edge_attrs='weight')
-def spectral_bipartivity(G, nodes=None, weight='weight'):
+
+@nx._dispatchable(edge_attrs="weight")
+def spectral_bipartivity(G, nodes=None, weight="weight"):
     """Returns the spectral bipartivity.

     Parameters
@@ -47,4 +48,21 @@ def spectral_bipartivity(G, nodes=None, weight='weight'):
     .. [1] E. Estrada and J. A. Rodríguez-Velázquez, "Spectral measures of
        bipartivity in complex networks", PhysRev E 72, 046105 (2005)
     """
-    pass
+    import scipy as sp
+
+    nodelist = list(G)  # ordering of nodes in matrix
+    A = nx.to_numpy_array(G, nodelist, weight=weight)
+    expA = sp.linalg.expm(A)
+    expmA = sp.linalg.expm(-A)
+    coshA = 0.5 * (expA + expmA)
+    if nodes is None:
+        # return single number for entire graph
+        return float(coshA.diagonal().sum() / expA.diagonal().sum())
+    else:
+        # contribution for individual nodes
+        index = dict(zip(nodelist, range(len(nodelist))))
+        sb = {}
+        for n in nodes:
+            i = index[n]
+            sb[n] = coshA.item(i, i) / expA.item(i, i)
+        return sb
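
Doctest-style sketch for the restored `spectral_bipartivity` (assumes SciPy; not part of the patch). The measure is exactly 1 for bipartite graphs and drops below 1 once an odd cycle appears:

    >>> import networkx as nx
    >>> G = nx.path_graph(4)
    >>> round(nx.bipartite.spectral_bipartivity(G), 6)
    1.0
    >>> G.add_edge(0, 2)  # create a triangle
    >>> nx.bipartite.spectral_bipartivity(G) < 1
    True
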
diff --git a/networkx/algorithms/boundary.py b/networkx/algorithms/boundary.py
index 86e7511a1..fef9ba223 100644
--- a/networkx/algorithms/boundary.py
+++ b/networkx/algorithms/boundary.py
@@ -9,13 +9,14 @@ nodes in *S* that are outside *S*.

 """
 from itertools import chain
+
 import networkx as nx
-__all__ = ['edge_boundary', 'node_boundary']
+
+__all__ = ["edge_boundary", "node_boundary"]


-@nx._dispatchable(edge_attrs={'data': 'default'}, preserve_edge_attrs='data')
-def edge_boundary(G, nbunch1, nbunch2=None, data=False, keys=False, default
-    =None):
+@nx._dispatchable(edge_attrs={"data": "default"}, preserve_edge_attrs="data")
+def edge_boundary(G, nbunch1, nbunch2=None, data=False, keys=False, default=None):
     """Returns the edge boundary of `nbunch1`.

     The *edge boundary* of a set *S* with respect to a set *T* is the
@@ -81,7 +82,28 @@ def edge_boundary(G, nbunch1, nbunch2=None, data=False, keys=False, default
     the interest of speed and generality, that is not required here.

     """
-    pass
+    nset1 = {n for n in nbunch1 if n in G}
+    # Here we create an iterator over edges incident to nodes in the set
+    # `nset1`. The `Graph.edges()` method does not provide a guarantee
+    # on the orientation of the edges, so our algorithm below must
+    # handle the case in which exactly one orientation, either (u, v) or
+    # (v, u), appears in this iterable.
+    if G.is_multigraph():
+        edges = G.edges(nset1, data=data, keys=keys, default=default)
+    else:
+        edges = G.edges(nset1, data=data, default=default)
+    # If `nbunch2` is not provided, then it is assumed to be the set
+    # complement of `nbunch1`. For the sake of efficiency, this is
+    # implemented by using the `not in` operator, instead of by creating
+    # an additional set and using the `in` operator.
+    if nbunch2 is None:
+        return (e for e in edges if (e[0] in nset1) ^ (e[1] in nset1))
+    nset2 = set(nbunch2)
+    return (
+        e
+        for e in edges
+        if (e[0] in nset1 and e[1] in nset2) or (e[1] in nset1 and e[0] in nset2)
+    )


 @nx._dispatchable
@@ -136,4 +158,10 @@ def node_boundary(G, nbunch1, nbunch2=None):
     the interest of speed and generality, that is not required here.

     """
-    pass
+    nset1 = {n for n in nbunch1 if n in G}
+    bdy = set(chain.from_iterable(G[v] for v in nset1)) - nset1
+    # If `nbunch2` is not specified, it is assumed to be the set
+    # complement of `nbunch1`.
+    if nbunch2 is not None:
+        bdy &= set(nbunch2)
+    return bdy
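
Doctest-style sketches for the restored boundary functions (a sketch, not part of the patch):

    >>> import networkx as nx
    >>> G = nx.path_graph(5)
    >>> list(nx.edge_boundary(G, [0, 1]))
    [(1, 2)]
    >>> nx.node_boundary(G, [0, 1])
    {2}
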
diff --git a/networkx/algorithms/bridges.py b/networkx/algorithms/bridges.py
index fc5a86017..e076a256c 100644
--- a/networkx/algorithms/bridges.py
+++ b/networkx/algorithms/bridges.py
@@ -1,11 +1,13 @@
 """Bridge-finding algorithms."""
 from itertools import chain
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['bridges', 'has_bridges', 'local_bridges']
+
+__all__ = ["bridges", "has_bridges", "local_bridges"]


-@not_implemented_for('directed')
+@not_implemented_for("directed")
 @nx._dispatchable
 def bridges(G, root=None):
     """Generate all bridges in a graph.
@@ -64,10 +66,21 @@ def bridges(G, root=None):
     ----------
     .. [1] https://en.wikipedia.org/wiki/Bridge_%28graph_theory%29#Bridge-Finding_with_Chain_Decompositions
     """
-    pass
-
-
-@not_implemented_for('directed')
+    multigraph = G.is_multigraph()
+    H = nx.Graph(G) if multigraph else G
+    chains = nx.chain_decomposition(H, root=root)
+    chain_edges = set(chain.from_iterable(chains))
+    if root is not None:
+        H = H.subgraph(nx.node_connected_component(H, root)).copy()
+    for u, v in H.edges():
+        if (u, v) not in chain_edges and (v, u) not in chain_edges:
+            if multigraph and len(G[u][v]) > 1:  # a parallel edge is never a bridge
+                continue
+            yield u, v
+
+
+@not_implemented_for("directed")
 @nx._dispatchable
 def has_bridges(G, root=None):
     """Decide whether a graph has any bridges.
@@ -119,12 +132,17 @@ def has_bridges(G, root=None):
     graph and $m$ is the number of edges.

     """
-    pass
+    try:
+        next(bridges(G, root=root))
+    except StopIteration:
+        return False
+    else:
+        return True


-@not_implemented_for('multigraph')
-@not_implemented_for('directed')
-@nx._dispatchable(edge_attrs='weight')
+@not_implemented_for("multigraph")
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
 def local_bridges(G, with_span=True, weight=None):
     """Iterate over local bridges of `G` optionally computing the span

@@ -165,4 +183,23 @@ def local_bridges(G, with_span=True, weight=None):
        >>> (0, 8, 8) in set(nx.local_bridges(G))
        True
     """
-    pass
+    if with_span is not True:
+        for u, v in G.edges:
+            if not (set(G[u]) & set(G[v])):
+                yield u, v
+    else:
+        wt = nx.weighted._weight_function(G, weight)
+        for u, v in G.edges:
+            if not (set(G[u]) & set(G[v])):
+                enodes = {u, v}
+
+                def hide_edge(n, nbr, d):
+                    if n not in enodes or nbr not in enodes:
+                        return wt(n, nbr, d)
+                    return None
+
+                try:
+                    span = nx.shortest_path_length(G, u, v, weight=hide_edge)
+                    yield u, v, span
+                except nx.NetworkXNoPath:
+                    yield u, v, float("inf")
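
Doctest-style sketches for the restored bridge functions (not part of the patch). In a barbell graph the single connecting edge is the only bridge, and it is also a local bridge because its endpoints share no neighbors:

    >>> import networkx as nx
    >>> G = nx.barbell_graph(3, 0)  # two triangles joined by one edge
    >>> list(nx.bridges(G))
    [(2, 3)]
    >>> nx.has_bridges(G)
    True
    >>> list(nx.local_bridges(G, with_span=False))
    [(2, 3)]
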
diff --git a/networkx/algorithms/broadcasting.py b/networkx/algorithms/broadcasting.py
index f3f193aec..9b362a0e1 100644
--- a/networkx/algorithms/broadcasting.py
+++ b/networkx/algorithms/broadcasting.py
@@ -11,14 +11,30 @@ following constraints:
 - A node can only participate in one call per unit of time.
 - Each call only involves two adjacent nodes: a sender and a receiver.
 """
+
 import networkx as nx
 from networkx import NetworkXError
 from networkx.utils import not_implemented_for
-__all__ = ['tree_broadcast_center', 'tree_broadcast_time']
+
+__all__ = [
+    "tree_broadcast_center",
+    "tree_broadcast_time",
+]
+
+
+def _get_max_broadcast_value(G, U, v, values):
+    adj = sorted(set(G.neighbors(v)) & U, key=values.get, reverse=True)
+    return max(values[u] + i for i, u in enumerate(adj, start=1))


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+def _get_broadcast_centers(G, v, values, target):
+    adj = sorted(G.neighbors(v), key=values.get, reverse=True)
+    j = next(i for i, u in enumerate(adj, start=1) if values[u] + i == target)
+    return set([v] + adj[:j])
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def tree_broadcast_center(G):
     """Return the Broadcast Center of the tree `G`.
@@ -48,11 +64,50 @@ def tree_broadcast_center(G):
     .. [1] Slater, P.J., Cockayne, E.J., Hedetniemi, S.T,
        Information dissemination in trees. SIAM J.Comput. 10(4), 692–701 (1981)
     """
-    pass
-
-
-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+    # Reject graphs that are not trees
+    if not nx.is_tree(G):
+        raise NetworkXError("Input graph is not a tree")
+    # step 0
+    if G.number_of_nodes() == 2:
+        return 1, set(G.nodes())
+    if G.number_of_nodes() == 1:
+        return 0, set(G.nodes())
+
+    # step 1
+    U = {node for node, deg in G.degree if deg == 1}
+    values = {n: 0 for n in U}
+    T = G.copy()
+    T.remove_nodes_from(U)
+
+    # step 2
+    W = {node for node, deg in T.degree if deg == 1}
+    values.update((w, G.degree[w] - 1) for w in W)
+
+    # step 3
+    while T.number_of_nodes() >= 2:
+        # step 4
+        w = min(W, key=lambda n: values[n])
+        v = next(T.neighbors(w))
+
+        # step 5
+        U.add(w)
+        W.remove(w)
+        T.remove_node(w)
+
+        # step 6
+        if T.degree(v) == 1:
+            # update t(v)
+            values.update({v: _get_max_broadcast_value(G, U, v, values)})
+            W.add(v)
+
+    # step 7
+    v = nx.utils.arbitrary_element(T)
+    b_T = _get_max_broadcast_value(G, U, v, values)
+    return b_T, _get_broadcast_centers(G, v, values, b_T)
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def tree_broadcast_time(G, node=None):
     """Return the Broadcast Time of the tree `G`.
@@ -89,4 +144,12 @@ def tree_broadcast_time(G, node=None):
         In Computing and Combinatorics. COCOON 2019
         (Ed. D. Z. Du and C. Tian.) Springer, pp. 240-253, 2019.
     """
-    pass
+    b_T, b_C = tree_broadcast_center(G)
+    if node is not None:
+        return b_T + min(nx.shortest_path_length(G, node, u) for u in b_C)
+    dist_from_center = dict.fromkeys(G, len(G))  # len(G) bounds every distance
+    for u in b_C:
+        for v, dist in nx.shortest_path_length(G, u).items():
+            if dist < dist_from_center[v]:
+                dist_from_center[v] = dist
+    return b_T + max(dist_from_center.values())
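
Doctest-style sketch for the restored broadcasting functions (not part of the patch). On the path 0-1-2-3 either middle node can inform everyone in two rounds, and the worst originator (a leaf) needs one extra round:

    >>> import networkx as nx
    >>> G = nx.path_graph(4)
    >>> nx.tree_broadcast_center(G)
    (2, {1, 2})
    >>> nx.tree_broadcast_time(G)
    3
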
diff --git a/networkx/algorithms/centrality/betweenness.py b/networkx/algorithms/centrality/betweenness.py
index c1df7c3ee..4f44fb19b 100644
--- a/networkx/algorithms/centrality/betweenness.py
+++ b/networkx/algorithms/centrality/betweenness.py
@@ -2,31 +2,34 @@
 from collections import deque
 from heapq import heappop, heappush
 from itertools import count
+
 import networkx as nx
 from networkx.algorithms.shortest_paths.weighted import _weight_function
 from networkx.utils import py_random_state
 from networkx.utils.decorators import not_implemented_for
-__all__ = ['betweenness_centrality', 'edge_betweenness_centrality']
+
+__all__ = ["betweenness_centrality", "edge_betweenness_centrality"]


 @py_random_state(5)
-@nx._dispatchable(edge_attrs='weight')
-def betweenness_centrality(G, k=None, normalized=True, weight=None,
-    endpoints=False, seed=None):
-    """Compute the shortest-path betweenness centrality for nodes.
+@nx._dispatchable(edge_attrs="weight")
+def betweenness_centrality(
+    G, k=None, normalized=True, weight=None, endpoints=False, seed=None
+):
+    r"""Compute the shortest-path betweenness centrality for nodes.

     Betweenness centrality of a node $v$ is the sum of the
     fraction of all-pairs shortest paths that pass through $v$

     .. math::

-       c_B(v) =\\sum_{s,t \\in V} \\frac{\\sigma(s, t|v)}{\\sigma(s, t)}
+       c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)}

-    where $V$ is the set of nodes, $\\sigma(s, t)$ is the number of
-    shortest $(s, t)$-paths,  and $\\sigma(s, t|v)$ is the number of
+    where $V$ is the set of nodes, $\sigma(s, t)$ is the number of
+    shortest $(s, t)$-paths,  and $\sigma(s, t|v)$ is the number of
     those paths  passing through some  node $v$ other than $s, t$.
-    If $s = t$, $\\sigma(s, t) = 1$, and if $v \\in {s, t}$,
-    $\\sigma(s, t|v) = 0$ [2]_.
+    If $s = t$, $\sigma(s, t) = 1$, and if $v \in {s, t}$,
+    $\sigma(s, t|v) = 0$ [2]_.

     Parameters
     ----------
@@ -122,24 +125,48 @@ def betweenness_centrality(G, k=None, normalized=True, weight=None,
        Sociometry 40: 35–41, 1977
        https://doi.org/10.2307/3033543
     """
-    pass
+    betweenness = dict.fromkeys(G, 0.0)  # b[v]=0 for v in G
+    if k is None:
+        nodes = G
+    else:
+        nodes = seed.sample(list(G.nodes()), k)
+    for s in nodes:
+        # single source shortest paths
+        if weight is None:  # use BFS
+            S, P, sigma, _ = _single_source_shortest_path_basic(G, s)
+        else:  # use Dijkstra's algorithm
+            S, P, sigma, _ = _single_source_dijkstra_path_basic(G, s, weight)
+        # accumulation
+        if endpoints:
+            betweenness, _ = _accumulate_endpoints(betweenness, S, P, sigma, s)
+        else:
+            betweenness, _ = _accumulate_basic(betweenness, S, P, sigma, s)
+    # rescaling
+    betweenness = _rescale(
+        betweenness,
+        len(G),
+        normalized=normalized,
+        directed=G.is_directed(),
+        k=k,
+        endpoints=endpoints,
+    )
+    return betweenness


 @py_random_state(4)
-@nx._dispatchable(edge_attrs='weight')
-def edge_betweenness_centrality(G, k=None, normalized=True, weight=None,
-    seed=None):
-    """Compute betweenness centrality for edges.
+@nx._dispatchable(edge_attrs="weight")
+def edge_betweenness_centrality(G, k=None, normalized=True, weight=None, seed=None):
+    r"""Compute betweenness centrality for edges.

     Betweenness centrality of an edge $e$ is the sum of the
     fraction of all-pairs shortest paths that pass through $e$

     .. math::

-       c_B(e) =\\sum_{s,t \\in V} \\frac{\\sigma(s, t|e)}{\\sigma(s, t)}
+       c_B(e) =\sum_{s,t \in V} \frac{\sigma(s, t|e)}{\sigma(s, t)}

-    where $V$ is the set of nodes, $\\sigma(s, t)$ is the number of
-    shortest $(s, t)$-paths, and $\\sigma(s, t|e)$ is the number of
+    where $V$ is the set of nodes, $\sigma(s, t)$ is the number of
+    shortest $(s, t)$-paths, and $\sigma(s, t|e)$ is the number of
     those paths passing through edge $e$ [2]_.

     Parameters
@@ -196,12 +223,185 @@ def edge_betweenness_centrality(G, k=None, normalized=True, weight=None,
        Social Networks 30(2):136-145, 2008.
        https://doi.org/10.1016/j.socnet.2007.11.001
     """
-    pass
-
-
-@not_implemented_for('graph')
+    betweenness = dict.fromkeys(G, 0.0)  # b[v]=0 for v in G
+    # b[e]=0 for e in G.edges()
+    betweenness.update(dict.fromkeys(G.edges(), 0.0))
+    if k is None:
+        nodes = G
+    else:
+        nodes = seed.sample(list(G.nodes()), k)
+    for s in nodes:
+        # single source shortest paths
+        if weight is None:  # use BFS
+            S, P, sigma, _ = _single_source_shortest_path_basic(G, s)
+        else:  # use Dijkstra's algorithm
+            S, P, sigma, _ = _single_source_dijkstra_path_basic(G, s, weight)
+        # accumulation
+        betweenness = _accumulate_edges(betweenness, S, P, sigma, s)
+    # rescaling
+    for n in G:  # remove nodes to only return edges
+        del betweenness[n]
+    betweenness = _rescale_e(
+        betweenness, len(G), normalized=normalized, directed=G.is_directed()
+    )
+    if G.is_multigraph():
+        betweenness = _add_edge_keys(G, betweenness, weight=weight)
+    return betweenness
+
+
+# helpers for betweenness centrality
+
+
+def _single_source_shortest_path_basic(G, s):
+    S = []
+    P = {}
+    for v in G:
+        P[v] = []
+    sigma = dict.fromkeys(G, 0.0)  # sigma[v]=0 for v in G
+    D = {}
+    sigma[s] = 1.0
+    D[s] = 0
+    Q = deque([s])
+    while Q:  # use BFS to find shortest paths
+        v = Q.popleft()
+        S.append(v)
+        Dv = D[v]
+        sigmav = sigma[v]
+        for w in G[v]:
+            if w not in D:
+                Q.append(w)
+                D[w] = Dv + 1
+            if D[w] == Dv + 1:  # this is a shortest path, count paths
+                sigma[w] += sigmav
+                P[w].append(v)  # predecessors
+    return S, P, sigma, D
+
+
+def _single_source_dijkstra_path_basic(G, s, weight):
+    weight = _weight_function(G, weight)
+    # modified from Eppstein
+    S = []
+    P = {}
+    for v in G:
+        P[v] = []
+    sigma = dict.fromkeys(G, 0.0)  # sigma[v]=0 for v in G
+    D = {}
+    sigma[s] = 1.0
+    push = heappush
+    pop = heappop
+    seen = {s: 0}
+    c = count()
+    Q = []  # use Q as heap with (distance,node id) tuples
+    push(Q, (0, next(c), s, s))
+    while Q:
+        (dist, _, pred, v) = pop(Q)
+        if v in D:
+            continue  # already searched this node.
+        sigma[v] += sigma[pred]  # count paths
+        S.append(v)
+        D[v] = dist
+        for w, edgedata in G[v].items():
+            vw_dist = dist + weight(v, w, edgedata)
+            if w not in D and (w not in seen or vw_dist < seen[w]):
+                seen[w] = vw_dist
+                push(Q, (vw_dist, next(c), v, w))
+                sigma[w] = 0.0
+                P[w] = [v]
+            elif vw_dist == seen[w]:  # handle equal paths
+                sigma[w] += sigma[v]
+                P[w].append(v)
+    return S, P, sigma, D
+
+
+def _accumulate_basic(betweenness, S, P, sigma, s):
+    delta = dict.fromkeys(S, 0)
+    while S:
+        w = S.pop()
+        coeff = (1 + delta[w]) / sigma[w]
+        for v in P[w]:
+            delta[v] += sigma[v] * coeff
+        if w != s:
+            betweenness[w] += delta[w]
+    return betweenness, delta
+
+
+def _accumulate_endpoints(betweenness, S, P, sigma, s):
+    betweenness[s] += len(S) - 1
+    delta = dict.fromkeys(S, 0)
+    while S:
+        w = S.pop()
+        coeff = (1 + delta[w]) / sigma[w]
+        for v in P[w]:
+            delta[v] += sigma[v] * coeff
+        if w != s:
+            betweenness[w] += delta[w] + 1
+    return betweenness, delta
+
+
+def _accumulate_edges(betweenness, S, P, sigma, s):
+    delta = dict.fromkeys(S, 0)
+    while S:
+        w = S.pop()
+        coeff = (1 + delta[w]) / sigma[w]
+        for v in P[w]:
+            c = sigma[v] * coeff
+            if (v, w) not in betweenness:
+                betweenness[(w, v)] += c
+            else:
+                betweenness[(v, w)] += c
+            delta[v] += c
+        if w != s:
+            betweenness[w] += delta[w]
+    return betweenness
+
+
+def _rescale(betweenness, n, normalized, directed=False, k=None, endpoints=False):
+    if normalized:
+        if endpoints:
+            if n < 2:
+                scale = None  # no normalization
+            else:
+                # Scale factor should include endpoint nodes
+                scale = 1 / (n * (n - 1))
+        elif n <= 2:
+            scale = None  # no normalization b=0 for all nodes
+        else:
+            scale = 1 / ((n - 1) * (n - 2))
+    else:  # divide by 2 for undirected graphs (each pair is seen twice)
+        if not directed:
+            scale = 0.5
+        else:
+            scale = None
+    if scale is not None:
+        if k is not None:
+            scale = scale * n / k
+        for v in betweenness:
+            betweenness[v] *= scale
+    return betweenness
+
+
+def _rescale_e(betweenness, n, normalized, directed=False, k=None):
+    if normalized:
+        if n <= 1:
+            scale = None  # no normalization b=0 for all nodes
+        else:
+            scale = 1 / (n * (n - 1))
+    else:  # divide by 2 for undirected graphs (each pair is seen twice)
+        if not directed:
+            scale = 0.5
+        else:
+            scale = None
+    if scale is not None:
+        if k is not None:
+            scale = scale * n / k
+        for v in betweenness:
+            betweenness[v] *= scale
+    return betweenness
+
+
+@not_implemented_for("graph")
 def _add_edge_keys(G, betweenness, weight=None):
-    """Adds the corrected betweenness centrality (BC) values for multigraphs.
+    r"""Adds the corrected betweenness centrality (BC) values for multigraphs.

     Parameters
     ----------
@@ -221,4 +421,15 @@ def _add_edge_keys(G, betweenness, weight=None):

     The BC value is divided among edges of equal weight.
     """
-    pass
+    _weight = _weight_function(G, weight)
+
+    edge_bc = dict.fromkeys(G.edges, 0.0)
+    for u, v in betweenness:
+        d = G[u][v]
+        wt = _weight(u, v, d)
+        keys = [k for k in d if _weight(u, v, {k: d[k]}) == wt]
+        bc = betweenness[(u, v)] / len(keys)
+        for k in keys:
+            edge_bc[(u, v, k)] = bc
+
+    return edge_bc
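
Doctest-style sketches for the restored betweenness functions (not part of the patch); the path-graph values are standard:

    >>> import networkx as nx
    >>> G = nx.path_graph(4)
    >>> nx.betweenness_centrality(G)
    {0: 0.0, 1: 0.6666666666666666, 2: 0.6666666666666666, 3: 0.0}
    >>> nx.edge_betweenness_centrality(G)
    {(0, 1): 0.5, (1, 2): 0.6666666666666666, (2, 3): 0.5}
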
diff --git a/networkx/algorithms/centrality/betweenness_subset.py b/networkx/algorithms/centrality/betweenness_subset.py
index 8dd1c6b3e..7f9967e96 100644
--- a/networkx/algorithms/centrality/betweenness_subset.py
+++ b/networkx/algorithms/centrality/betweenness_subset.py
@@ -1,27 +1,35 @@
 """Betweenness centrality measures for subsets of nodes."""
 import networkx as nx
-from networkx.algorithms.centrality.betweenness import _add_edge_keys
-from networkx.algorithms.centrality.betweenness import _single_source_dijkstra_path_basic as dijkstra
-from networkx.algorithms.centrality.betweenness import _single_source_shortest_path_basic as shortest_path
-__all__ = ['betweenness_centrality_subset',
-    'edge_betweenness_centrality_subset']
-
-
-@nx._dispatchable(edge_attrs='weight')
-def betweenness_centrality_subset(G, sources, targets, normalized=False,
-    weight=None):
-    """Compute betweenness centrality for a subset of nodes.
+from networkx.algorithms.centrality.betweenness import (
+    _add_edge_keys,
+)
+from networkx.algorithms.centrality.betweenness import (
+    _single_source_dijkstra_path_basic as dijkstra,
+)
+from networkx.algorithms.centrality.betweenness import (
+    _single_source_shortest_path_basic as shortest_path,
+)
+
+__all__ = [
+    "betweenness_centrality_subset",
+    "edge_betweenness_centrality_subset",
+]
+
+
+@nx._dispatchable(edge_attrs="weight")
+def betweenness_centrality_subset(G, sources, targets, normalized=False, weight=None):
+    r"""Compute betweenness centrality for a subset of nodes.

     .. math::

-       c_B(v) =\\sum_{s\\in S, t \\in T} \\frac{\\sigma(s, t|v)}{\\sigma(s, t)}
+       c_B(v) =\sum_{s\in S, t \in T} \frac{\sigma(s, t|v)}{\sigma(s, t)}

     where $S$ is the set of sources, $T$ is the set of targets,
-    $\\sigma(s, t)$ is the number of shortest $(s, t)$-paths,
-    and $\\sigma(s, t|v)$ is the number of those paths
+    $\sigma(s, t)$ is the number of shortest $(s, t)$-paths,
+    and $\sigma(s, t|v)$ is the number of those paths
     passing through some  node $v$ other than $s, t$.
-    If $s = t$, $\\sigma(s, t) = 1$,
-    and if $v \\in {s, t}$, $\\sigma(s, t|v) = 0$ [2]_.
+    If $s = t$, $\sigma(s, t) = 1$,
+    and if $v \in {s, t}$, $\sigma(s, t|v) = 0$ [2]_.


     Parameters
@@ -94,21 +102,31 @@ def betweenness_centrality_subset(G, sources, targets, normalized=False,
        Social Networks 30(2):136-145, 2008.
        https://doi.org/10.1016/j.socnet.2007.11.001
     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
-def edge_betweenness_centrality_subset(G, sources, targets, normalized=
-    False, weight=None):
-    """Compute betweenness centrality for edges for a subset of nodes.
+    b = dict.fromkeys(G, 0.0)  # b[v]=0 for v in G
+    for s in sources:
+        # single source shortest paths
+        if weight is None:  # use BFS
+            S, P, sigma, _ = shortest_path(G, s)
+        else:  # use Dijkstra's algorithm
+            S, P, sigma, _ = dijkstra(G, s, weight)
+        b = _accumulate_subset(b, S, P, sigma, s, targets)
+    b = _rescale(b, len(G), normalized=normalized, directed=G.is_directed())
+    return b
+
+
+@nx._dispatchable(edge_attrs="weight")
+def edge_betweenness_centrality_subset(
+    G, sources, targets, normalized=False, weight=None
+):
+    r"""Compute betweenness centrality for edges for a subset of nodes.

     .. math::

-       c_B(v) =\\sum_{s\\in S,t \\in T} \\frac{\\sigma(s, t|e)}{\\sigma(s, t)}
+       c_B(v) =\sum_{s\in S,t \in T} \frac{\sigma(s, t|e)}{\sigma(s, t)}

     where $S$ is the set of sources, $T$ is the set of targets,
-    $\\sigma(s, t)$ is the number of shortest $(s, t)$-paths,
-    and $\\sigma(s, t|e)$ is the number of those paths
+    $\sigma(s, t)$ is the number of shortest $(s, t)$-paths,
+    and $\sigma(s, t|e)$ is the number of those paths
     passing through edge $e$ [2]_.

     Parameters
@@ -166,19 +184,91 @@ def edge_betweenness_centrality_subset(G, sources, targets, normalized=
        Social Networks 30(2):136-145, 2008.
        https://doi.org/10.1016/j.socnet.2007.11.001
     """
-    pass
+    b = dict.fromkeys(G, 0.0)  # b[v]=0 for v in G
+    b.update(dict.fromkeys(G.edges(), 0.0))  # b[e] for e in G.edges()
+    for s in sources:
+        # single source shortest paths
+        if weight is None:  # use BFS
+            S, P, sigma, _ = shortest_path(G, s)
+        else:  # use Dijkstra's algorithm
+            S, P, sigma, _ = dijkstra(G, s, weight)
+        b = _accumulate_edges_subset(b, S, P, sigma, s, targets)
+    for n in G:  # remove nodes to only return edges
+        del b[n]
+    b = _rescale_e(b, len(G), normalized=normalized, directed=G.is_directed())
+    if G.is_multigraph():
+        b = _add_edge_keys(G, b, weight=weight)
+    return b
+
+
+def _accumulate_subset(betweenness, S, P, sigma, s, targets):
+    delta = dict.fromkeys(S, 0.0)
+    target_set = set(targets) - {s}
+    while S:
+        w = S.pop()
+        if w in target_set:
+            coeff = (delta[w] + 1.0) / sigma[w]
+        else:
+            coeff = delta[w] / sigma[w]
+        for v in P[w]:
+            delta[v] += sigma[v] * coeff
+        if w != s:
+            betweenness[w] += delta[w]
+    return betweenness


 def _accumulate_edges_subset(betweenness, S, P, sigma, s, targets):
     """edge_betweenness_centrality_subset helper."""
-    pass
+    delta = dict.fromkeys(S, 0)
+    target_set = set(targets)
+    while S:
+        w = S.pop()
+        for v in P[w]:
+            if w in target_set:
+                c = (sigma[v] / sigma[w]) * (1.0 + delta[w])
+            else:
+                c = delta[w] / len(P[w])
+            if (v, w) not in betweenness:
+                betweenness[(w, v)] += c
+            else:
+                betweenness[(v, w)] += c
+            delta[v] += c
+        if w != s:
+            betweenness[w] += delta[w]
+    return betweenness


 def _rescale(betweenness, n, normalized, directed=False):
     """betweenness_centrality_subset helper."""
-    pass
+    if normalized:
+        if n <= 2:
+            scale = None  # no normalization b=0 for all nodes
+        else:
+            scale = 1.0 / ((n - 1) * (n - 2))
+    else:  # divide by 2 for undirected graphs (each pair is seen twice)
+        if not directed:
+            scale = 0.5
+        else:
+            scale = None
+    if scale is not None:
+        for v in betweenness:
+            betweenness[v] *= scale
+    return betweenness


 def _rescale_e(betweenness, n, normalized, directed=False):
     """edge_betweenness_centrality_subset helper."""
-    pass
+    if normalized:
+        if n <= 1:
+            scale = None  # no normalization b=0 for all nodes
+        else:
+            scale = 1.0 / (n * (n - 1))
+    else:  # divide by 2 for undirected graphs (each pair is seen twice)
+        if not directed:
+            scale = 0.5
+        else:
+            scale = None
+    if scale is not None:
+        for v in betweenness:
+            betweenness[v] *= scale
+    return betweenness
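
Doctest-style sketch for the restored subset variants (not part of the patch). With the default `normalized=False`, each undirected pair contributes with a factor of 1/2:

    >>> import networkx as nx
    >>> G = nx.path_graph(4)
    >>> nx.betweenness_centrality_subset(G, sources=[0], targets=[3])
    {0: 0.0, 1: 0.5, 2: 0.5, 3: 0.0}
    >>> nx.edge_betweenness_centrality_subset(G, sources=[0], targets=[3])
    {(0, 1): 0.5, (1, 2): 0.5, (2, 3): 0.5}
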
diff --git a/networkx/algorithms/centrality/closeness.py b/networkx/algorithms/centrality/closeness.py
index fa551c90c..1c1722d4e 100644
--- a/networkx/algorithms/centrality/closeness.py
+++ b/networkx/algorithms/centrality/closeness.py
@@ -2,22 +2,24 @@
 Closeness centrality measures.
 """
 import functools
+
 import networkx as nx
 from networkx.exception import NetworkXError
 from networkx.utils.decorators import not_implemented_for
-__all__ = ['closeness_centrality', 'incremental_closeness_centrality']
+
+__all__ = ["closeness_centrality", "incremental_closeness_centrality"]


-@nx._dispatchable(edge_attrs='distance')
+@nx._dispatchable(edge_attrs="distance")
 def closeness_centrality(G, u=None, distance=None, wf_improved=True):
-    """Compute closeness centrality for nodes.
+    r"""Compute closeness centrality for nodes.

     Closeness centrality [1]_ of a node `u` is the reciprocal of the
     average shortest path distance to `u` over all `n-1` reachable nodes.

     .. math::

-        C(u) = \\frac{n - 1}{\\sum_{v=1}^{n-1} d(v, u)},
+        C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},

     where `d(v, u)` is the shortest-path distance between `v` and `u`,
     and `n-1` is the number of nodes reachable from `u`. Notice that the
@@ -36,7 +38,7 @@ def closeness_centrality(G, u=None, distance=None, wf_improved=True):

     .. math::

-        C_{WF}(u) = \\frac{n-1}{N-1} \\frac{n - 1}{\\sum_{v=1}^{n-1} d(v, u)},
+        C_{WF}(u) = \frac{n-1}{N-1} \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},

     Parameters
     ----------
@@ -101,14 +103,45 @@ def closeness_centrality(G, u=None, distance=None, wf_improved=True):
        Social Network Analysis: Methods and Applications, 1994,
        Cambridge University Press.
     """
-    pass
-
-
-@not_implemented_for('directed')
+    if G.is_directed():
+        G = G.reverse()  # create a reversed graph view
+
+    if distance is not None:
+        # use Dijkstra's algorithm with specified attribute as edge weight
+        path_length = functools.partial(
+            nx.single_source_dijkstra_path_length, weight=distance
+        )
+    else:
+        path_length = nx.single_source_shortest_path_length
+
+    if u is None:
+        nodes = G.nodes
+    else:
+        nodes = [u]
+    closeness_dict = {}
+    for n in nodes:
+        sp = path_length(G, n)
+        totsp = sum(sp.values())
+        len_G = len(G)
+        _closeness_centrality = 0.0
+        if totsp > 0.0 and len_G > 1:
+            _closeness_centrality = (len(sp) - 1.0) / totsp
+            # normalize to number of nodes-1 in connected part
+            if wf_improved:
+                s = (len(sp) - 1.0) / (len_G - 1)
+                _closeness_centrality *= s
+        closeness_dict[n] = _closeness_centrality
+    if u is not None:
+        return closeness_dict[u]
+    return closeness_dict
+
+
+@not_implemented_for("directed")
 @nx._dispatchable(mutates_input=True)
-def incremental_closeness_centrality(G, edge, prev_cc=None, insertion=True,
-    wf_improved=True):
-    """Incremental closeness centrality for nodes.
+def incremental_closeness_centrality(
+    G, edge, prev_cc=None, insertion=True, wf_improved=True
+):
+    r"""Incremental closeness centrality for nodes.

     Compute closeness centrality for nodes using level-based work filtering
     as described in Incremental Algorithms for Closeness Centrality by Sariyuce et al.
@@ -120,8 +153,8 @@ def incremental_closeness_centrality(G, edge, prev_cc=None, insertion=True,
     From "Incremental Algorithms for Closeness Centrality":

     Theorem 1: Let :math:`G = (V, E)` be a graph and u and v be two vertices in V
-    such that there is no edge (u, v) in E. Let :math:`G' = (V, E \\cup uv)`
-    Then :math:`cc[s] = cc'[s]` if and only if :math:`\\left|dG(s, u) - dG(s, v)\\right| \\leq 1`.
+    such that there is no edge (u, v) in E. Let :math:`G' = (V, E \cup uv)`.
+    Then :math:`cc[s] = cc'[s]` if and only if :math:`\left|dG(s, u) - dG(s, v)\right| \leq 1`.

     Where :math:`dG(u, v)` denotes the length of the shortest path between
     two vertices u, v in a graph G, cc[s] is the closeness centrality for a
@@ -134,7 +167,7 @@ def incremental_closeness_centrality(G, edge, prev_cc=None, insertion=True,
     other nodes to u and to v before the node is added. When removing an edge,
     we compute the shortest path lengths after the edge is removed. Then we
     apply Theorem 1 to use previously computed closeness centrality for nodes
-    where :math:`\\left|dG(s, u) - dG(s, v)\\right| \\leq 1`. This works only for
+    where :math:`\left|dG(s, u) - dG(s, v)\right| \leq 1`. This works only for
     undirected, unweighted graphs; the distance argument is not supported.

     Closeness centrality [1]_ of a node `u` is the reciprocal of the
@@ -145,7 +178,7 @@ def incremental_closeness_centrality(G, edge, prev_cc=None, insertion=True,

     .. math::

-        C(u) = \\frac{n - 1}{\\sum_{v=1}^{n-1} d(v, u)},
+        C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},

     where `d(v, u)` is the shortest-path distance between `v` and `u`,
     and `n` is the number of nodes in the graph.
@@ -198,4 +231,51 @@ def incremental_closeness_centrality(G, edge, prev_cc=None, insertion=True,
        Algorithms for Closeness Centrality. 2013 IEEE International Conference on Big Data
        http://sariyuce.com/papers/bigdata13.pdf
     """
-    pass
+    if prev_cc is not None and set(prev_cc.keys()) != set(G.nodes()):
+        raise NetworkXError("prev_cc and G do not have the same nodes")
+
+    # Unpack edge
+    (u, v) = edge
+    path_length = nx.single_source_shortest_path_length
+
+    if insertion:
+        # For edge insertion, we want shortest paths before the edge is inserted
+        du = path_length(G, u)
+        dv = path_length(G, v)
+
+        G.add_edge(u, v)
+    else:
+        G.remove_edge(u, v)
+
+        # For edge removal, we want shortest paths after the edge is removed
+        du = path_length(G, u)
+        dv = path_length(G, v)
+
+    if prev_cc is None:
+        return nx.closeness_centrality(G)
+
+    nodes = G.nodes()
+    closeness_dict = {}
+    for n in nodes:
+        if n in du and n in dv and abs(du[n] - dv[n]) <= 1:
+            closeness_dict[n] = prev_cc[n]
+        else:
+            sp = path_length(G, n)
+            totsp = sum(sp.values())
+            len_G = len(G)
+            _closeness_centrality = 0.0
+            if totsp > 0.0 and len_G > 1:
+                _closeness_centrality = (len(sp) - 1.0) / totsp
+                # normalize to number of nodes-1 in connected part
+                if wf_improved:
+                    s = (len(sp) - 1.0) / (len_G - 1)
+                    _closeness_centrality *= s
+            closeness_dict[n] = _closeness_centrality
+
+    # Leave the graph as we found it
+    if insertion:
+        G.remove_edge(u, v)
+    else:
+        G.add_edge(u, v)
+
+    return closeness_dict
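
Doctest-style sketches for the restored closeness functions (not part of the patch). The incremental variant reuses `prev_cc` for nodes whose distances to the edge's endpoints differ by at most one, and leaves `G` unmodified on return:

    >>> import networkx as nx
    >>> G = nx.path_graph(4)
    >>> nx.closeness_centrality(G)
    {0: 0.5, 1: 0.75, 2: 0.75, 3: 0.5}
    >>> prev = nx.closeness_centrality(G)
    >>> nx.incremental_closeness_centrality(G, (0, 3), prev_cc=prev, insertion=True)
    {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75}
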
diff --git a/networkx/algorithms/centrality/current_flow_betweenness.py b/networkx/algorithms/centrality/current_flow_betweenness.py
index 017afd931..b79a4c801 100644
--- a/networkx/algorithms/centrality/current_flow_betweenness.py
+++ b/networkx/algorithms/centrality/current_flow_betweenness.py
@@ -1,19 +1,38 @@
 """Current-flow betweenness centrality measures."""
 import networkx as nx
-from networkx.algorithms.centrality.flow_matrix import CGInverseLaplacian, FullInverseLaplacian, SuperLUInverseLaplacian, flow_matrix_row
-from networkx.utils import not_implemented_for, py_random_state, reverse_cuthill_mckee_ordering
-__all__ = ['current_flow_betweenness_centrality',
-    'approximate_current_flow_betweenness_centrality',
-    'edge_current_flow_betweenness_centrality']
-
-
-@not_implemented_for('directed')
+from networkx.algorithms.centrality.flow_matrix import (
+    CGInverseLaplacian,
+    FullInverseLaplacian,
+    SuperLUInverseLaplacian,
+    flow_matrix_row,
+)
+from networkx.utils import (
+    not_implemented_for,
+    py_random_state,
+    reverse_cuthill_mckee_ordering,
+)
+
+__all__ = [
+    "current_flow_betweenness_centrality",
+    "approximate_current_flow_betweenness_centrality",
+    "edge_current_flow_betweenness_centrality",
+]
+
+
+@not_implemented_for("directed")
 @py_random_state(7)
-@nx._dispatchable(edge_attrs='weight')
-def approximate_current_flow_betweenness_centrality(G, normalized=True,
-    weight=None, dtype=float, solver='full', epsilon=0.5, kmax=10000, seed=None
-    ):
-    """Compute the approximate current-flow betweenness centrality for nodes.
+@nx._dispatchable(edge_attrs="weight")
+def approximate_current_flow_betweenness_centrality(
+    G,
+    normalized=True,
+    weight=None,
+    dtype=float,
+    solver="full",
+    epsilon=0.5,
+    kmax=10000,
+    seed=None,
+):
+    r"""Compute the approximate current-flow betweenness centrality for nodes.

     Approximates the current-flow betweenness centrality within absolute
     error of epsilon with high probability [1]_.
@@ -64,7 +83,7 @@ def approximate_current_flow_betweenness_centrality(G, normalized=True,

     Notes
     -----
-    The running time is $O((1/\\epsilon^2)m{\\sqrt k} \\log n)$
+    The running time is $O((1/\epsilon^2)m{\sqrt k} \log n)$
     and the space required is $O(m)$ for $n$ nodes and $m$ edges.

     If the edges have a 'weight' attribute they will be used as
@@ -78,14 +97,58 @@ def approximate_current_flow_betweenness_centrality(G, normalized=True,
        LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
        https://doi.org/10.1007/978-3-540-31856-9_44
     """
-    pass
-
-
-@not_implemented_for('directed')
-@nx._dispatchable(edge_attrs='weight')
-def current_flow_betweenness_centrality(G, normalized=True, weight=None,
-    dtype=float, solver='full'):
-    """Compute current-flow betweenness centrality for nodes.
+    import numpy as np
+
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph not connected.")
+    solvername = {
+        "full": FullInverseLaplacian,
+        "lu": SuperLUInverseLaplacian,
+        "cg": CGInverseLaplacian,
+    }
+    n = G.number_of_nodes()
+    ordering = list(reverse_cuthill_mckee_ordering(G))
+    # make a copy with integer labels according to rcm ordering
+    # this could be done without a copy if we really wanted to
+    H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))
+    L = nx.laplacian_matrix(H, nodelist=range(n), weight=weight).asformat("csc")
+    L = L.astype(dtype)
+    C = solvername[solver](L, dtype=dtype)  # initialize solver
+    betweenness = dict.fromkeys(H, 0.0)
+    nb = (n - 1.0) * (n - 2.0)  # normalization factor
+    cstar = n * (n - 1) / nb
+    l = 1  # parameter in approximation, adjustable
+    k = l * int(np.ceil((cstar / epsilon) ** 2 * np.log(n)))
+    if k > kmax:
+        msg = f"Number random pairs k>kmax ({k}>{kmax}) "
+        raise nx.NetworkXError(msg, "Increase kmax or epsilon")
+    cstar2k = cstar / (2 * k)
+    for _ in range(k):
+        s, t = pair = seed.sample(range(n), 2)
+        b = np.zeros(n, dtype=dtype)
+        b[s] = 1
+        b[t] = -1
+        p = C.solve(b)
+        for v in H:
+            if v in pair:
+                continue
+            for nbr in H[v]:
+                w = H[v][nbr].get(weight, 1.0)
+                betweenness[v] += float(w * np.abs(p[v] - p[nbr]) * cstar2k)
+    if normalized:
+        factor = 1.0
+    else:
+        factor = nb / 2.0
+    # remap to original node names and "unnormalize" if required
+    return {ordering[k]: v * factor for k, v in betweenness.items()}
+
+
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
+def current_flow_betweenness_centrality(
+    G, normalized=True, weight=None, dtype=float, solver="full"
+):
+    r"""Compute current-flow betweenness centrality for nodes.

     Current-flow betweenness centrality uses an electrical current
     model for information spreading in contrast to betweenness
@@ -132,10 +195,10 @@ def current_flow_betweenness_centrality(G, normalized=True, weight=None,

     Notes
     -----
-    Current-flow betweenness can be computed in  $O(I(n-1)+mn \\log n)$
+    Current-flow betweenness can be computed in  $O(I(n-1)+mn \log n)$
     time [1]_, where $I(n-1)$ is the time needed to compute the
     inverse Laplacian.  For a full matrix this is $O(n^3)$ but using
-    sparse methods you can achieve $O(nm{\\sqrt k})$ where $k$ is the
+    sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
     Laplacian matrix condition number.

     The space required is $O(nw)$ where $w$ is the width of the sparse
@@ -155,14 +218,32 @@ def current_flow_betweenness_centrality(G, normalized=True, weight=None,
     .. [2] A measure of betweenness centrality based on random walks,
        M. E. J. Newman, Social Networks 27, 39-54 (2005).
     """
-    pass
-
-
-@not_implemented_for('directed')
-@nx._dispatchable(edge_attrs='weight')
-def edge_current_flow_betweenness_centrality(G, normalized=True, weight=
-    None, dtype=float, solver='full'):
-    """Compute current-flow betweenness centrality for edges.
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph not connected.")
+    N = G.number_of_nodes()
+    ordering = list(reverse_cuthill_mckee_ordering(G))
+    # make a copy with integer labels according to rcm ordering
+    # this could be done without a copy if we really wanted to
+    H = nx.relabel_nodes(G, dict(zip(ordering, range(N))))
+    betweenness = dict.fromkeys(H, 0.0)  # b[n]=0 for n in H
+    for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
+        pos = dict(zip(row.argsort()[::-1], range(N)))
+        for i in range(N):
+            betweenness[s] += (i - pos[i]) * row.item(i)
+            betweenness[t] += (N - i - 1 - pos[i]) * row.item(i)
+    if normalized:
+        nb = (N - 1.0) * (N - 2.0)  # normalization factor
+    else:
+        nb = 2.0
+    return {ordering[n]: (b - n) * 2.0 / nb for n, b in betweenness.items()}
+
+
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
+def edge_current_flow_betweenness_centrality(
+    G, normalized=True, weight=None, dtype=float, solver="full"
+):
+    r"""Compute current-flow betweenness centrality for edges.

     Current-flow betweenness centrality uses an electrical current
     model for information spreading in contrast to betweenness
@@ -215,10 +296,10 @@ def edge_current_flow_betweenness_centrality(G, normalized=True, weight=

     Notes
     -----
-    Current-flow betweenness can be computed in $O(I(n-1)+mn \\log n)$
+    Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$
     time [1]_, where $I(n-1)$ is the time needed to compute the
     inverse Laplacian.  For a full matrix this is $O(n^3)$ but using
-    sparse methods you can achieve $O(nm{\\sqrt k})$ where $k$ is the
+    sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
     Laplacian matrix condition number.

     The space required is $O(nw)$ where $w$ is the width of the sparse
@@ -238,4 +319,23 @@ def edge_current_flow_betweenness_centrality(G, normalized=True, weight=
     .. [2] A measure of betweenness centrality based on random walks,
        M. E. J. Newman, Social Networks 27, 39-54 (2005).
     """
-    pass
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph not connected.")
+    N = G.number_of_nodes()
+    ordering = list(reverse_cuthill_mckee_ordering(G))
+    # make a copy with integer labels according to rcm ordering
+    # this could be done without a copy if we really wanted to
+    H = nx.relabel_nodes(G, dict(zip(ordering, range(N))))
+    edges = (tuple(sorted((u, v))) for u, v in H.edges())
+    betweenness = dict.fromkeys(edges, 0.0)
+    if normalized:
+        nb = (N - 1.0) * (N - 2.0)  # normalization factor
+    else:
+        nb = 2.0
+    for row, (e) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
+        pos = dict(zip(row.argsort()[::-1], range(1, N + 1)))
+        for i in range(N):
+            betweenness[e] += (i + 1 - pos[i]) * row.item(i)
+            betweenness[e] += (N - i - pos[i]) * row.item(i)
+        betweenness[e] /= nb
+    return {(ordering[s], ordering[t]): b for (s, t), b in betweenness.items()}
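
Doctest-style sketch for the restored current-flow functions (assumes NumPy/SciPy; not part of the patch). On a tree all current follows the unique path, so the node measure coincides with shortest-path betweenness; a tolerance check avoids asserting solver-dependent float reprs, and both edge-key orientations are tried because the returned keys follow the internal RCM relabeling:

    >>> import networkx as nx
    >>> G = nx.path_graph(4)
    >>> cf = nx.current_flow_betweenness_centrality(G)
    >>> bc = nx.betweenness_centrality(G)
    >>> all(abs(cf[n] - bc[n]) < 1e-9 for n in G)
    True
    >>> ecf = nx.edge_current_flow_betweenness_centrality(G)
    >>> round(ecf.get((1, 2), ecf.get((2, 1))), 4)
    0.6667
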
diff --git a/networkx/algorithms/centrality/current_flow_betweenness_subset.py b/networkx/algorithms/centrality/current_flow_betweenness_subset.py
index d52c70fb8..c6790b218 100644
--- a/networkx/algorithms/centrality/current_flow_betweenness_subset.py
+++ b/networkx/algorithms/centrality/current_flow_betweenness_subset.py
@@ -2,15 +2,19 @@
 import networkx as nx
 from networkx.algorithms.centrality.flow_matrix import flow_matrix_row
 from networkx.utils import not_implemented_for, reverse_cuthill_mckee_ordering
-__all__ = ['current_flow_betweenness_centrality_subset',
-    'edge_current_flow_betweenness_centrality_subset']

+__all__ = [
+    "current_flow_betweenness_centrality_subset",
+    "edge_current_flow_betweenness_centrality_subset",
+]

-@not_implemented_for('directed')
-@nx._dispatchable(edge_attrs='weight')
-def current_flow_betweenness_centrality_subset(G, sources, targets,
-    normalized=True, weight=None, dtype=float, solver='lu'):
-    """Compute current-flow betweenness centrality for subsets of nodes.
+
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
+def current_flow_betweenness_centrality_subset(
+    G, sources, targets, normalized=True, weight=None, dtype=float, solver="lu"
+):
+    r"""Compute current-flow betweenness centrality for subsets of nodes.

     Current-flow betweenness centrality uses an electrical current
     model for information spreading in contrast to betweenness
@@ -63,10 +67,10 @@ def current_flow_betweenness_centrality_subset(G, sources, targets,

     Notes
     -----
-    Current-flow betweenness can be computed in $O(I(n-1)+mn \\log n)$
+    Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$
     time [1]_, where $I(n-1)$ is the time needed to compute the
     inverse Laplacian.  For a full matrix this is $O(n^3)$ but using
-    sparse methods you can achieve $O(nm{\\sqrt k})$ where $k$ is the
+    sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
     Laplacian matrix condition number.

     The space required is $O(nw)$ where $w$ is the width of the sparse
@@ -86,14 +90,41 @@ def current_flow_betweenness_centrality_subset(G, sources, targets,
     .. [2] A measure of betweenness centrality based on random walks,
        M. E. J. Newman, Social Networks 27, 39-54 (2005).
     """
-    pass
-
-
-@not_implemented_for('directed')
-@nx._dispatchable(edge_attrs='weight')
-def edge_current_flow_betweenness_centrality_subset(G, sources, targets,
-    normalized=True, weight=None, dtype=float, solver='lu'):
-    """Compute current-flow betweenness centrality for edges using subsets
+    import numpy as np
+
+    from networkx.utils import reverse_cuthill_mckee_ordering
+
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph not connected.")
+    N = G.number_of_nodes()
+    ordering = list(reverse_cuthill_mckee_ordering(G))
+    # make a copy with integer labels according to rcm ordering
+    # this could be done without a copy if we really wanted to
+    mapping = dict(zip(ordering, range(N)))
+    H = nx.relabel_nodes(G, mapping)
+    betweenness = dict.fromkeys(H, 0.0)  # b[n]=0 for n in H
+    for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
+        for ss in sources:
+            i = mapping[ss]
+            for tt in targets:
+                j = mapping[tt]
+                betweenness[s] += 0.5 * abs(row.item(i) - row.item(j))
+                betweenness[t] += 0.5 * abs(row.item(i) - row.item(j))
+    if normalized:
+        nb = (N - 1.0) * (N - 2.0)  # normalization factor
+    else:
+        nb = 2.0
+    for node in H:
+        betweenness[node] = betweenness[node] / nb + 1.0 / (2 - N)
+    return {ordering[node]: value for node, value in betweenness.items()}
+
+
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
+def edge_current_flow_betweenness_centrality_subset(
+    G, sources, targets, normalized=True, weight=None, dtype=float, solver="lu"
+):
+    r"""Compute current-flow betweenness centrality for edges using subsets
     of nodes.

     Current-flow betweenness centrality uses an electrical current
@@ -146,10 +177,10 @@ def edge_current_flow_betweenness_centrality_subset(G, sources, targets,

     Notes
     -----
-    Current-flow betweenness can be computed in $O(I(n-1)+mn \\log n)$
+    Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$
     time [1]_, where $I(n-1)$ is the time needed to compute the
     inverse Laplacian.  For a full matrix this is $O(n^3)$ but using
-    sparse methods you can achieve $O(nm{\\sqrt k})$ where $k$ is the
+    sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
     Laplacian matrix condition number.

     The space required is $O(nw)$ where $w$ is the width of the sparse
@@ -169,4 +200,27 @@ def edge_current_flow_betweenness_centrality_subset(G, sources, targets,
     .. [2] A measure of betweenness centrality based on random walks,
        M. E. J. Newman, Social Networks 27, 39-54 (2005).
     """
-    pass
+    import numpy as np
+
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph not connected.")
+    N = G.number_of_nodes()
+    ordering = list(reverse_cuthill_mckee_ordering(G))
+    # make a copy with integer labels according to rcm ordering
+    # this could be done without a copy if we really wanted to
+    mapping = dict(zip(ordering, range(N)))
+    H = nx.relabel_nodes(G, mapping)
+    edges = (tuple(sorted((u, v))) for u, v in H.edges())
+    betweenness = dict.fromkeys(edges, 0.0)
+    if normalized:
+        nb = (N - 1.0) * (N - 2.0)  # normalization factor
+    else:
+        nb = 2.0
+    for row, (e) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
+        for ss in sources:
+            i = mapping[ss]
+            for tt in targets:
+                j = mapping[tt]
+                betweenness[e] += 0.5 * abs(row.item(i) - row.item(j))
+        betweenness[e] /= nb
+    return {(ordering[s], ordering[t]): value for (s, t), value in betweenness.items()}
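A minimal sketch exercising both subset variants (the graph and the source/target sets are arbitrary; both functions require a connected undirected graph and SciPy for the sparse solvers):

    import networkx as nx

    G = nx.karate_club_graph()  # connected and undirected, as required
    sources, targets = [0, 1, 2], [31, 32, 33]
    node_cb = nx.current_flow_betweenness_centrality_subset(G, sources, targets)
    edge_cb = nx.edge_current_flow_betweenness_centrality_subset(G, sources, targets)
    print(max(node_cb, key=node_cb.get))  # most central node for these endpoints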
diff --git a/networkx/algorithms/centrality/current_flow_closeness.py b/networkx/algorithms/centrality/current_flow_closeness.py
index f6d4156d9..92c892f74 100644
--- a/networkx/algorithms/centrality/current_flow_closeness.py
+++ b/networkx/algorithms/centrality/current_flow_closeness.py
@@ -1,14 +1,18 @@
 """Current-flow closeness centrality measures."""
 import networkx as nx
-from networkx.algorithms.centrality.flow_matrix import CGInverseLaplacian, FullInverseLaplacian, SuperLUInverseLaplacian
+from networkx.algorithms.centrality.flow_matrix import (
+    CGInverseLaplacian,
+    FullInverseLaplacian,
+    SuperLUInverseLaplacian,
+)
 from networkx.utils import not_implemented_for, reverse_cuthill_mckee_ordering
-__all__ = ['current_flow_closeness_centrality', 'information_centrality']

+__all__ = ["current_flow_closeness_centrality", "information_centrality"]

-@not_implemented_for('directed')
-@nx._dispatchable(edge_attrs='weight')
-def current_flow_closeness_centrality(G, weight=None, dtype=float, solver='lu'
-    ):
+
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
+def current_flow_closeness_centrality(G, weight=None, dtype=float, solver="lu"):
     """Compute current-flow closeness centrality for nodes.

     Current-flow closeness centrality is a variant of closeness
@@ -63,7 +67,29 @@ def current_flow_closeness_centrality(G, weight=None, dtype=float, solver='lu'
        Social Networks 11(1):1-37, 1989.
        https://doi.org/10.1016/0378-8733(89)90016-6
     """
-    pass
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph not connected.")
+    solvername = {
+        "full": FullInverseLaplacian,
+        "lu": SuperLUInverseLaplacian,
+        "cg": CGInverseLaplacian,
+    }
+    N = G.number_of_nodes()
+    ordering = list(reverse_cuthill_mckee_ordering(G))
+    # make a copy with integer labels according to rcm ordering
+    # this could be done without a copy if we really wanted to
+    H = nx.relabel_nodes(G, dict(zip(ordering, range(N))))
+    betweenness = dict.fromkeys(H, 0.0)  # b[n]=0 for n in H
+    N = H.number_of_nodes()
+    L = nx.laplacian_matrix(H, nodelist=range(N), weight=weight).asformat("csc")
+    L = L.astype(dtype)
+    C2 = solvername[solver](L, width=1, dtype=dtype)  # initialize solver
+    for v in H:
+        col = C2.get_row(v)
+        for w in H:
+            betweenness[v] += col.item(v) - 2 * col.item(w)
+            betweenness[w] += col.item(v)
+    return {ordering[node]: 1 / value for node, value in betweenness.items()}


 information_centrality = current_flow_closeness_centrality
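For reference, a small usage sketch (the cycle graph is arbitrary; the "lu" solver assumes SciPy):

    import networkx as nx

    G = nx.cycle_graph(5)
    cc = nx.current_flow_closeness_centrality(G)
    ic = nx.information_centrality(G)  # the alias bound directly above
    # a cycle is vertex-transitive, so every node should get the same score
    print(cc)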
diff --git a/networkx/algorithms/centrality/degree_alg.py b/networkx/algorithms/centrality/degree_alg.py
index 9bb653827..ea53f41ea 100644
--- a/networkx/algorithms/centrality/degree_alg.py
+++ b/networkx/algorithms/centrality/degree_alg.py
@@ -1,8 +1,8 @@
 """Degree centrality measures."""
 import networkx as nx
 from networkx.utils.decorators import not_implemented_for
-__all__ = ['degree_centrality', 'in_degree_centrality', 'out_degree_centrality'
-    ]
+
+__all__ = ["degree_centrality", "in_degree_centrality", "out_degree_centrality"]


 @nx._dispatchable
@@ -41,10 +41,15 @@ def degree_centrality(G):
     be higher than n-1 and values of degree centrality greater than 1
     are possible.
     """
-    pass
+    if len(G) <= 1:
+        return {n: 1 for n in G}
+
+    s = 1.0 / (len(G) - 1.0)
+    centrality = {n: d * s for n, d in G.degree()}
+    return centrality


-@not_implemented_for('undirected')
+@not_implemented_for("undirected")
 @nx._dispatchable
 def in_degree_centrality(G):
     """Compute the in-degree centrality for nodes.
@@ -86,10 +91,15 @@ def in_degree_centrality(G):
     be higher than n-1 and values of degree centrality greater than 1
     are possible.
     """
-    pass
+    if len(G) <= 1:
+        return {n: 1 for n in G}

+    s = 1.0 / (len(G) - 1.0)
+    centrality = {n: d * s for n, d in G.in_degree()}
+    return centrality

-@not_implemented_for('undirected')
+
+@not_implemented_for("undirected")
 @nx._dispatchable
 def out_degree_centrality(G):
     """Compute the out-degree centrality for nodes.
@@ -131,4 +141,9 @@ def out_degree_centrality(G):
     be higher than n-1 and values of degree centrality greater than 1
     are possible.
     """
-    pass
+    if len(G) <= 1:
+        return {n: 1 for n in G}
+
+    s = 1.0 / (len(G) - 1.0)
+    centrality = {n: d * s for n, d in G.out_degree()}
+    return centrality
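These three are simple enough to sanity-check by hand; a sketch with a star graph, where values follow directly from degree / (n - 1):

    import networkx as nx

    G = nx.star_graph(4)  # hub 0 plus four leaves, n = 5
    print(nx.degree_centrality(G))  # hub: 4/4 = 1.0, each leaf: 1/4 = 0.25

    D = nx.DiGraph([(0, 1), (0, 2), (1, 2)])
    print(nx.in_degree_centrality(D))   # node 2 has in-degree 2 -> 2/2 = 1.0
    print(nx.out_degree_centrality(D))  # node 0 has out-degree 2 -> 1.0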
diff --git a/networkx/algorithms/centrality/dispersion.py b/networkx/algorithms/centrality/dispersion.py
index 941a49e83..a3fa68583 100644
--- a/networkx/algorithms/centrality/dispersion.py
+++ b/networkx/algorithms/centrality/dispersion.py
@@ -1,11 +1,13 @@
 from itertools import combinations
+
 import networkx as nx
-__all__ = ['dispersion']
+
+__all__ = ["dispersion"]


 @nx._dispatchable
 def dispersion(G, u=None, v=None, normalized=True, alpha=1.0, b=0.0, c=0.0):
-    """Calculate dispersion between `u` and `v` in `G`.
+    r"""Calculate dispersion between `u` and `v` in `G`.

     A link between two actors (`u` and `v`) has a high dispersion when their
     mutual ties (`s` and `t`) are not well connected with each other.
@@ -51,4 +53,55 @@ def dispersion(G, u=None, v=None, normalized=True, alpha=1.0, b=0.0, c=0.0):
         https://arxiv.org/pdf/1310.6753v1.pdf

     """
-    pass
+
+    def _dispersion(G_u, u, v):
+        """dispersion for all nodes 'v' in a ego network G_u of node 'u'"""
+        u_nbrs = set(G_u[u])
+        ST = {n for n in G_u[v] if n in u_nbrs}
+        set_uv = {u, v}
+        # all possible pairs of connections that u and v share
+        possib = combinations(ST, 2)
+        total = 0
+        for s, t in possib:
+            # neighbors of s that are in G_u, not including u and v
+            nbrs_s = u_nbrs.intersection(G_u[s]) - set_uv
+            # s and t are not directly connected
+            if t not in nbrs_s:
+                # s and t do not share a connection
+                if nbrs_s.isdisjoint(G_u[t]):
+                    # tick for disp(u, v)
+                    total += 1
+        # neighbors that u and v share
+        embeddedness = len(ST)
+
+        dispersion_val = total
+        if normalized:
+            dispersion_val = (total + b) ** alpha
+            if embeddedness + c != 0:
+                dispersion_val /= embeddedness + c
+
+        return dispersion_val
+
+    if u is None:
+        # v and u are not specified
+        if v is None:
+            results = {n: {} for n in G}
+            for u in G:
+                for v in G[u]:
+                    results[u][v] = _dispersion(G, u, v)
+        # u is not specified, but v is
+        else:
+            results = dict.fromkeys(G[v], {})
+            for u in G[v]:
+                results[u] = _dispersion(G, v, u)
+    else:
+        # u is specified with no target v
+        if v is None:
+            results = dict.fromkeys(G[u], {})
+            for v in G[u]:
+                results[v] = _dispersion(G, u, v)
+        # both u and v are specified
+        else:
+            results = _dispersion(G, u, v)
+
+    return results
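A hedged worked example (my own trace of the helper, so treat the expected value as a back-of-envelope check): nodes 0 and 1 below share neighbors 2 and 3, which are neither adjacent nor indirectly connected inside the ego network, giving total = 1 and embeddedness = 2.

    import networkx as nx

    G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
    # with the defaults (alpha=1.0, b=0.0, c=0.0) this should come to
    # (1 + 0)**1.0 / (2 + 0) = 0.5
    print(nx.dispersion(G, u=0, v=1))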
diff --git a/networkx/algorithms/centrality/eigenvector.py b/networkx/algorithms/centrality/eigenvector.py
index 9d142ad9d..ed57b2aeb 100644
--- a/networkx/algorithms/centrality/eigenvector.py
+++ b/networkx/algorithms/centrality/eigenvector.py
@@ -1,44 +1,45 @@
 """Functions for computing eigenvector centrality."""
 import math
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['eigenvector_centrality', 'eigenvector_centrality_numpy']
+
+__all__ = ["eigenvector_centrality", "eigenvector_centrality_numpy"]


-@not_implemented_for('multigraph')
-@nx._dispatchable(edge_attrs='weight')
-def eigenvector_centrality(G, max_iter=100, tol=1e-06, nstart=None, weight=None
-    ):
-    """Compute the eigenvector centrality for the graph G.
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
+def eigenvector_centrality(G, max_iter=100, tol=1.0e-6, nstart=None, weight=None):
+    r"""Compute the eigenvector centrality for the graph G.

     Eigenvector centrality computes the centrality for a node by adding
     the centrality of its predecessors. The centrality for node $i$ is the
-    $i$-th element of a left eigenvector associated with the eigenvalue $\\lambda$
+    $i$-th element of a left eigenvector associated with the eigenvalue $\lambda$
     of maximum modulus that is positive. Such an eigenvector $x$ is
     defined up to a multiplicative constant by the equation

     .. math::

-         \\lambda x^T = x^T A,
+         \lambda x^T = x^T A,

     where $A$ is the adjacency matrix of the graph G. By definition of
     row-column product, the equation above is equivalent to

     .. math::

-        \\lambda x_i = \\sum_{j\\to i}x_j.
+        \lambda x_i = \sum_{j\to i}x_j.

     That is, adding the eigenvector centralities of the predecessors of
     $i$ one obtains the eigenvector centrality of $i$ multiplied by
-    $\\lambda$. In the case of undirected graphs, $x$ also solves the familiar
-    right-eigenvector equation $Ax = \\lambda x$.
+    $\lambda$. In the case of undirected graphs, $x$ also solves the familiar
+    right-eigenvector equation $Ax = \lambda x$.

     By virtue of the Perron–Frobenius theorem [1]_, if G is strongly
     connected there is a unique eigenvector $x$, and all its entries
     are strictly positive.

     If G is not strongly connected there might be several left
-    eigenvectors associated with $\\lambda$, and some of their elements
+    eigenvectors associated with $\lambda$, and some of their elements
     might be zero.

     Parameters
@@ -157,41 +158,73 @@ def eigenvector_centrality(G, max_iter=100, tol=1e-06, nstart=None, weight=None
     .. [7] Power iteration:: https://en.wikipedia.org/wiki/Power_iteration

     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept(
+            "cannot compute centrality for the null graph"
+        )
+    # If no initial vector is provided, start with the all-ones vector.
+    if nstart is None:
+        nstart = {v: 1 for v in G}
+    if all(v == 0 for v in nstart.values()):
+        raise nx.NetworkXError("initial vector cannot have all zero values")
+    # Normalize the initial vector so that each entry is in [0, 1]. This is
+    # guaranteed to never have a divide-by-zero error by the previous line.
+    nstart_sum = sum(nstart.values())
+    x = {k: v / nstart_sum for k, v in nstart.items()}
+    nnodes = G.number_of_nodes()
+    # make up to max_iter iterations
+    for _ in range(max_iter):
+        xlast = x
+        x = xlast.copy()  # Start with xlast times I to iterate with (A+I)
+        # do the multiplication y^T = x^T A (left eigenvector)
+        for n in x:
+            for nbr in G[n]:
+                w = G[n][nbr].get(weight, 1) if weight else 1
+                x[nbr] += xlast[n] * w
+        # Normalize the vector. The normalization denominator `norm`
+        # should never be zero by the Perron--Frobenius
+        # theorem. However, in case it is due to numerical error, we
+        # assume the norm to be one instead.
+        norm = math.hypot(*x.values()) or 1
+        x = {k: v / norm for k, v in x.items()}
+        # Check for convergence (in the L_1 norm).
+        if sum(abs(x[n] - xlast[n]) for n in x) < nnodes * tol:
+            return x
+    raise nx.PowerIterationFailedConvergence(max_iter)
+
+
+@nx._dispatchable(edge_attrs="weight")
 def eigenvector_centrality_numpy(G, weight=None, max_iter=50, tol=0):
-    """Compute the eigenvector centrality for the graph G.
+    r"""Compute the eigenvector centrality for the graph G.

     Eigenvector centrality computes the centrality for a node by adding
     the centrality of its predecessors. The centrality for node $i$ is the
-    $i$-th element of a left eigenvector associated with the eigenvalue $\\lambda$
+    $i$-th element of a left eigenvector associated with the eigenvalue $\lambda$
     of maximum modulus that is positive. Such an eigenvector $x$ is
     defined up to a multiplicative constant by the equation

     .. math::

-         \\lambda x^T = x^T A,
+         \lambda x^T = x^T A,

     where $A$ is the adjacency matrix of the graph G. By definition of
     row-column product, the equation above is equivalent to

     .. math::

-        \\lambda x_i = \\sum_{j\\to i}x_j.
+        \lambda x_i = \sum_{j\to i}x_j.

     That is, adding the eigenvector centralities of the predecessors of
     $i$ one obtains the eigenvector centrality of $i$ multiplied by
-    $\\lambda$. In the case of undirected graphs, $x$ also solves the familiar
-    right-eigenvector equation $Ax = \\lambda x$.
+    $\lambda$. In the case of undirected graphs, $x$ also solves the familiar
+    right-eigenvector equation $Ax = \lambda x$.

     By virtue of the Perron–Frobenius theorem [1]_, if G is strongly
     connected there is a unique eigenvector $x$, and all its entries
     are strictly positive.

     If G is not strongly connected there might be several left
-    eigenvectors associated with $\\lambda$, and some of their elements
+    eigenvectors associated with $\lambda$, and some of their elements
     might be zero.

     Parameters
@@ -292,4 +325,17 @@ def eigenvector_centrality_numpy(G, weight=None, max_iter=50, tol=0):
     .. [7] Arnoldi iteration:: https://en.wikipedia.org/wiki/Arnoldi_iteration

     """
-    pass
+    import numpy as np
+    import scipy as sp
+
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept(
+            "cannot compute centrality for the null graph"
+        )
+    M = nx.to_scipy_sparse_array(G, nodelist=list(G), weight=weight, dtype=float)
+    _, eigenvector = sp.sparse.linalg.eigs(
+        M.T, k=1, which="LR", maxiter=max_iter, tol=tol
+    )
+    largest = eigenvector.flatten().real
+    norm = np.sign(largest.sum()) * sp.linalg.norm(largest)
+    return dict(zip(G, (largest / norm).tolist()))
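The two implementations should agree up to the iteration tolerance; a minimal cross-check (the path graph is arbitrary; the numpy variant assumes NumPy and SciPy):

    import networkx as nx

    G = nx.path_graph(5)
    c_iter = nx.eigenvector_centrality(G, max_iter=500, tol=1e-8)
    c_np = nx.eigenvector_centrality_numpy(G)
    # both are normalized to unit Euclidean norm with positive entries,
    # so entrywise differences should be tiny
    assert all(abs(c_iter[n] - c_np[n]) < 1e-4 for n in G)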
diff --git a/networkx/algorithms/centrality/flow_matrix.py b/networkx/algorithms/centrality/flow_matrix.py
index 406d0bc08..3874f6b2f 100644
--- a/networkx/algorithms/centrality/flow_matrix.py
+++ b/networkx/algorithms/centrality/flow_matrix.py
@@ -1,12 +1,44 @@
+# Helpers for current-flow betweenness and current-flow closeness
+# Lazy computations for inverse Laplacian and flow-matrix rows.
 import networkx as nx


-class InverseLaplacian:
+@nx._dispatchable(edge_attrs="weight")
+def flow_matrix_row(G, weight=None, dtype=float, solver="lu"):
+    # Generate a row of the current-flow matrix
+    import numpy as np
+
+    solvername = {
+        "full": FullInverseLaplacian,
+        "lu": SuperLUInverseLaplacian,
+        "cg": CGInverseLaplacian,
+    }
+    n = G.number_of_nodes()
+    L = nx.laplacian_matrix(G, nodelist=range(n), weight=weight).asformat("csc")
+    L = L.astype(dtype)
+    C = solvername[solver](L, dtype=dtype)  # initialize solver
+    w = C.w  # w is the Laplacian matrix width
+    # row-by-row flow matrix
+    for u, v in sorted(sorted((u, v)) for u, v in G.edges()):
+        B = np.zeros(w, dtype=dtype)
+        c = G[u][v].get(weight, 1.0)
+        B[u % w] = c
+        B[v % w] = -c
+        # get only the rows needed in the inverse laplacian
+        # and multiply to get the flow matrix row
+        row = B @ C.get_rows(u, v)
+        yield row, (u, v)
+

+# Class to compute the inverse Laplacian only for specified rows
+# Allows computation of the current-flow matrix without storing the
+# entire inverse Laplacian matrix
+class InverseLaplacian:
     def __init__(self, L, width=None, dtype=None):
         global np
         import numpy as np
-        n, n = L.shape
+
+        (n, n) = L.shape
         self.dtype = dtype
         self.n = n
         if width is None:
@@ -17,14 +49,82 @@ class InverseLaplacian:
         self.L1 = L[1:, 1:]
         self.init_solver(L)

+    def init_solver(self, L):
+        pass
+
+    def solve(self, r):
+        raise nx.NetworkXError("Implement solver")
+
+    def solve_inverse(self, r):
+        raise nx.NetworkXError("Implement solver")
+
+    def get_rows(self, r1, r2):
+        for r in range(r1, r2 + 1):
+            self.C[r % self.w, 1:] = self.solve_inverse(r)
+        return self.C
+
+    def get_row(self, r):
+        self.C[r % self.w, 1:] = self.solve_inverse(r)
+        return self.C[r % self.w]
+
+    def width(self, L):
+        m = 0
+        for i, row in enumerate(L):
+            w = 0
+            x, y = np.nonzero(row)
+            if len(y) > 0:
+                v = y - i
+                w = v.max() - v.min() + 1
+                m = max(w, m)
+        return m
+

 class FullInverseLaplacian(InverseLaplacian):
-    pass
+    def init_solver(self, L):
+        self.IL = np.zeros(L.shape, dtype=self.dtype)
+        self.IL[1:, 1:] = np.linalg.inv(self.L1.todense())
+
+    def solve(self, rhs):
+        s = np.zeros(rhs.shape, dtype=self.dtype)
+        s = self.IL @ rhs
+        return s
+
+    def solve_inverse(self, r):
+        return self.IL[r, 1:]


 class SuperLUInverseLaplacian(InverseLaplacian):
-    pass
+    def init_solver(self, L):
+        import scipy as sp
+
+        self.lusolve = sp.sparse.linalg.factorized(self.L1.tocsc())
+
+    def solve_inverse(self, r):
+        rhs = np.zeros(self.n, dtype=self.dtype)
+        rhs[r] = 1
+        return self.lusolve(rhs[1:])
+
+    def solve(self, rhs):
+        s = np.zeros(rhs.shape, dtype=self.dtype)
+        s[1:] = self.lusolve(rhs[1:])
+        return s


 class CGInverseLaplacian(InverseLaplacian):
-    pass
+    def init_solver(self, L):
+        global sp
+        import scipy as sp
+
+        ilu = sp.sparse.linalg.spilu(self.L1.tocsc())
+        n = self.n - 1
+        self.M = sp.sparse.linalg.LinearOperator(shape=(n, n), matvec=ilu.solve)
+
+    def solve(self, rhs):
+        s = np.zeros(rhs.shape, dtype=self.dtype)
+        s[1:] = sp.sparse.linalg.cg(self.L1, rhs[1:], M=self.M, atol=0)[0]
+        return s
+
+    def solve_inverse(self, r):
+        rhs = np.zeros(self.n, self.dtype)
+        rhs[r] = 1
+        return sp.sparse.linalg.cg(self.L1, rhs[1:], M=self.M, atol=0)[0]
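flow_matrix_row is an internal helper: it assumes nodes are already labeled 0..n-1 (the public callers arrange this via the reverse Cuthill-McKee relabeling) and yields one row of the current-flow matrix per edge. A direct-use sketch, assuming SciPy is available for the sparse Laplacian:

    import networkx as nx
    from networkx.algorithms.centrality.flow_matrix import flow_matrix_row

    G = nx.cycle_graph(4)  # nodes already labeled 0..3, as the helper expects
    for row, (u, v) in flow_matrix_row(G, solver="lu"):
        # row[i] - row[j] is (roughly) the current through edge (u, v)
        # when a unit current enters at node i and exits at node j
        print((u, v), row.round(3))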
diff --git a/networkx/algorithms/centrality/group.py b/networkx/algorithms/centrality/group.py
index 34928993a..66fd309ff 100644
--- a/networkx/algorithms/centrality/group.py
+++ b/networkx/algorithms/centrality/group.py
@@ -1,27 +1,37 @@
 """Group centrality measures."""
 from copy import deepcopy
+
 import networkx as nx
-from networkx.algorithms.centrality.betweenness import _accumulate_endpoints, _single_source_dijkstra_path_basic, _single_source_shortest_path_basic
+from networkx.algorithms.centrality.betweenness import (
+    _accumulate_endpoints,
+    _single_source_dijkstra_path_basic,
+    _single_source_shortest_path_basic,
+)
 from networkx.utils.decorators import not_implemented_for
-__all__ = ['group_betweenness_centrality', 'group_closeness_centrality',
-    'group_degree_centrality', 'group_in_degree_centrality',
-    'group_out_degree_centrality', 'prominent_group']
+
+__all__ = [
+    "group_betweenness_centrality",
+    "group_closeness_centrality",
+    "group_degree_centrality",
+    "group_in_degree_centrality",
+    "group_out_degree_centrality",
+    "prominent_group",
+]


-@nx._dispatchable(edge_attrs='weight')
-def group_betweenness_centrality(G, C, normalized=True, weight=None,
-    endpoints=False):
-    """Compute the group betweenness centrality for a group of nodes.
+@nx._dispatchable(edge_attrs="weight")
+def group_betweenness_centrality(G, C, normalized=True, weight=None, endpoints=False):
+    r"""Compute the group betweenness centrality for a group of nodes.

     Group betweenness centrality of a group of nodes $C$ is the sum of the
     fraction of all-pairs shortest paths that pass through any vertex in $C$

     .. math::

-       c_B(v) =\\sum_{s,t \\in V} \\frac{\\sigma(s, t|v)}{\\sigma(s, t)}
+       c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)}

-    where $V$ is the set of nodes, $\\sigma(s, t)$ is the number of
-    shortest $(s, t)$-paths, and $\\sigma(s, t|C)$ is the number of
+    where $V$ is the set of nodes, $\sigma(s, t)$ is the number of
+    shortest $(s, t)$-paths, and $\sigma(s, t|C)$ is the number of
     those paths passing through some node in group $C$. Note that
     $(s, t)$ are not members of the group ($V-C$ is the set of nodes
     in $V$ that are not in $C$).
@@ -103,13 +113,134 @@ def group_betweenness_centrality(G, C, normalized=True, weight=None,
        https://journals.aps.org/pre/pdf/10.1103/PhysRevE.76.056709

     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
-def prominent_group(G, k, weight=None, C=None, endpoints=False, normalized=
-    True, greedy=False):
-    """Find the prominent group of size $k$ in graph $G$. The prominence of the
+    GBC = []  # initialize betweenness
+    list_of_groups = True
+    # check whether C contains one group or many groups
+    if any(el in G for el in C):
+        C = [C]
+        list_of_groups = False
+    set_v = {node for group in C for node in group}
+    if set_v - G.nodes:  # element(s) of C not in G
+        raise nx.NodeNotFound(f"The node(s) {set_v - G.nodes} are in C but not in G.")
+
+    # pre-processing
+    PB, sigma, D = _group_preprocessing(G, set_v, weight)
+
+    # the algorithm for each group
+    for group in C:
+        group = set(group)  # set of nodes in group
+        # initialize the matrices of the sigma and the PB
+        GBC_group = 0
+        sigma_m = deepcopy(sigma)
+        PB_m = deepcopy(PB)
+        sigma_m_v = deepcopy(sigma_m)
+        PB_m_v = deepcopy(PB_m)
+        for v in group:
+            GBC_group += PB_m[v][v]
+            for x in group:
+                for y in group:
+                    dxvy = 0
+                    dxyv = 0
+                    dvxy = 0
+                    if not (
+                        sigma_m[x][y] == 0 or sigma_m[x][v] == 0 or sigma_m[v][y] == 0
+                    ):
+                        if D[x][v] == D[x][y] + D[y][v]:
+                            dxyv = sigma_m[x][y] * sigma_m[y][v] / sigma_m[x][v]
+                        if D[x][y] == D[x][v] + D[v][y]:
+                            dxvy = sigma_m[x][v] * sigma_m[v][y] / sigma_m[x][y]
+                        if D[v][y] == D[v][x] + D[x][y]:
+                            dvxy = sigma_m[v][x] * sigma[x][y] / sigma[v][y]
+                    sigma_m_v[x][y] = sigma_m[x][y] * (1 - dxvy)
+                    PB_m_v[x][y] = PB_m[x][y] - PB_m[x][y] * dxvy
+                    if y != v:
+                        PB_m_v[x][y] -= PB_m[x][v] * dxyv
+                    if x != v:
+                        PB_m_v[x][y] -= PB_m[v][y] * dvxy
+            sigma_m, sigma_m_v = sigma_m_v, sigma_m
+            PB_m, PB_m_v = PB_m_v, PB_m
+
+        # endpoints
+        v, c = len(G), len(group)
+        if not endpoints:
+            scale = 0
+            # if the graph is connected then subtract the endpoints from
+            # the count for all the nodes in the graph. else count how many
+            # nodes are connected to the group's nodes and subtract that.
+            if nx.is_directed(G):
+                if nx.is_strongly_connected(G):
+                    scale = c * (2 * v - c - 1)
+            elif nx.is_connected(G):
+                scale = c * (2 * v - c - 1)
+            if scale == 0:
+                for group_node1 in group:
+                    for node in D[group_node1]:
+                        if node != group_node1:
+                            if node in group:
+                                scale += 1
+                            else:
+                                scale += 2
+            GBC_group -= scale
+
+        # normalized
+        if normalized:
+            scale = 1 / ((v - c) * (v - c - 1))
+            GBC_group *= scale
+
+        # If undirected then count only the undirected edges
+        elif not G.is_directed():
+            GBC_group /= 2
+
+        GBC.append(GBC_group)
+    if list_of_groups:
+        return GBC
+    return GBC[0]
+
+
+def _group_preprocessing(G, set_v, weight):
+    sigma = {}
+    delta = {}
+    D = {}
+    betweenness = dict.fromkeys(G, 0)
+    for s in G:
+        if weight is None:  # use BFS
+            S, P, sigma[s], D[s] = _single_source_shortest_path_basic(G, s)
+        else:  # use Dijkstra's algorithm
+            S, P, sigma[s], D[s] = _single_source_dijkstra_path_basic(G, s, weight)
+        betweenness, delta[s] = _accumulate_endpoints(betweenness, S, P, sigma[s], s)
+        for i in delta[s]:  # add the paths from s to i and rescale sigma
+            if s != i:
+                delta[s][i] += 1
+            if weight is not None:
+                sigma[s][i] = sigma[s][i] / 2
+    # building the path betweenness matrix only for nodes that appear in the group
+    PB = dict.fromkeys(G)
+    for group_node1 in set_v:
+        PB[group_node1] = dict.fromkeys(G, 0.0)
+        for group_node2 in set_v:
+            if group_node2 not in D[group_node1]:
+                continue
+            for node in G:
+                # if node is connected to the two group nodes then continue
+                if group_node2 in D[node] and group_node1 in D[node]:
+                    if (
+                        D[node][group_node2]
+                        == D[node][group_node1] + D[group_node1][group_node2]
+                    ):
+                        PB[group_node1][group_node2] += (
+                            delta[node][group_node2]
+                            * sigma[node][group_node1]
+                            * sigma[group_node1][group_node2]
+                            / sigma[node][group_node2]
+                        )
+    return PB, sigma, D
+
+
+@nx._dispatchable(edge_attrs="weight")
+def prominent_group(
+    G, k, weight=None, C=None, endpoints=False, normalized=True, greedy=False
+):
+    r"""Find the prominent group of size $k$ in graph $G$. The prominence of the
     group is evaluated by the group betweenness centrality.

     Group betweenness centrality of a group of nodes $C$ is the sum of the
@@ -117,10 +248,10 @@ def prominent_group(G, k, weight=None, C=None, endpoints=False, normalized=

     .. math::

-       c_B(v) =\\sum_{s,t \\in V} \\frac{\\sigma(s, t|v)}{\\sigma(s, t)}
+       c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)}

-    where $V$ is the set of nodes, $\\sigma(s, t)$ is the number of
-    shortest $(s, t)$-paths, and $\\sigma(s, t|C)$ is the number of
+    where $V$ is the set of nodes, $\sigma(s, t)$ is the number of
+    shortest $(s, t)$-paths, and $\sigma(s, t|C)$ is the number of
     those paths passing through some node in group $C$. Note that
     $(s, t)$ are not members of the group ($V-C$ is the set of nodes
     in $V$ that are not in $C$).
@@ -208,21 +339,223 @@ def prominent_group(G, k, weight=None, C=None, endpoints=False, normalized=
        "Fast algorithm for successive computation of group betweenness centrality."
        https://journals.aps.org/pre/pdf/10.1103/PhysRevE.76.056709
     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
+    import numpy as np
+    import pandas as pd
+
+    if C is not None:
+        C = set(C)
+        if C - G.nodes:  # element(s) of C not in G
+            raise nx.NodeNotFound(f"The node(s) {C - G.nodes} are in C but not in G.")
+        nodes = list(G.nodes - C)
+    else:
+        nodes = list(G.nodes)
+    DF_tree = nx.Graph()
+    DF_tree.__networkx_cache__ = None  # Disable caching
+    PB, sigma, D = _group_preprocessing(G, nodes, weight)
+    betweenness = pd.DataFrame.from_dict(PB)
+    if C is not None:
+        for node in C:
+            # remove from the betweenness all the nodes not part of the group
+            betweenness.drop(index=node, inplace=True)
+            betweenness.drop(columns=node, inplace=True)
+    CL = [node for _, node in sorted(zip(np.diag(betweenness), nodes), reverse=True)]
+    max_GBC = 0
+    max_group = []
+    DF_tree.add_node(
+        1,
+        CL=CL,
+        betweenness=betweenness,
+        GBC=0,
+        GM=[],
+        sigma=sigma,
+        cont=dict(zip(nodes, np.diag(betweenness))),
+    )
+
+    # the algorithm
+    DF_tree.nodes[1]["heu"] = 0
+    for i in range(k):
+        DF_tree.nodes[1]["heu"] += DF_tree.nodes[1]["cont"][DF_tree.nodes[1]["CL"][i]]
+    max_GBC, DF_tree, max_group = _dfbnb(
+        G, k, DF_tree, max_GBC, 1, D, max_group, nodes, greedy
+    )
+
+    v = len(G)
+    if not endpoints:
+        scale = 0
+        # if the graph is connected then subtract the endpoints from
+        # the count for all the nodes in the graph. else count how many
+        # nodes are connected to the group's nodes and subtract that.
+        if nx.is_directed(G):
+            if nx.is_strongly_connected(G):
+                scale = k * (2 * v - k - 1)
+        elif nx.is_connected(G):
+            scale = k * (2 * v - k - 1)
+        if scale == 0:
+            for group_node1 in max_group:
+                for node in D[group_node1]:
+                    if node != group_node1:
+                        if node in max_group:
+                            scale += 1
+                        else:
+                            scale += 2
+        max_GBC -= scale
+
+    # normalized
+    if normalized:
+        scale = 1 / ((v - k) * (v - k - 1))
+        max_GBC *= scale
+
+    # If undirected then count only the undirected edges
+    elif not G.is_directed():
+        max_GBC /= 2
+    max_GBC = float("%.2f" % max_GBC)
+    return max_GBC, max_group
+
+
+def _dfbnb(G, k, DF_tree, max_GBC, root, D, max_group, nodes, greedy):
+    # stopping condition - if we found a group of size k with a higher GBC, return it
+    if len(DF_tree.nodes[root]["GM"]) == k and DF_tree.nodes[root]["GBC"] > max_GBC:
+        return DF_tree.nodes[root]["GBC"], DF_tree, DF_tree.nodes[root]["GM"]
+    # stopping condition - prune if the group already has k members, if fewer than
+    # k - |GM| candidates remain in the candidate list, or if the GBC plus the
+    # heuristic cannot exceed the maximal GBC found so far
+    if (
+        len(DF_tree.nodes[root]["GM"]) == k
+        or len(DF_tree.nodes[root]["CL"]) <= k - len(DF_tree.nodes[root]["GM"])
+        or DF_tree.nodes[root]["GBC"] + DF_tree.nodes[root]["heu"] <= max_GBC
+    ):
+        return max_GBC, DF_tree, max_group
+
+    # finding the heuristic of both children
+    node_p, node_m, DF_tree = _heuristic(k, root, DF_tree, D, nodes, greedy)
+
+    # expand first the child with the larger heuristic + GBC;
+    # if greedy, only expand the plus node
+    if greedy:
+        max_GBC, DF_tree, max_group = _dfbnb(
+            G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy
+        )
+
+    elif (
+        DF_tree.nodes[node_p]["GBC"] + DF_tree.nodes[node_p]["heu"]
+        > DF_tree.nodes[node_m]["GBC"] + DF_tree.nodes[node_m]["heu"]
+    ):
+        max_GBC, DF_tree, max_group = _dfbnb(
+            G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy
+        )
+        max_GBC, DF_tree, max_group = _dfbnb(
+            G, k, DF_tree, max_GBC, node_m, D, max_group, nodes, greedy
+        )
+    else:
+        max_GBC, DF_tree, max_group = _dfbnb(
+            G, k, DF_tree, max_GBC, node_m, D, max_group, nodes, greedy
+        )
+        max_GBC, DF_tree, max_group = _dfbnb(
+            G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy
+        )
+    return max_GBC, DF_tree, max_group
+
+
+def _heuristic(k, root, DF_tree, D, nodes, greedy):
+    import numpy as np
+
+    # This helper function adds two nodes to DF_tree - a left child and a
+    # right child - and finds their heuristic, CL, GBC, and GM
+    node_p = DF_tree.number_of_nodes() + 1
+    node_m = DF_tree.number_of_nodes() + 2
+    added_node = DF_tree.nodes[root]["CL"][0]
+
+    # adding the plus node
+    DF_tree.add_nodes_from([(node_p, deepcopy(DF_tree.nodes[root]))])
+    DF_tree.nodes[node_p]["GM"].append(added_node)
+    DF_tree.nodes[node_p]["GBC"] += DF_tree.nodes[node_p]["cont"][added_node]
+    root_node = DF_tree.nodes[root]
+    for x in nodes:
+        for y in nodes:
+            dxvy = 0
+            dxyv = 0
+            dvxy = 0
+            if not (
+                root_node["sigma"][x][y] == 0
+                or root_node["sigma"][x][added_node] == 0
+                or root_node["sigma"][added_node][y] == 0
+            ):
+                if D[x][added_node] == D[x][y] + D[y][added_node]:
+                    dxyv = (
+                        root_node["sigma"][x][y]
+                        * root_node["sigma"][y][added_node]
+                        / root_node["sigma"][x][added_node]
+                    )
+                if D[x][y] == D[x][added_node] + D[added_node][y]:
+                    dxvy = (
+                        root_node["sigma"][x][added_node]
+                        * root_node["sigma"][added_node][y]
+                        / root_node["sigma"][x][y]
+                    )
+                if D[added_node][y] == D[added_node][x] + D[x][y]:
+                    dvxy = (
+                        root_node["sigma"][added_node][x]
+                        * root_node["sigma"][x][y]
+                        / root_node["sigma"][added_node][y]
+                    )
+            DF_tree.nodes[node_p]["sigma"][x][y] = root_node["sigma"][x][y] * (1 - dxvy)
+            DF_tree.nodes[node_p]["betweenness"].loc[y, x] = (
+                root_node["betweenness"][x][y] - root_node["betweenness"][x][y] * dxvy
+            )
+            if y != added_node:
+                DF_tree.nodes[node_p]["betweenness"].loc[y, x] -= (
+                    root_node["betweenness"][x][added_node] * dxyv
+                )
+            if x != added_node:
+                DF_tree.nodes[node_p]["betweenness"].loc[y, x] -= (
+                    root_node["betweenness"][added_node][y] * dvxy
+                )
+
+    DF_tree.nodes[node_p]["CL"] = [
+        node
+        for _, node in sorted(
+            zip(np.diag(DF_tree.nodes[node_p]["betweenness"]), nodes), reverse=True
+        )
+        if node not in DF_tree.nodes[node_p]["GM"]
+    ]
+    DF_tree.nodes[node_p]["cont"] = dict(
+        zip(nodes, np.diag(DF_tree.nodes[node_p]["betweenness"]))
+    )
+    DF_tree.nodes[node_p]["heu"] = 0
+    for i in range(k - len(DF_tree.nodes[node_p]["GM"])):
+        DF_tree.nodes[node_p]["heu"] += DF_tree.nodes[node_p]["cont"][
+            DF_tree.nodes[node_p]["CL"][i]
+        ]
+
+    # adding the minus node - don't move the first node of the CL into GM
+    # Insert the minus node only if the algorithm isn't the greedy variant
+    if not greedy:
+        DF_tree.add_nodes_from([(node_m, deepcopy(DF_tree.nodes[root]))])
+        DF_tree.nodes[node_m]["CL"].pop(0)
+        DF_tree.nodes[node_m]["cont"].pop(added_node)
+        DF_tree.nodes[node_m]["heu"] = 0
+        for i in range(k - len(DF_tree.nodes[node_m]["GM"])):
+            DF_tree.nodes[node_m]["heu"] += DF_tree.nodes[node_m]["cont"][
+                DF_tree.nodes[node_m]["CL"][i]
+            ]
+    else:
+        node_m = None
+
+    return node_p, node_m, DF_tree
+
+
+@nx._dispatchable(edge_attrs="weight")
 def group_closeness_centrality(G, S, weight=None):
-    """Compute the group closeness centrality for a group of nodes.
+    r"""Compute the group closeness centrality for a group of nodes.

     Group closeness centrality of a group of nodes $S$ is a measure
     of how close the group is to the other nodes in the graph.

     .. math::

-       c_{close}(S) = \\frac{|V-S|}{\\sum_{v \\in V-S} d_{S, v}}
+       c_{close}(S) = \frac{|V-S|}{\sum_{v \in V-S} d_{S, v}}

-       d_{S, v} = min_{u \\in S} (d_{u, v})
+       d_{S, v} = min_{u \in S} (d_{u, v})

     where $V$ is the set of nodes, $d_{S, v}$ is the distance of
     the group $S$ from $v$ defined as above. ($V-S$ is the set of nodes
@@ -288,7 +621,24 @@ def group_closeness_centrality(G, S, weight=None):
        WWWConference Proceedings, 2014. 689-694.
        https://doi.org/10.1145/2567948.2579356
     """
-    pass
+    if G.is_directed():
+        G = G.reverse()  # reverse view
+    closeness = 0  # initialize to 0
+    V = set(G)  # set of nodes in G
+    S = set(S)  # set of nodes in group S
+    V_S = V - S  # set of nodes in V but not S
+    shortest_path_lengths = nx.multi_source_dijkstra_path_length(G, S, weight=weight)
+    # accumulation
+    for v in V_S:
+        try:
+            closeness += shortest_path_lengths[v]
+        except KeyError:  # no path exists
+            closeness += 0
+    try:
+        closeness = len(V_S) / closeness
+    except ZeroDivisionError:  # 1 / 0 assumed as 0
+        closeness = 0
+    return closeness


 @nx._dispatchable
@@ -337,10 +687,12 @@ def group_degree_centrality(G, S):
        Journal of Mathematical Sociology. 23(3): 181-201. 1999.
        http://www.analytictech.com/borgatti/group_centrality.htm
     """
-    pass
+    centrality = len(set().union(*[set(G.neighbors(i)) for i in S]) - set(S))
+    centrality /= len(G.nodes()) - len(S)
+    return centrality


-@not_implemented_for('undirected')
+@not_implemented_for("undirected")
 @nx._dispatchable
 def group_in_degree_centrality(G, S):
     """Compute the group in-degree centrality for a group of nodes.
@@ -384,10 +736,10 @@ def group_in_degree_centrality(G, S):
     `G.neighbors(i)` gives nodes with an outward edge from i, in a DiGraph,
     so for group in-degree centrality, the reverse graph is used.
     """
-    pass
+    return group_degree_centrality(G.reverse(), S)


-@not_implemented_for('undirected')
+@not_implemented_for("undirected")
 @nx._dispatchable
 def group_out_degree_centrality(G, S):
     """Compute the group out-degree centrality for a group of nodes.
@@ -431,4 +783,4 @@ def group_out_degree_centrality(G, S):
     `G.neighbors(i)` gives nodes with an outward edge from i, in a DiGraph,
     so for group out-degree centrality, the graph itself is used.
     """
-    pass
+    return group_degree_centrality(G, S)
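A quick usage sketch for the group measures (the path graph and groups are arbitrary; prominent_group additionally assumes NumPy and pandas are available):

    import networkx as nx

    G = nx.path_graph(6)
    print(nx.group_betweenness_centrality(G, [2, 3]))
    print(nx.group_closeness_centrality(G, [0, 5]))
    print(nx.group_degree_centrality(G, [2, 3]))
    # branch-and-bound search for the best group of size 2 by group betweenness
    max_gbc, group = nx.prominent_group(G, k=2)
    print(max_gbc, group)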
diff --git a/networkx/algorithms/centrality/harmonic.py b/networkx/algorithms/centrality/harmonic.py
index 8d1daac3f..9cd9f7f08 100644
--- a/networkx/algorithms/centrality/harmonic.py
+++ b/networkx/algorithms/centrality/harmonic.py
@@ -1,19 +1,21 @@
 """Functions for computing the harmonic centrality of a graph."""
 from functools import partial
+
 import networkx as nx
-__all__ = ['harmonic_centrality']
+
+__all__ = ["harmonic_centrality"]


-@nx._dispatchable(edge_attrs='distance')
+@nx._dispatchable(edge_attrs="distance")
 def harmonic_centrality(G, nbunch=None, distance=None, sources=None):
-    """Compute harmonic centrality for nodes.
+    r"""Compute harmonic centrality for nodes.

     Harmonic centrality [1]_ of a node `u` is the sum of the reciprocal
     of the shortest path distances from all other nodes to `u`

     .. math::

-        C(u) = \\sum_{v \\neq u} \\frac{1}{d(v, u)}
+        C(u) = \sum_{v \neq u} \frac{1}{d(v, u)}

     where `d(v, u)` is the shortest-path distance between `v` and `u`.

@@ -61,4 +63,18 @@ def harmonic_centrality(G, nbunch=None, distance=None, sources=None):
     .. [1] Boldi, Paolo, and Sebastiano Vigna. "Axioms for centrality."
            Internet Mathematics 10.3-4 (2014): 222-262.
     """
-    pass
+
+    nbunch = set(G.nbunch_iter(nbunch)) if nbunch is not None else set(G.nodes)
+    sources = set(G.nbunch_iter(sources)) if sources is not None else G.nodes
+
+    spl = partial(nx.shortest_path_length, G, weight=distance)
+    centrality = {u: 0 for u in nbunch}
+    for v in sources:
+        dist = spl(v)
+        for u in nbunch.intersection(dist):
+            d = dist[u]
+            if d == 0:  # handle u == v and edges with 0 weight
+                continue
+            centrality[u] += 1 / d
+
+    return centrality
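A small check of the restored implementation, including the nbunch/sources restriction (values follow directly from summing reciprocal distances):

    import networkx as nx

    G = nx.path_graph(4)
    print(nx.harmonic_centrality(G))
    # node 0 as seen only from sources 2 and 3: 1/2 + 1/3 ~= 0.833
    print(nx.harmonic_centrality(G, nbunch=[0], sources=[2, 3]))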
diff --git a/networkx/algorithms/centrality/katz.py b/networkx/algorithms/centrality/katz.py
index a5ccdf8a7..d85ffd2dc 100644
--- a/networkx/algorithms/centrality/katz.py
+++ b/networkx/algorithms/centrality/katz.py
@@ -1,15 +1,25 @@
 """Katz centrality."""
 import math
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['katz_centrality', 'katz_centrality_numpy']
+
+__all__ = ["katz_centrality", "katz_centrality_numpy"]


-@not_implemented_for('multigraph')
-@nx._dispatchable(edge_attrs='weight')
-def katz_centrality(G, alpha=0.1, beta=1.0, max_iter=1000, tol=1e-06,
-    nstart=None, normalized=True, weight=None):
-    """Compute the Katz centrality for the nodes of the graph G.
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
+def katz_centrality(
+    G,
+    alpha=0.1,
+    beta=1.0,
+    max_iter=1000,
+    tol=1.0e-6,
+    nstart=None,
+    normalized=True,
+    weight=None,
+):
+    r"""Compute the Katz centrality for the nodes of the graph G.

     Katz centrality computes the centrality for a node based on the centrality
     of its neighbors. It is a generalization of the eigenvector centrality. The
@@ -17,15 +27,15 @@ def katz_centrality(G, alpha=0.1, beta=1.0, max_iter=1000, tol=1e-06,

     .. math::

-        x_i = \\alpha \\sum_{j} A_{ij} x_j + \\beta,
+        x_i = \alpha \sum_{j} A_{ij} x_j + \beta,

-    where $A$ is the adjacency matrix of graph G with eigenvalues $\\lambda$.
+    where $A$ is the adjacency matrix of graph G with eigenvalues $\lambda$.

-    The parameter $\\beta$ controls the initial centrality and
+    The parameter $\beta$ controls the initial centrality and

     .. math::

-        \\alpha < \\frac{1}{\\lambda_{\\max}}.
+        \alpha < \frac{1}{\lambda_{\max}}.

     Katz centrality computes the relative influence of a node within a
     network by measuring the number of the immediate neighbors (first
@@ -33,8 +43,8 @@ def katz_centrality(G, alpha=0.1, beta=1.0, max_iter=1000, tol=1e-06,
     to the node under consideration through these immediate neighbors.

     Extra weight can be provided to immediate neighbors through the
-    parameter $\\beta$.  Connections made with distant neighbors
-    are, however, penalized by an attenuation factor $\\alpha$ which
+    parameter $\beta$.  Connections made with distant neighbors
+    are, however, penalized by an attenuation factor $\alpha$ which
     should be strictly less than the inverse largest eigenvalue of the
     adjacency matrix in order for the Katz centrality to be computed
     correctly. More information is provided in [1]_.
@@ -113,12 +123,12 @@ def katz_centrality(G, alpha=0.1, beta=1.0, max_iter=1000, tol=1e-06,
     corresponding to the largest eigenvalue of the adjacency matrix of ``G``.
     The parameter ``alpha`` should be strictly less than the inverse of the largest
     eigenvalue of the adjacency matrix for the algorithm to converge.
-    You can use ``max(nx.adjacency_spectrum(G))`` to get $\\lambda_{\\max}$ the largest
+    You can use ``max(nx.adjacency_spectrum(G))`` to get $\lambda_{\max}$ the largest
     eigenvalue of the adjacency matrix.
     The iteration will stop after ``max_iter`` iterations or an error tolerance of
     ``number_of_nodes(G) * tol`` has been reached.

-    For strongly connected graphs, as $\\alpha \\to 1/\\lambda_{\\max}$, and $\\beta > 0$,
+    For strongly connected graphs, as $\alpha \to 1/\lambda_{\max}$, and $\beta > 0$,
     Katz centrality approaches the results for eigenvector centrality.

     For directed graphs this finds "left" eigenvectors which corresponds
@@ -135,14 +145,58 @@ def katz_centrality(G, alpha=0.1, beta=1.0, max_iter=1000, tol=1e-06,
        Psychometrika 18(1):39–43, 1953
        https://link.springer.com/content/pdf/10.1007/BF02289026.pdf
     """
-    pass
-
-
-@not_implemented_for('multigraph')
-@nx._dispatchable(edge_attrs='weight')
-def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True, weight=None
-    ):
-    """Compute the Katz centrality for the graph G.
+    if len(G) == 0:
+        return {}
+
+    nnodes = G.number_of_nodes()
+
+    if nstart is None:
+        # choose starting vector with entries of 0
+        x = {n: 0 for n in G}
+    else:
+        x = nstart
+
+    try:
+        b = dict.fromkeys(G, float(beta))
+    except (TypeError, ValueError, AttributeError) as err:
+        b = beta
+        if set(beta) != set(G):
+            raise nx.NetworkXError(
+                "beta dictionary must have a value for every node"
+            ) from err
+
+    # make up to max_iter iterations
+    for _ in range(max_iter):
+        xlast = x
+        x = dict.fromkeys(xlast, 0)
+        # do the multiplication y^T = Alpha * x^T A + Beta
+        for n in x:
+            for nbr in G[n]:
+                x[nbr] += xlast[n] * G[n][nbr].get(weight, 1)
+        for n in x:
+            x[n] = alpha * x[n] + b[n]
+
+        # check convergence
+        error = sum(abs(x[n] - xlast[n]) for n in x)
+        if error < nnodes * tol:
+            if normalized:
+                # normalize vector
+                try:
+                    s = 1.0 / math.hypot(*x.values())
+                except ZeroDivisionError:
+                    s = 1.0
+            else:
+                s = 1
+            for n in x:
+                x[n] *= s
+            return x
+    raise nx.PowerIterationFailedConvergence(max_iter)
+
+
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
+def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True, weight=None):
+    r"""Compute the Katz centrality for the graph G.

     Katz centrality computes the centrality for a node based on the centrality
     of its neighbors. It is a generalization of the eigenvector centrality. The
@@ -150,15 +204,15 @@ def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True, weight=None

     .. math::

-        x_i = \\alpha \\sum_{j} A_{ij} x_j + \\beta,
+        x_i = \alpha \sum_{j} A_{ij} x_j + \beta,

-    where $A$ is the adjacency matrix of graph G with eigenvalues $\\lambda$.
+    where $A$ is the adjacency matrix of graph G with eigenvalues $\lambda$.

-    The parameter $\\beta$ controls the initial centrality and
+    The parameter $\beta$ controls the initial centrality and

     .. math::

-        \\alpha < \\frac{1}{\\lambda_{\\max}}.
+        \alpha < \frac{1}{\lambda_{\max}}.

     Katz centrality computes the relative influence of a node within a
     network by measuring the number of the immediate neighbors (first
@@ -166,8 +220,8 @@ def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True, weight=None
     to the node under consideration through these immediate neighbors.

     Extra weight can be provided to immediate neighbors through the
-    parameter $\\beta$.  Connections made with distant neighbors
-    are, however, penalized by an attenuation factor $\\alpha$ which
+    parameter $\beta$.  Connections made with distant neighbors
+    are, however, penalized by an attenuation factor $\alpha$ which
     should be strictly less than the inverse largest eigenvalue of the
     adjacency matrix in order for the Katz centrality to be computed
     correctly. More information is provided in [1]_.
@@ -231,10 +285,10 @@ def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True, weight=None
     This algorithm uses a direct linear solver to solve the above equation.
     The parameter ``alpha`` should be strictly less than the inverse of the largest
     eigenvalue of the adjacency matrix for there to be a solution.
-    You can use ``max(nx.adjacency_spectrum(G))`` to get $\\lambda_{\\max}$ the largest
+    You can use ``max(nx.adjacency_spectrum(G))`` to get $\lambda_{\max}$ the largest
     eigenvalue of the adjacency matrix.

-    For strongly connected graphs, as $\\alpha \\to 1/\\lambda_{\\max}$, and $\\beta > 0$,
+    For strongly connected graphs, as $\alpha \to 1/\lambda_{\max}$, and $\beta > 0$,
     Katz centrality approaches the results for eigenvector centrality.

     For directed graphs this finds "left" eigenvectors which corresponds
@@ -251,4 +305,26 @@ def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True, weight=None
        Psychometrika 18(1):39–43, 1953
        https://link.springer.com/content/pdf/10.1007/BF02289026.pdf
     """
-    pass
+    import numpy as np
+
+    if len(G) == 0:
+        return {}
+    try:
+        nodelist = beta.keys()
+        if set(nodelist) != set(G):
+            raise nx.NetworkXError("beta dictionary must have a value for every node")
+        b = np.array(list(beta.values()), dtype=float)
+    except AttributeError:
+        nodelist = list(G)
+        try:
+            b = np.ones((len(nodelist), 1)) * beta
+        except (TypeError, ValueError, AttributeError) as err:
+            raise nx.NetworkXError("beta must be a number") from err
+
+    A = nx.adjacency_matrix(G, nodelist=nodelist, weight=weight).todense().T
+    n = A.shape[0]
+    centrality = np.linalg.solve(np.eye(n, n) - (alpha * A), b).squeeze()
+
+    # Normalize: rely on truediv to cast to float, then tolist to make Python numbers
+    norm = np.sign(sum(centrality)) * np.linalg.norm(centrality) if normalized else 1
+    return dict(zip(nodelist, (centrality / norm).tolist()))
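The iterative and direct solvers should coincide when alpha is safely below 1/lambda_max; a cross-check sketch (the path graph is arbitrary; katz_centrality_numpy assumes NumPy):

    import networkx as nx

    G = nx.path_graph(4)  # largest adjacency eigenvalue ~ 1.618, so alpha=0.1 converges
    c_iter = nx.katz_centrality(G, alpha=0.1, beta=1.0)
    c_np = nx.katz_centrality_numpy(G, alpha=0.1, beta=1.0)
    assert all(abs(c_iter[n] - c_np[n]) < 1e-4 for n in G)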
diff --git a/networkx/algorithms/centrality/laplacian.py b/networkx/algorithms/centrality/laplacian.py
index d9d1c0192..66207ed21 100644
--- a/networkx/algorithms/centrality/laplacian.py
+++ b/networkx/algorithms/centrality/laplacian.py
@@ -2,13 +2,15 @@
 Laplacian centrality measures.
 """
 import networkx as nx
-__all__ = ['laplacian_centrality']

+__all__ = ["laplacian_centrality"]

-@nx._dispatchable(edge_attrs='weight')
-def laplacian_centrality(G, normalized=True, nodelist=None, weight='weight',
-    walk_type=None, alpha=0.95):
-    """Compute the Laplacian centrality for nodes in the graph `G`.
+
+@nx._dispatchable(edge_attrs="weight")
+def laplacian_centrality(
+    G, normalized=True, nodelist=None, weight="weight", walk_type=None, alpha=0.95
+):
+    r"""Compute the Laplacian centrality for nodes in the graph `G`.

     The Laplacian Centrality of a node ``i`` is measured by the drop in the
     Laplacian Energy after deleting node ``i`` from the graph. The Laplacian Energy
@@ -16,13 +18,13 @@ def laplacian_centrality(G, normalized=True, nodelist=None, weight='weight',

     .. math::

-        C_L(u_i,G) = \\frac{(\\Delta E)_i}{E_L (G)} = \\frac{E_L (G)-E_L (G_i)}{E_L (G)}
+        C_L(u_i,G) = \frac{(\Delta E)_i}{E_L (G)} = \frac{E_L (G)-E_L (G_i)}{E_L (G)}

-        E_L (G) = \\sum_{i=0}^n \\lambda_i^2
+        E_L (G) = \sum_{i=0}^n \lambda_i^2

     Where $E_L (G)$ is the Laplacian energy of graph `G`,
     E_L (G_i) is the Laplacian energy of graph `G` after deleting node ``i``
-    and $\\lambda_i$ are the eigenvalues of `G`'s Laplacian matrix.
+    and $\lambda_i$ are the eigenvalues of `G`'s Laplacian matrix.
     This formula shows the normalized value. Without normalization,
     the numerator on the right side is returned.

@@ -96,4 +98,52 @@ def laplacian_centrality(G, normalized=True, nodelist=None, weight='weight',
     :func:`~networkx.linalg.laplacianmatrix.directed_laplacian_matrix`
     :func:`~networkx.linalg.laplacianmatrix.laplacian_matrix`
     """
-    pass
+    import numpy as np
+    import scipy as sp
+
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept("null graph has no centrality defined")
+    if G.size(weight=weight) == 0:
+        if normalized:
+            raise ZeroDivisionError("graph with no edges has zero full energy")
+        return {n: 0 for n in G}
+
+    if nodelist is not None:
+        nodeset = set(G.nbunch_iter(nodelist))
+        if len(nodeset) != len(nodelist):
+            raise nx.NetworkXError("nodelist has duplicate nodes or nodes not in G")
+        nodes = nodelist + [n for n in G if n not in nodeset]
+    else:
+        nodelist = nodes = list(G)
+
+    if G.is_directed():
+        lap_matrix = nx.directed_laplacian_matrix(G, nodes, weight, walk_type, alpha)
+    else:
+        lap_matrix = nx.laplacian_matrix(G, nodes, weight).toarray()
+
+    full_energy = np.power(sp.linalg.eigh(lap_matrix, eigvals_only=True), 2).sum()
+
+    # calculate laplacian centrality
+    laplace_centralities_dict = {}
+    for i, node in enumerate(nodelist):
+        # remove row and col i from lap_matrix
+        all_but_i = list(np.arange(lap_matrix.shape[0]))
+        all_but_i.remove(i)
+        A_2 = lap_matrix[all_but_i, :][:, all_but_i]
+
+        # Adjust diagonal for removed row
+        new_diag = lap_matrix.diagonal() - abs(lap_matrix[:, i])
+        np.fill_diagonal(A_2, new_diag[all_but_i])
+
+        if len(all_but_i) > 0:  # catches degenerate case of single node
+            new_energy = np.power(sp.linalg.eigh(A_2, eigvals_only=True), 2).sum()
+        else:
+            new_energy = 0.0
+
+        lapl_cent = full_energy - new_energy
+        if normalized:
+            lapl_cent = lapl_cent / full_energy
+
+        laplace_centralities_dict[node] = float(lapl_cent)
+
+    return laplace_centralities_dict
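A usage sketch (the graph is chosen arbitrarily; the eigenvalue computations assume NumPy and SciPy):

    import networkx as nx

    G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3)])
    # node 1 touches the most edges, so removing it should cause the
    # largest drop in Laplacian energy and hence the top score
    print(nx.laplacian_centrality(G))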
diff --git a/networkx/algorithms/centrality/load.py b/networkx/algorithms/centrality/load.py
index 7c858b0b5..50bc6210b 100644
--- a/networkx/algorithms/centrality/load.py
+++ b/networkx/algorithms/centrality/load.py
@@ -1,12 +1,13 @@
 """Load centrality."""
 from operator import itemgetter
+
 import networkx as nx
-__all__ = ['load_centrality', 'edge_load_centrality']
+
+__all__ = ["load_centrality", "edge_load_centrality"]


-@nx._dispatchable(edge_attrs='weight')
-def newman_betweenness_centrality(G, v=None, cutoff=None, normalized=True,
-    weight=None):
+@nx._dispatchable(edge_attrs="weight")
+def newman_betweenness_centrality(G, v=None, cutoff=None, normalized=True, weight=None):
     """Compute load centrality for nodes.

     The load centrality of a node is the fraction of all shortest
@@ -55,7 +56,30 @@ def newman_betweenness_centrality(G, v=None, cutoff=None, normalized=True,
        Physical Review Letters 87(27):1–4, 2001.
        https://doi.org/10.1103/PhysRevLett.87.278701
     """
-    pass
+    if v is not None:  # only one node
+        betweenness = 0.0
+        for source in G:
+            ubetween = _node_betweenness(G, source, cutoff, False, weight)
+            betweenness += ubetween[v] if v in ubetween else 0
+        if normalized:
+            order = G.order()
+            if order <= 2:
+                return betweenness  # no normalization b=0 for all nodes
+            betweenness *= 1.0 / ((order - 1) * (order - 2))
+    else:
+        betweenness = {}.fromkeys(G, 0.0)
+        for source in betweenness:
+            ubetween = _node_betweenness(G, source, cutoff, False, weight)
+            for vk in ubetween:
+                betweenness[vk] += ubetween[vk]
+        if normalized:
+            order = G.order()
+            if order <= 2:
+                return betweenness  # no normalization b=0 for all nodes
+            scale = 1.0 / ((order - 1) * (order - 2))
+            for v in betweenness:
+                betweenness[v] *= scale
+    return betweenness  # all nodes


 def _node_betweenness(G, source, cutoff=False, normalized=True, weight=None):
@@ -73,7 +97,40 @@ def _node_betweenness(G, source, cutoff=False, normalized=True, weight=None):

     If weight is not None then use Dijkstra for finding shortest paths.
     """
-    pass
+    # get the predecessor and path length data
+    if weight is None:
+        (pred, length) = nx.predecessor(G, source, cutoff=cutoff, return_seen=True)
+    else:
+        (pred, length) = nx.dijkstra_predecessor_and_distance(G, source, cutoff, weight)
+
+    # order the nodes by path length
+    onodes = [(l, vert) for (vert, l) in length.items()]
+    onodes.sort()
+    onodes[:] = [vert for (l, vert) in onodes if l > 0]
+
+    # initialize betweenness
+    between = {}.fromkeys(length, 1.0)
+
+    while onodes:
+        v = onodes.pop()
+        if v in pred:
+            num_paths = len(pred[v])  # Discount betweenness if more than
+            for x in pred[v]:  # one shortest path.
+                if x == source:  # stop if hit source because all remaining v
+                    break  # also have pred[v]==[source]
+                between[x] += between[v] / num_paths
+    # remove source
+    for v in between:
+        between[v] -= 1
+    # rescale to be between 0 and 1
+    if normalized:
+        l = len(between)
+        if l > 2:
+            # scale by 1/the number of possible paths
+            scale = 1 / ((l - 1) * (l - 2))
+            for v in between:
+                between[v] *= scale
+    return between


 load_centrality = newman_betweenness_centrality
@@ -103,9 +160,40 @@ def edge_load_centrality(G, cutoff=False):
     which use that edge. Where more than one path is shortest
     the count is divided equally among paths.
     """
-    pass
+    betweenness = {}
+    for u, v in G.edges():
+        betweenness[(u, v)] = 0.0
+        betweenness[(v, u)] = 0.0
+
+    for source in G:
+        ubetween = _edge_betweenness(G, source, cutoff=cutoff)
+        for e, ubetweenv in ubetween.items():
+            betweenness[e] += ubetweenv  # cumulative total
+    return betweenness


 def _edge_betweenness(G, source, nodes=None, cutoff=False):
     """Edge betweenness helper."""
-    pass
+    # get the predecessor data
+    (pred, length) = nx.predecessor(G, source, cutoff=cutoff, return_seen=True)
+    # order the nodes by path length
+    onodes = [n for n, d in sorted(length.items(), key=itemgetter(1))]
+    # initialize betweenness, doesn't account for any edge weights
+    between = {}
+    for u, v in G.edges(nodes):
+        between[(u, v)] = 1.0
+        between[(v, u)] = 1.0
+
+    while onodes:  # work through all paths
+        v = onodes.pop()
+        if v in pred:
+            # Discount betweenness if more than one shortest path.
+            num_paths = len(pred[v])
+            for w in pred[v]:
+                if w in pred:
+                    # Discount betweenness, mult path
+                    num_paths = len(pred[w])
+                    for x in pred[w]:
+                        between[(w, x)] += between[(v, w)] / num_paths
+                        between[(x, w)] += between[(w, v)] / num_paths
+    return between
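
For reference, a small doctest exercising the restored load-centrality code
(illustrative): the endpoints of a path lie on no shortest paths between other
nodes, and the two interior nodes are symmetric.

    >>> import networkx as nx
    >>> lc = nx.load_centrality(nx.path_graph(4))
    >>> lc[0] == lc[3] == 0.0
    True
    >>> lc[1] == lc[2]
    True
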
diff --git a/networkx/algorithms/centrality/percolation.py b/networkx/algorithms/centrality/percolation.py
index 7e690b6ae..0d4c87132 100644
--- a/networkx/algorithms/centrality/percolation.py
+++ b/networkx/algorithms/centrality/percolation.py
@@ -1,14 +1,19 @@
 """Percolation centrality measures."""
+
 import networkx as nx
-from networkx.algorithms.centrality.betweenness import _single_source_dijkstra_path_basic as dijkstra
-from networkx.algorithms.centrality.betweenness import _single_source_shortest_path_basic as shortest_path
-__all__ = ['percolation_centrality']
+from networkx.algorithms.centrality.betweenness import (
+    _single_source_dijkstra_path_basic as dijkstra,
+)
+from networkx.algorithms.centrality.betweenness import (
+    _single_source_shortest_path_basic as shortest_path,
+)
+
+__all__ = ["percolation_centrality"]


-@nx._dispatchable(node_attrs='attribute', edge_attrs='weight')
-def percolation_centrality(G, attribute='percolation', states=None, weight=None
-    ):
-    """Compute the percolation centrality for nodes.
+@nx._dispatchable(node_attrs="attribute", edge_attrs="weight")
+def percolation_centrality(G, attribute="percolation", states=None, weight=None):
+    r"""Compute the percolation centrality for nodes.

     Percolation centrality of a node $v$, at a given time, is defined
     as the proportion of ‘percolated paths’ that go through that node.
@@ -78,4 +83,46 @@ def percolation_centrality(G, attribute='percolation', states=None, weight=None
        Journal of Mathematical Sociology 25(2):163-177, 2001.
        https://doi.org/10.1080/0022250X.2001.9990249
     """
-    pass
+    percolation = dict.fromkeys(G, 0.0)  # b[v]=0 for v in G
+
+    nodes = G
+
+    if states is None:
+        states = nx.get_node_attributes(nodes, attribute, default=1)
+
+    # sum of all percolation states
+    p_sigma_x_t = 0.0
+    for v in states.values():
+        p_sigma_x_t += v
+
+    for s in nodes:
+        # single source shortest paths
+        if weight is None:  # use BFS
+            S, P, sigma, _ = shortest_path(G, s)
+        else:  # use Dijkstra's algorithm
+            S, P, sigma, _ = dijkstra(G, s, weight)
+        # accumulation
+        percolation = _accumulate_percolation(
+            percolation, S, P, sigma, s, states, p_sigma_x_t
+        )
+
+    n = len(G)
+
+    for v in percolation:
+        percolation[v] *= 1 / (n - 2)
+
+    return percolation
+
+
+def _accumulate_percolation(percolation, S, P, sigma, s, states, p_sigma_x_t):
+    delta = dict.fromkeys(S, 0)
+    while S:
+        w = S.pop()
+        coeff = (1 + delta[w]) / sigma[w]
+        for v in P[w]:
+            delta[v] += sigma[v] * coeff
+        if w != s:
+            # percolation weight
+            pw_s_w = states[s] / (p_sigma_x_t - states[w])
+            percolation[w] += delta[w] * pw_s_w
+    return percolation
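
A doctest-style sketch (illustrative): with no percolation attribute set, every
state defaults to 1, so the measure degenerates to a betweenness-like score and
the two interior nodes of a path agree up to rounding.

    >>> import networkx as nx
    >>> pc = nx.percolation_centrality(nx.path_graph(4))
    >>> round(pc[1], 10) == round(pc[2], 10)
    True
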
diff --git a/networkx/algorithms/centrality/reaching.py b/networkx/algorithms/centrality/reaching.py
index 43ee40112..93cb75a96 100644
--- a/networkx/algorithms/centrality/reaching.py
+++ b/networkx/algorithms/centrality/reaching.py
@@ -1,7 +1,9 @@
 """Functions for computing reaching centrality of a node or a graph."""
+
 import networkx as nx
 from networkx.utils import pairwise
-__all__ = ['global_reaching_centrality', 'local_reaching_centrality']
+
+__all__ = ["global_reaching_centrality", "local_reaching_centrality"]


 def _average_weight(G, path, weight=None):
@@ -20,10 +22,16 @@ def _average_weight(G, path, weight=None):
       is assumed to be the multiplicative inverse of the length of the path.
       Otherwise holds the name of the edge attribute used as weight.
     """
-    pass
+    path_length = len(path) - 1
+    if path_length <= 0:
+        return 0
+    if weight is None:
+        return 1 / path_length
+    total_weight = sum(G.edges[i, j][weight] for i, j in pairwise(path))
+    return total_weight / path_length


-@nx._dispatchable(edge_attrs='weight')
+@nx._dispatchable(edge_attrs="weight")
 def global_reaching_centrality(G, weight=None, normalized=True):
     """Returns the global reaching centrality of a directed graph.

@@ -76,10 +84,42 @@ def global_reaching_centrality(G, weight=None, normalized=True):
            *PLoS ONE* 7.3 (2012): e33799.
            https://doi.org/10.1371/journal.pone.0033799
     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
+    if nx.is_negatively_weighted(G, weight=weight):
+        raise nx.NetworkXError("edge weights must be positive")
+    total_weight = G.size(weight=weight)
+    if total_weight <= 0:
+        raise nx.NetworkXError("Size of G must be positive")
+
+    # If provided, weights must be interpreted as connection strength
+    # (so higher weights are more likely to be chosen). However, the
+    # shortest path algorithms in NetworkX assume the provided "weight"
+    # is actually a distance (so edges with higher weight are less
+    # likely to be chosen). Therefore we need to invert the weights when
+    # computing shortest paths.
+    #
+    # If weight is None, we leave it as-is so that the shortest path
+    # algorithm can use a faster, unweighted algorithm.
+    if weight is not None:
+
+        def as_distance(u, v, d):
+            return total_weight / d.get(weight, 1)
+
+        shortest_paths = nx.shortest_path(G, weight=as_distance)
+    else:
+        shortest_paths = nx.shortest_path(G)
+
+    centrality = local_reaching_centrality
+    # TODO This can be trivially parallelized.
+    lrc = [
+        centrality(G, node, paths=paths, weight=weight, normalized=normalized)
+        for node, paths in shortest_paths.items()
+    ]
+
+    max_lrc = max(lrc)
+    return sum(max_lrc - c for c in lrc) / (len(G) - 1)
+
+
+@nx._dispatchable(edge_attrs="weight")
 def local_reaching_centrality(G, v, paths=None, weight=None, normalized=True):
     """Returns the local reaching centrality of a node in a directed
     graph.
@@ -138,4 +178,29 @@ def local_reaching_centrality(G, v, paths=None, weight=None, normalized=True):
            *PLoS ONE* 7.3 (2012): e33799.
            https://doi.org/10.1371/journal.pone.0033799
     """
-    pass
+    if paths is None:
+        if nx.is_negatively_weighted(G, weight=weight):
+            raise nx.NetworkXError("edge weights must be positive")
+        total_weight = G.size(weight=weight)
+        if total_weight <= 0:
+            raise nx.NetworkXError("Size of G must be positive")
+        if weight is not None:
+            # Interpret weights as lengths.
+            def as_distance(u, v, d):
+                return total_weight / d.get(weight, 1)
+
+            paths = nx.shortest_path(G, source=v, weight=as_distance)
+        else:
+            paths = nx.shortest_path(G, source=v)
+    # If the graph is unweighted, simply return the proportion of nodes
+    # reachable from the source node ``v``.
+    if weight is None and G.is_directed():
+        return (len(paths) - 1) / (len(G) - 1)
+    if normalized and weight is not None:
+        norm = G.size(weight=weight) / G.size()
+    else:
+        norm = 1
+    # TODO This can be trivially parallelized.
+    avgw = (_average_weight(G, path, weight=weight) for path in paths.values())
+    sum_avg_weight = sum(avgw) / norm
+    return sum_avg_weight / (len(G) - 1)
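
A doctest-style illustration (not from the patch): in a directed out-star the
hub reaches every node while the leaves reach none, which maximizes the
heterogeneity of the local reaching centralities.

    >>> import networkx as nx
    >>> G = nx.DiGraph([(1, 2), (1, 3)])
    >>> nx.local_reaching_centrality(G, 1)
    1.0
    >>> nx.global_reaching_centrality(G)
    1.0
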
diff --git a/networkx/algorithms/centrality/second_order.py b/networkx/algorithms/centrality/second_order.py
index 65074a214..35583cd63 100644
--- a/networkx/algorithms/centrality/second_order.py
+++ b/networkx/algorithms/centrality/second_order.py
@@ -29,14 +29,18 @@ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 """
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['second_order_centrality']
+
+# Authors: Erwan Le Merrer (erwan.lemerrer@technicolor.com)
+
+__all__ = ["second_order_centrality"]


-@not_implemented_for('directed')
-@nx._dispatchable(edge_attrs='weight')
-def second_order_centrality(G, weight='weight'):
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
+def second_order_centrality(G, weight="weight"):
     """Compute the second order centrality for nodes of G.

     The second order centrality of a given node is the standard deviation of
@@ -95,4 +99,43 @@ def second_order_centrality(G, weight='weight'):
        "Second order centrality: Distributed assessment of nodes criticity in
        complex networks", Elsevier Computer Communications 34(5):619-628, 2011.
     """
-    pass
+    import numpy as np
+
+    n = len(G)
+
+    if n == 0:
+        raise nx.NetworkXException("Empty graph.")
+    if not nx.is_connected(G):
+        raise nx.NetworkXException("Non connected graph.")
+    if any(d.get(weight, 0) < 0 for u, v, d in G.edges(data=True)):
+        raise nx.NetworkXException("Graph has negative edge weights.")
+
+    # balancing G for Metropolis-Hastings random walks
+    G = nx.DiGraph(G)
+    in_deg = dict(G.in_degree(weight=weight))
+    d_max = max(in_deg.values())
+    for i, deg in in_deg.items():
+        if deg < d_max:
+            G.add_edge(i, i, weight=d_max - deg)
+
+    P = nx.to_numpy_array(G)
+    P /= P.sum(axis=1)[:, np.newaxis]  # to transition probability matrix
+
+    def _Qj(P, j):
+        P = P.copy()
+        P[:, j] = 0
+        return P
+
+    M = np.empty([n, n])
+
+    for i in range(n):
+        M[:, i] = np.linalg.solve(
+            np.identity(n) - _Qj(P, i), np.ones([n, 1])[:, 0]
+        )  # eq 3
+
+    return dict(
+        zip(
+            G.nodes,
+            (float(np.sqrt(2 * np.sum(M[:, i]) - n * (n + 1))) for i in range(n)),
+        )
+    )  # eq 6
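
A doctest-style sketch (illustrative): in a star graph the hub is revisited
most regularly by the random walk, so it has the smallest standard deviation
of return times.

    >>> import networkx as nx
    >>> soc = nx.second_order_centrality(nx.star_graph(5))
    >>> min(soc, key=soc.get)
    0
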
diff --git a/networkx/algorithms/centrality/subgraph_alg.py b/networkx/algorithms/centrality/subgraph_alg.py
index a8adc1c6f..29a284c54 100644
--- a/networkx/algorithms/centrality/subgraph_alg.py
+++ b/networkx/algorithms/centrality/subgraph_alg.py
@@ -3,15 +3,20 @@ Subgraph centrality and communicability betweenness.
 """
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['subgraph_centrality_exp', 'subgraph_centrality',
-    'communicability_betweenness_centrality', 'estrada_index']

+__all__ = [
+    "subgraph_centrality_exp",
+    "subgraph_centrality",
+    "communicability_betweenness_centrality",
+    "estrada_index",
+]

-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def subgraph_centrality_exp(G):
-    """Returns the subgraph centrality for each node of G.
+    r"""Returns the subgraph centrality for each node of G.

     Subgraph centrality  of a node `n` is the sum of weighted closed
     walks of all lengths starting and ending at node `n`. The weights
@@ -78,14 +83,24 @@ def subgraph_centrality_exp(G):
     >>> print([f"{node} {sc[node]:0.2f}" for node in sorted(sc)])
     ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90']
     """
-    pass
+    # alternative implementation that calculates the matrix exponential
+    import scipy as sp
+
+    nodelist = list(G)  # ordering of nodes in matrix
+    A = nx.to_numpy_array(G, nodelist)
+    # convert to 0-1 matrix
+    A[A != 0.0] = 1
+    expA = sp.linalg.expm(A)
+    # convert diagonal to dictionary keyed by node
+    sc = dict(zip(nodelist, map(float, expA.diagonal())))
+    return sc


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def subgraph_centrality(G):
-    """Returns subgraph centrality for each node in G.
+    r"""Returns subgraph centrality for each node in G.

     Subgraph centrality  of a node `n` is the sum of weighted closed
     walks of all lengths starting and ending at node `n`. The weights
@@ -121,10 +136,10 @@ def subgraph_centrality(G):

     .. math::

-       SC(u)=\\sum_{j=1}^{N}(v_{j}^{u})^2 e^{\\lambda_{j}},
+       SC(u)=\sum_{j=1}^{N}(v_{j}^{u})^2 e^{\lambda_{j}},

     where `v_j` is an eigenvector of the adjacency matrix `A` of G
-    corresponding to the eigenvalue `\\lambda_j`.
+    corresponding to the eigenvalue `\lambda_j`.

     Examples
     --------
@@ -157,14 +172,26 @@ def subgraph_centrality(G):
        https://arxiv.org/abs/cond-mat/0504730

     """
-    pass
-
-
-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+    import numpy as np
+
+    nodelist = list(G)  # ordering of nodes in matrix
+    A = nx.to_numpy_array(G, nodelist)
+    # convert to 0-1 matrix
+    A[np.nonzero(A)] = 1
+    w, v = np.linalg.eigh(A)
+    vsquare = np.array(v) ** 2
+    expw = np.exp(w)
+    xg = vsquare @ expw
+    # convert vector dictionary keyed by node
+    sc = dict(zip(nodelist, map(float, xg)))
+    return sc
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def communicability_betweenness_centrality(G):
-    """Returns subgraph communicability for all pairs of nodes in G.
+    r"""Returns subgraph communicability for all pairs of nodes in G.

     Communicability betweenness measure makes use of the number of walks
     connecting every pair of nodes as the basis of a betweenness centrality
@@ -199,8 +226,8 @@ def communicability_betweenness_centrality(G):

     .. math::

-         \\omega_{r} = \\frac{1}{C}\\sum_{p}\\sum_{q}\\frac{G_{prq}}{G_{pq}},
-         p\\neq q, q\\neq r,
+         \omega_{r} = \frac{1}{C}\sum_{p}\sum_{q}\frac{G_{prq}}{G_{pq}},
+         p\neq q, q\neq r,

     where
     `G_{prq}=(e^{A})_{pq} - (e^{A+E(r)})_{pq}` is the number of walks
@@ -210,7 +237,7 @@ def communicability_betweenness_centrality(G):
     and `C=(n-1)^{2}-(n-1)` is a normalization factor equal to the
     number of terms in the sum.

-    The resulting `\\omega_{r}` takes values between zero and one.
+    The resulting `\omega_{r}` takes values between zero and one.
     The lower bound cannot be attained for a connected
     graph, and the upper bound is attained in the star graph.

@@ -228,12 +255,44 @@ def communicability_betweenness_centrality(G):
     >>> print([f"{node} {cbc[node]:0.2f}" for node in sorted(cbc)])
     ['0 0.03', '1 0.45', '2 0.51', '3 0.45', '4 0.40', '5 0.19', '6 0.03']
     """
-    pass
+    import numpy as np
+    import scipy as sp
+
+    nodelist = list(G)  # ordering of nodes in matrix
+    n = len(nodelist)
+    A = nx.to_numpy_array(G, nodelist)
+    # convert to 0-1 matrix
+    A[np.nonzero(A)] = 1
+    expA = sp.linalg.expm(A)
+    mapping = dict(zip(nodelist, range(n)))
+    cbc = {}
+    for v in G:
+        # remove row and col of node v
+        i = mapping[v]
+        row = A[i, :].copy()
+        col = A[:, i].copy()
+        A[i, :] = 0
+        A[:, i] = 0
+        B = (expA - sp.linalg.expm(A)) / expA
+        # sum with row/col of node v and diag set to zero
+        B[i, :] = 0
+        B[:, i] = 0
+        B -= np.diag(np.diag(B))
+        cbc[v] = float(B.sum())
+        # put row and col back
+        A[i, :] = row
+        A[:, i] = col
+    # rescale when more than two nodes
+    order = len(cbc)
+    if order > 2:
+        scale = 1.0 / ((order - 1.0) ** 2 - (order - 1.0))
+        cbc = {node: value * scale for node, value in cbc.items()}
+    return cbc


 @nx._dispatchable
 def estrada_index(G):
-    """Returns the Estrada index of a the graph G.
+    r"""Returns the Estrada index of a the graph G.

     The Estrada Index is a topological index of folding or 3D "compactness" ([1]_).

@@ -253,12 +312,12 @@ def estrada_index(G):
     Notes
     -----
     Let `G=(V,E)` be a simple undirected graph with `n` nodes  and let
-    `\\lambda_{1}\\leq\\lambda_{2}\\leq\\cdots\\lambda_{n}`
+    `\lambda_{1}\leq\lambda_{2}\leq\cdots\lambda_{n}`
     be a non-increasing ordering of the eigenvalues of its adjacency
     matrix `A`. The Estrada index is ([1]_, [2]_)

     .. math::
-        EE(G)=\\sum_{j=1}^n e^{\\lambda _j}.
+        EE(G)=\sum_{j=1}^n e^{\lambda _j}.

     References
     ----------
@@ -277,4 +336,4 @@ def estrada_index(G):
     >>> print(f"{ei:0.5}")
     20.55
     """
-    pass
+    return sum(subgraph_centrality(G).values())
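
A doctest-style consistency check (illustrative): the eigendecomposition and
matrix-exponential variants agree to numerical precision, and estrada_index
is, by construction above, the sum of the subgraph centralities.

    >>> import networkx as nx
    >>> G = nx.cycle_graph(4)
    >>> sc = nx.subgraph_centrality(G)
    >>> nx.estrada_index(G) == sum(sc.values())
    True
    >>> round(nx.subgraph_centrality_exp(G)[0], 6) == round(sc[0], 6)
    True
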
diff --git a/networkx/algorithms/centrality/trophic.py b/networkx/algorithms/centrality/trophic.py
index f983383bd..6d1ba960b 100644
--- a/networkx/algorithms/centrality/trophic.py
+++ b/networkx/algorithms/centrality/trophic.py
@@ -1,26 +1,26 @@
 """Trophic levels"""
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['trophic_levels', 'trophic_differences',
-    'trophic_incoherence_parameter']

+__all__ = ["trophic_levels", "trophic_differences", "trophic_incoherence_parameter"]

-@not_implemented_for('undirected')
-@nx._dispatchable(edge_attrs='weight')
-def trophic_levels(G, weight='weight'):
-    """Compute the trophic levels of nodes.
+
+@not_implemented_for("undirected")
+@nx._dispatchable(edge_attrs="weight")
+def trophic_levels(G, weight="weight"):
+    r"""Compute the trophic levels of nodes.

     The trophic level of a node $i$ is

     .. math::

-        s_i = 1 + \\frac{1}{k^{in}_i} \\sum_{j} a_{ij} s_j
+        s_i = 1 + \frac{1}{k^{in}_i} \sum_{j} a_{ij} s_j

     where $k^{in}_i$ is the in-degree of i

     .. math::

-        k^{in}_i = \\sum_{j} a_{ij}
+        k^{in}_i = \sum_{j} a_{ij}

     and nodes with $k^{in}_i = 0$ have $s_i = 1$ by convention.

@@ -40,13 +40,51 @@ def trophic_levels(G, weight='weight'):
     ----------
     .. [1] Stephen Levine (1980) J. theor. Biol. 83, 195-207
     """
-    pass
-
-
-@not_implemented_for('undirected')
-@nx._dispatchable(edge_attrs='weight')
-def trophic_differences(G, weight='weight'):
-    """Compute the trophic differences of the edges of a directed graph.
+    import numpy as np
+
+    # find adjacency matrix
+    a = nx.adjacency_matrix(G, weight=weight).T.toarray()
+
+    # drop rows/columns where in-degree is zero
+    rowsum = np.sum(a, axis=1)
+    p = a[rowsum != 0][:, rowsum != 0]
+    # normalise so sum of in-degree weights is 1 along each row
+    p = p / rowsum[rowsum != 0][:, np.newaxis]
+
+    # calculate trophic levels
+    nn = p.shape[0]
+    i = np.eye(nn)
+    try:
+        n = np.linalg.inv(i - p)
+    except np.linalg.LinAlgError as err:
+        # LinAlgError is raised when there is a non-basal node
+        msg = (
+            "Trophic levels are only defined for graphs where every "
+            + "node has a path from a basal node (basal nodes are nodes "
+            + "with no incoming edges)."
+        )
+        raise nx.NetworkXError(msg) from err
+    y = n.sum(axis=1) + 1
+
+    levels = {}
+
+    # all nodes with in-degree zero have trophic level == 1
+    zero_node_ids = (node_id for node_id, degree in G.in_degree if degree == 0)
+    for node_id in zero_node_ids:
+        levels[node_id] = 1
+
+    # all other nodes have levels as calculated
+    nonzero_node_ids = (node_id for node_id, degree in G.in_degree if degree != 0)
+    for i, node_id in enumerate(nonzero_node_ids):
+        levels[node_id] = y.item(i)
+
+    return levels
+
+
+@not_implemented_for("undirected")
+@nx._dispatchable(edge_attrs="weight")
+def trophic_differences(G, weight="weight"):
+    r"""Compute the trophic differences of the edges of a directed graph.

     The trophic difference $x_{ij}$ for each edge is defined in Johnson et al.
     [1]_ as:
@@ -71,13 +109,17 @@ def trophic_differences(G, weight='weight'):
     .. [1] Samuel Johnson, Virginia Dominguez-Garcia, Luca Donetti, Miguel A.
         Munoz (2014) PNAS "Trophic coherence determines food-web stability"
     """
-    pass
+    levels = trophic_levels(G, weight=weight)
+    diffs = {}
+    for u, v in G.edges:
+        diffs[(u, v)] = levels[v] - levels[u]
+    return diffs


-@not_implemented_for('undirected')
-@nx._dispatchable(edge_attrs='weight')
-def trophic_incoherence_parameter(G, weight='weight', cannibalism=False):
-    """Compute the trophic incoherence parameter of a graph.
+@not_implemented_for("undirected")
+@nx._dispatchable(edge_attrs="weight")
+def trophic_incoherence_parameter(G, weight="weight", cannibalism=False):
+    r"""Compute the trophic incoherence parameter of a graph.

     Trophic coherence is defined as the homogeneity of the distribution of
     trophic distances: the more similar, the more coherent. This is measured by
@@ -102,4 +144,19 @@ def trophic_incoherence_parameter(G, weight='weight', cannibalism=False):
     .. [1] Samuel Johnson, Virginia Dominguez-Garcia, Luca Donetti, Miguel A.
         Munoz (2014) PNAS "Trophic coherence determines food-web stability"
     """
-    pass
+    import numpy as np
+
+    if cannibalism:
+        diffs = trophic_differences(G, weight=weight)
+    else:
+        # If no cannibalism, remove self-edges
+        self_loops = list(nx.selfloop_edges(G))
+        if self_loops:
+            # Make a copy so we do not change G's edges in memory
+            G_2 = G.copy()
+            G_2.remove_edges_from(self_loops)
+        else:
+            # Avoid copy otherwise
+            G_2 = G
+        diffs = trophic_differences(G_2, weight=weight)
+    return float(np.std(list(diffs.values())))
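
A doctest-style illustration (not from the patch): a linear food chain has
integer trophic levels, and with every trophic difference equal to one its
incoherence parameter vanishes.

    >>> import networkx as nx
    >>> G = nx.DiGraph([(1, 2), (2, 3)])
    >>> {n: round(level) for n, level in nx.trophic_levels(G).items()}
    {1: 1, 2: 2, 3: 3}
    >>> round(nx.trophic_incoherence_parameter(G), 6)
    0.0
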
diff --git a/networkx/algorithms/centrality/voterank_alg.py b/networkx/algorithms/centrality/voterank_alg.py
index 4cd3a4034..063dfdd64 100644
--- a/networkx/algorithms/centrality/voterank_alg.py
+++ b/networkx/algorithms/centrality/voterank_alg.py
@@ -1,6 +1,7 @@
 """Algorithm to select influential nodes in a graph using VoteRank."""
 import networkx as nx
-__all__ = ['voterank']
+
+__all__ = ["voterank"]


 @nx._dispatchable
@@ -51,4 +52,43 @@ def voterank(G, number_of_nodes=None):
         Identifying a set of influential spreaders in complex networks.
         Sci. Rep. 6, 27823; doi: 10.1038/srep27823.
     """
-    pass
+    influential_nodes = []
+    vote_rank = {}
+    if len(G) == 0:
+        return influential_nodes
+    if number_of_nodes is None or number_of_nodes > len(G):
+        number_of_nodes = len(G)
+    if G.is_directed():
+        # For directed graphs compute average out-degree
+        avgDegree = sum(deg for _, deg in G.out_degree()) / len(G)
+    else:
+        # For undirected graphs compute average degree
+        avgDegree = sum(deg for _, deg in G.degree()) / len(G)
+    # step 1 - initiate all nodes to (0,1) (score, voting ability)
+    for n in G.nodes():
+        vote_rank[n] = [0, 1]
+    # Repeat steps 1b to 4 until num_seeds are elected.
+    for _ in range(number_of_nodes):
+        # step 1b - reset rank
+        for n in G.nodes():
+            vote_rank[n][0] = 0
+        # step 2 - vote
+        for n, nbr in G.edges():
+            # In directed graphs nodes only vote for their in-neighbors
+            vote_rank[n][0] += vote_rank[nbr][1]
+            if not G.is_directed():
+                vote_rank[nbr][0] += vote_rank[n][1]
+        for n in influential_nodes:
+            vote_rank[n][0] = 0
+        # step 3 - select top node
+        n = max(G.nodes, key=lambda x: vote_rank[x][0])
+        if vote_rank[n][0] == 0:
+            return influential_nodes
+        influential_nodes.append(n)
+        # weaken the selected node
+        vote_rank[n] = [0, 0]
+        # step 4 - update voterank properties
+        for _, nbr in G.edges(n):
+            vote_rank[nbr][1] -= 1 / avgDegree
+            vote_rank[nbr][1] = max(vote_rank[nbr][1], 0)
+    return influential_nodes
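
A doctest-style check of the voting loop above (illustrative): node 0 is
elected first, its neighbors' voting ability is weakened, and the election
stops once every remaining score is zero.

    >>> import networkx as nx
    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 4)])
    >>> nx.voterank(G)
    [0, 1]
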
diff --git a/networkx/algorithms/chains.py b/networkx/algorithms/chains.py
index 05f01ed84..ae342d9c8 100644
--- a/networkx/algorithms/chains.py
+++ b/networkx/algorithms/chains.py
@@ -1,11 +1,13 @@
 """Functions for finding chains in a graph."""
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['chain_decomposition']
+
+__all__ = ["chain_decomposition"]


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def chain_decomposition(G, root=None):
     """Returns the chain decomposition of a graph.
@@ -61,4 +63,110 @@ def chain_decomposition(G, root=None):
        113, 241–244. Elsevier. <https://doi.org/10.1016/j.ipl.2013.01.016>

     """
-    pass
+
+    def _dfs_cycle_forest(G, root=None):
+        """Builds a directed graph composed of cycles from the given graph.
+
+        `G` is an undirected simple graph. `root` is a node in the graph
+        from which the depth-first search is started.
+
+        This function returns both the depth-first search cycle graph
+        (as a :class:`~networkx.DiGraph`) and the list of nodes in
+        depth-first preorder. The depth-first search cycle graph is a
+        directed graph whose edges are the edges of `G` oriented toward
+        the root if the edge is a tree edge and away from the root if
+        the edge is a non-tree edge. If `root` is not specified, this
+        performs a depth-first search on each connected component of `G`
+        and returns a directed forest instead.
+
+        If `root` is not in the graph, this raises :exc:`KeyError`.
+
+        """
+        # Create a directed graph from the depth-first search tree with
+        # root node `root` in which tree edges are directed toward the
+        # root and nontree edges are directed away from the root. For
+        # each node with an incident nontree edge, this creates a
+        # directed cycle starting with the nontree edge and returning to
+        # that node.
+        #
+        # The `parent` node attribute stores the parent of each node in
+        # the DFS tree. The `nontree` edge attribute indicates whether
+        # the edge is a tree edge or a nontree edge.
+        #
+        # We also store the order of the nodes found in the depth-first
+        # search in the `nodes` list.
+        H = nx.DiGraph()
+        nodes = []
+        for u, v, d in nx.dfs_labeled_edges(G, source=root):
+            if d == "forward":
+                # `dfs_labeled_edges()` yields (root, root, 'forward')
+                # if it is beginning the search on a new connected
+                # component.
+                if u == v:
+                    H.add_node(v, parent=None)
+                    nodes.append(v)
+                else:
+                    H.add_node(v, parent=u)
+                    H.add_edge(v, u, nontree=False)
+                    nodes.append(v)
+            # `dfs_labeled_edges` considers nontree edges in both
+            # orientations, so we must not add the edge if its
+            # other orientation has already been added.
+            elif d == "nontree" and v not in H[u]:
+                H.add_edge(v, u, nontree=True)
+            else:
+                # Do nothing on 'reverse' edges; we only care about
+                # forward and nontree edges.
+                pass
+        return H, nodes
+
+    def _build_chain(G, u, v, visited):
+        """Generate the chain starting from the given nontree edge.
+
+        `G` is a DFS cycle graph as constructed by
+        :func:`_dfs_cycle_forest`. The edge (`u`, `v`) is a nontree edge
+        that begins a chain. `visited` is a set representing the nodes
+        in `G` that have already been visited.
+
+        This function yields the edges in an initial segment of the
+        fundamental cycle of `G` starting with the nontree edge (`u`,
+        `v`) that includes all the edges up until the first node that
+        appears in `visited`. The tree edges are given by the 'parent'
+        node attribute. The `visited` set is updated to add each node in
+        an edge yielded by this function.
+
+        """
+        while v not in visited:
+            yield u, v
+            visited.add(v)
+            u, v = v, G.nodes[v]["parent"]
+        yield u, v
+
+    # Check if the root is in the graph G. If not, raise NodeNotFound
+    if root is not None and root not in G:
+        raise nx.NodeNotFound(f"Root node {root} is not in graph")
+
+    # Create a directed version of H that has the DFS edges directed
+    # toward the root and the nontree edges directed away from the root
+    # (in each connected component).
+    H, nodes = _dfs_cycle_forest(G, root)
+
+    # Visit the nodes again in DFS order. For each node, and for each
+    # nontree edge leaving that node, compute the fundamental cycle for
+    # that nontree edge starting with that edge. If the fundamental
+    # cycle overlaps with any visited nodes, just take the prefix of the
+    # cycle up to the point of visited nodes.
+    #
+    # We repeat this process for each connected component (implicitly,
+    # since `nodes` already has a list of the nodes grouped by connected
+    # component).
+    visited = set()
+    for u in nodes:
+        visited.add(u)
+        # For each nontree edge going out of node u...
+        edges = ((u, v) for u, v, d in H.out_edges(u, data="nontree") if d)
+        for u, v in edges:
+            # Create the cycle or cycle prefix starting with the
+            # nontree edge.
+            chain = list(_build_chain(H, u, v, visited))
+            yield chain
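
A doctest-style example (illustrative; the orientation of the reported edges
depends on the DFS order): the lone cycle 3-4-5 yields a single chain, while
the bridges (0, 1) and (1, 4) belong to no chain.

    >>> import networkx as nx
    >>> G = nx.Graph([(0, 1), (1, 4), (3, 4), (3, 5), (4, 5)])
    >>> list(nx.chain_decomposition(G))
    [[(4, 5), (5, 3), (3, 4)]]
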
diff --git a/networkx/algorithms/chordal.py b/networkx/algorithms/chordal.py
index ec2d178bd..6bd3ccd2e 100644
--- a/networkx/algorithms/chordal.py
+++ b/networkx/algorithms/chordal.py
@@ -6,12 +6,19 @@ A graph is chordal if every cycle of length at least 4 has a chord
 https://en.wikipedia.org/wiki/Chordal_graph
 """
 import sys
+
 import networkx as nx
 from networkx.algorithms.components import connected_components
 from networkx.utils import arbitrary_element, not_implemented_for
-__all__ = ['is_chordal', 'find_induced_nodes', 'chordal_graph_cliques',
-    'chordal_graph_treewidth', 'NetworkXTreewidthBoundExceeded',
-    'complete_to_chordal_graph']
+
+__all__ = [
+    "is_chordal",
+    "find_induced_nodes",
+    "chordal_graph_cliques",
+    "chordal_graph_treewidth",
+    "NetworkXTreewidthBoundExceeded",
+    "complete_to_chordal_graph",
+]


 class NetworkXTreewidthBoundExceeded(nx.NetworkXException):
@@ -19,8 +26,8 @@ class NetworkXTreewidthBoundExceeded(nx.NetworkXException):
     been exceeded"""


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def is_chordal(G):
     """Checks whether G is a chordal graph.
@@ -76,7 +83,9 @@ def is_chordal(G):
        selectively reduce acyclic hypergraphs, SIAM J. Comput., 13 (1984),
        pp. 566–579.
     """
-    pass
+    if len(G.nodes) <= 3:
+        return True
+    return len(_find_chordality_breaker(G)) == 0


 @nx._dispatchable
@@ -135,7 +144,28 @@ def find_induced_nodes(G, s, t, treewidth_bound=sys.maxsize):
        Gal Elidan, Stephen Gould; JMLR, 9(Dec):2699--2731, 2008.
        http://jmlr.csail.mit.edu/papers/volume9/elidan08a/elidan08a.pdf
     """
-    pass
+    if not is_chordal(G):
+        raise nx.NetworkXError("Input graph is not chordal.")
+
+    H = nx.Graph(G)
+    H.add_edge(s, t)
+    induced_nodes = set()
+    triplet = _find_chordality_breaker(H, s, treewidth_bound)
+    while triplet:
+        (u, v, w) = triplet
+        induced_nodes.update(triplet)
+        for n in triplet:
+            if n != s:
+                H.add_edge(s, n)
+        triplet = _find_chordality_breaker(H, s, treewidth_bound)
+    if induced_nodes:
+        # Add t and the second node in the induced path from s to t.
+        induced_nodes.add(t)
+        for u in G[s]:
+            if len(induced_nodes & set(G[u])) == 2:
+                induced_nodes.add(u)
+                break
+    return induced_nodes


 @nx._dispatchable
@@ -184,7 +214,31 @@ def chordal_graph_cliques(G):
     >>> cliques[0]
     frozenset({1, 2, 3})
     """
-    pass
+    for C in (G.subgraph(c).copy() for c in connected_components(G)):
+        if C.number_of_nodes() == 1:
+            if nx.number_of_selfloops(C) > 0:
+                raise nx.NetworkXError("Input graph is not chordal.")
+            yield frozenset(C.nodes())
+        else:
+            unnumbered = set(C.nodes())
+            v = arbitrary_element(C)
+            unnumbered.remove(v)
+            numbered = {v}
+            clique_wanna_be = {v}
+            while unnumbered:
+                v = _max_cardinality_node(C, unnumbered, numbered)
+                unnumbered.remove(v)
+                numbered.add(v)
+                new_clique_wanna_be = set(C.neighbors(v)) & numbered
+                sg = C.subgraph(clique_wanna_be)
+                if _is_complete_graph(sg):
+                    new_clique_wanna_be.add(v)
+                    if not new_clique_wanna_be >= clique_wanna_be:
+                        yield frozenset(clique_wanna_be)
+                    clique_wanna_be = new_clique_wanna_be
+                else:
+                    raise nx.NetworkXError("Input graph is not chordal.")
+            yield frozenset(clique_wanna_be)


 @nx._dispatchable
@@ -232,24 +286,47 @@ def chordal_graph_treewidth(G):
     ----------
     .. [1] https://en.wikipedia.org/wiki/Tree_decomposition#Treewidth
     """
-    pass
+    if not is_chordal(G):
+        raise nx.NetworkXError("Input graph is not chordal.")
+
+    max_clique = -1
+    for clique in nx.chordal_graph_cliques(G):
+        max_clique = max(max_clique, len(clique))
+    return max_clique - 1


 def _is_complete_graph(G):
     """Returns True if G is a complete graph."""
-    pass
+    if nx.number_of_selfloops(G) > 0:
+        raise nx.NetworkXError("Self loop found in _is_complete_graph()")
+    n = G.number_of_nodes()
+    if n < 2:
+        return True
+    e = G.number_of_edges()
+    max_edges = (n * (n - 1)) / 2
+    return e == max_edges


 def _find_missing_edge(G):
     """Given a non-complete graph G, returns a missing edge."""
-    pass
+    nodes = set(G)
+    for u in G:
+        missing = nodes - set(list(G[u].keys()) + [u])
+        if missing:
+            return (u, missing.pop())


 def _max_cardinality_node(G, choices, wanna_connect):
     """Returns a the node in choices that has more connections in G
     to nodes in wanna_connect.
     """
-    pass
+    max_number = -1
+    for x in choices:
+        number = len([y for y in G[x] if y in wanna_connect])
+        if number > max_number:
+            max_number = number
+            max_cardinality_node = x
+    return max_cardinality_node


 def _find_chordality_breaker(G, s=None, treewidth_bound=sys.maxsize):
@@ -262,10 +339,36 @@ def _find_chordality_breaker(G, s=None, treewidth_bound=sys.maxsize):

     It ignores any self loops.
     """
-    pass
-
-
-@not_implemented_for('directed')
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept("Graph has no nodes.")
+    unnumbered = set(G)
+    if s is None:
+        s = arbitrary_element(G)
+    unnumbered.remove(s)
+    numbered = {s}
+    current_treewidth = -1
+    while unnumbered:  # and current_treewidth <= treewidth_bound:
+        v = _max_cardinality_node(G, unnumbered, numbered)
+        unnumbered.remove(v)
+        numbered.add(v)
+        clique_wanna_be = set(G[v]) & numbered
+        sg = G.subgraph(clique_wanna_be)
+        if _is_complete_graph(sg):
+            # The graph seems to be chordal by now. We update the treewidth
+            current_treewidth = max(current_treewidth, len(clique_wanna_be))
+            if current_treewidth > treewidth_bound:
+                raise nx.NetworkXTreewidthBoundExceeded(
+                    f"treewidth_bound exceeded: {current_treewidth}"
+                )
+        else:
+            # sg is not a clique,
+            # look for an edge that is not included in sg
+            (u, w) = _find_missing_edge(sg)
+            return (u, v, w)
+    return ()
+
+
+@not_implemented_for("directed")
 @nx._dispatchable(returns_graph=True)
 def complete_to_chordal_graph(G):
     """Return a copy of G completed to a chordal graph
@@ -307,4 +410,33 @@ def complete_to_chordal_graph(G):
     >>> G = nx.wheel_graph(10)
     >>> H, alpha = complete_to_chordal_graph(G)
     """
-    pass
+    H = G.copy()
+    alpha = {node: 0 for node in H}
+    if nx.is_chordal(H):
+        return H, alpha
+    chords = set()
+    weight = {node: 0 for node in H.nodes()}
+    unnumbered_nodes = list(H.nodes())
+    for i in range(len(H.nodes()), 0, -1):
+        # get the node in unnumbered_nodes with the maximum weight
+        z = max(unnumbered_nodes, key=lambda node: weight[node])
+        unnumbered_nodes.remove(z)
+        alpha[z] = i
+        update_nodes = []
+        for y in unnumbered_nodes:
+            if G.has_edge(y, z):
+                update_nodes.append(y)
+            else:
+                # y_weight will be bigger than node weights between y and z
+                y_weight = weight[y]
+                lower_nodes = [
+                    node for node in unnumbered_nodes if weight[node] < y_weight
+                ]
+                if nx.has_path(H.subgraph(lower_nodes + [z, y]), y, z):
+                    update_nodes.append(y)
+                    chords.add((z, y))
+        # during calculation of paths the weights should not be updated
+        for node in update_nodes:
+            weight[node] += 1
+    H.add_edges_from(chords)
+    return H, alpha
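
A doctest-style sketch (illustrative): a 4-cycle contains a chordless cycle of
length four, and complete_to_chordal_graph repairs it with a single chord.

    >>> import networkx as nx
    >>> G = nx.cycle_graph(4)
    >>> nx.is_chordal(G)
    False
    >>> H, alpha = nx.complete_to_chordal_graph(G)
    >>> nx.is_chordal(H)
    True
    >>> H.number_of_edges() - G.number_of_edges()
    1
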
diff --git a/networkx/algorithms/clique.py b/networkx/algorithms/clique.py
index c984ab13b..5f959dd46 100644
--- a/networkx/algorithms/clique.py
+++ b/networkx/algorithms/clique.py
@@ -9,14 +9,23 @@ see the Wikipedia article on the clique problem [1]_.
 """
 from collections import defaultdict, deque
 from itertools import chain, combinations, islice
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['find_cliques', 'find_cliques_recursive',
-    'make_max_clique_graph', 'make_clique_bipartite', 'node_clique_number',
-    'number_of_cliques', 'enumerate_all_cliques', 'max_weight_clique']
+
+__all__ = [
+    "find_cliques",
+    "find_cliques_recursive",
+    "make_max_clique_graph",
+    "make_clique_bipartite",
+    "node_clique_number",
+    "number_of_cliques",
+    "enumerate_all_cliques",
+    "max_weight_clique",
+]


-@not_implemented_for('directed')
+@not_implemented_for("directed")
 @nx._dispatchable
 def enumerate_all_cliques(G):
     """Returns all cliques in an undirected graph.
@@ -63,10 +72,32 @@ def enumerate_all_cliques(G):
            <https://doi.org/10.1109/SC.2005.29>.

     """
-    pass
-
-
-@not_implemented_for('directed')
+    index = {}
+    nbrs = {}
+    for u in G:
+        index[u] = len(index)
+        # Neighbors of u that appear after u in the iteration order of G.
+        nbrs[u] = {v for v in G[u] if v not in index}
+
+    queue = deque(([u], sorted(nbrs[u], key=index.__getitem__)) for u in G)
+    # Loop invariants:
+    # 1. len(base) is nondecreasing.
+    # 2. (base + cnbrs) is sorted with respect to the iteration order of G.
+    # 3. cnbrs is a set of common neighbors of nodes in base.
+    while queue:
+        base, cnbrs = map(list, queue.popleft())
+        yield base
+        for i, u in enumerate(cnbrs):
+            # Use generators to reduce memory consumption.
+            queue.append(
+                (
+                    chain(base, [u]),
+                    filter(nbrs[u].__contains__, islice(cnbrs, i + 1, None)),
+                )
+            )
+
+
+@not_implemented_for("directed")
 @nx._dispatchable
 def find_cliques(G, nodes=None):
     """Returns all maximal cliques in an undirected graph.
@@ -212,9 +243,57 @@ def find_cliques(G, nodes=None):
        <https://doi.org/10.1016/j.tcs.2008.05.010>

     """
-    pass
+    if len(G) == 0:
+        return
+
+    adj = {u: {v for v in G[u] if v != u} for u in G}
+
+    # Initialize Q with the given nodes and subg, cand with their nbrs
+    Q = nodes[:] if nodes is not None else []
+    cand = set(G)
+    for node in Q:
+        if node not in cand:
+            raise ValueError(f"The given `nodes` {nodes} do not form a clique")
+        cand &= adj[node]
+
+    if not cand:
+        yield Q[:]
+        return
+
+    subg = cand.copy()
+    stack = []
+    Q.append(None)
+
+    u = max(subg, key=lambda u: len(cand & adj[u]))
+    ext_u = cand - adj[u]
+
+    try:
+        while True:
+            if ext_u:
+                q = ext_u.pop()
+                cand.remove(q)
+                Q[-1] = q
+                adj_q = adj[q]
+                subg_q = subg & adj_q
+                if not subg_q:
+                    yield Q[:]
+                else:
+                    cand_q = cand & adj_q
+                    if cand_q:
+                        stack.append((subg, cand, ext_u))
+                        Q.append(None)
+                        subg = subg_q
+                        cand = cand_q
+                        u = max(subg, key=lambda u: len(cand & adj[u]))
+                        ext_u = cand - adj[u]
+            else:
+                Q.pop()
+                subg, cand, ext_u = stack.pop()
+    except IndexError:
+        pass


+# TODO Should this also be not implemented for directed graphs?
 @nx._dispatchable
 def find_cliques_recursive(G, nodes=None):
     """Returns all maximal cliques in a graph.
@@ -297,7 +376,40 @@ def find_cliques_recursive(G, nodes=None):
        <https://doi.org/10.1016/j.tcs.2008.05.010>

     """
-    pass
+    if len(G) == 0:
+        return iter([])
+
+    adj = {u: {v for v in G[u] if v != u} for u in G}
+
+    # Initialize Q with the given nodes and subg, cand with their nbrs
+    Q = nodes[:] if nodes is not None else []
+    cand_init = set(G)
+    for node in Q:
+        if node not in cand_init:
+            raise ValueError(f"The given `nodes` {nodes} do not form a clique")
+        cand_init &= adj[node]
+
+    if not cand_init:
+        return iter([Q])
+
+    subg_init = cand_init.copy()
+
+    def expand(subg, cand):
+        u = max(subg, key=lambda u: len(cand & adj[u]))
+        for q in cand - adj[u]:
+            cand.remove(q)
+            Q.append(q)
+            adj_q = adj[q]
+            subg_q = subg & adj_q
+            if not subg_q:
+                yield Q[:]
+            else:
+                cand_q = cand & adj_q
+                if cand_q:
+                    yield from expand(subg_q, cand_q)
+            Q.pop()
+
+    return expand(subg_init, cand_init)


 @nx._dispatchable(returns_graph=True)
@@ -335,7 +447,17 @@ def make_max_clique_graph(G, create_using=None):
     steps.

     """
-    pass
+    if create_using is None:
+        B = G.__class__()
+    else:
+        B = nx.empty_graph(0, create_using)
+    cliques = list(enumerate(set(c) for c in find_cliques(G)))
+    # Add a numbered node for each clique.
+    B.add_nodes_from(i for i, c in cliques)
+    # Join cliques by an edge if they share a node.
+    clique_pairs = combinations(cliques, 2)
+    B.add_edges_from((i, j) for (i, c1), (j, c2) in clique_pairs if c1 & c2)
+    return B


 @nx._dispatchable(returns_graph=True)
@@ -373,7 +495,18 @@ def make_clique_bipartite(G, fpos=None, create_using=None, name=None):
         convention for bipartite graphs in NetworkX.

     """
-    pass
+    B = nx.empty_graph(0, create_using)
+    B.clear()
+    # The "bottom" nodes in the bipartite graph are the nodes of the
+    # original graph, G.
+    B.add_nodes_from(G, bipartite=1)
+    for i, cl in enumerate(find_cliques(G)):
+        # The "top" nodes in the bipartite graph are the cliques. These
+        # nodes get negative numbers as labels.
+        name = -i - 1
+        B.add_node(name, bipartite=0)
+        B.add_edges_from((v, name) for v in cl)
+    return B


 @nx._dispatchable
@@ -409,7 +542,35 @@ def node_clique_number(G, nodes=None, cliques=None, separate_nodes=False):
         maximal cliques containing all the given `nodes`.
         The search for the cliques is optimized for `nodes`.
     """
-    pass
+    if cliques is None:
+        if nodes is not None:
+            # Use ego_graph to decrease size of graph
+            # check for single node
+            if nodes in G:
+                return max(len(c) for c in find_cliques(nx.ego_graph(G, nodes)))
+            # handle multiple nodes
+            return {
+                n: max(len(c) for c in find_cliques(nx.ego_graph(G, n))) for n in nodes
+            }
+
+        # nodes is None--find all cliques
+        cliques = list(find_cliques(G))
+
+    # single node requested
+    if nodes in G:
+        return max(len(c) for c in cliques if nodes in c)
+
+    # multiple nodes requested
+    # preprocess all nodes (faster than one at a time for even 2 nodes)
+    size_for_n = defaultdict(int)
+    for c in cliques:
+        size_of_c = len(c)
+        for n in c:
+            if size_for_n[n] < size_of_c:
+                size_for_n[n] = size_of_c
+    if nodes is None:
+        return size_for_n
+    return {n: size_for_n[n] for n in nodes}


 def number_of_cliques(G, nodes=None, cliques=None):
@@ -418,7 +579,21 @@ def number_of_cliques(G, nodes=None, cliques=None):
     Returns a single or list depending on input nodes.
     Optional list of cliques can be input if already computed.
     """
-    pass
+    if cliques is None:
+        cliques = list(find_cliques(G))
+
+    if nodes is None:
+        nodes = list(G.nodes())  # none, get entire graph
+
+    if not isinstance(nodes, list):  # check for a list
+        v = nodes
+        # assume it is a single value
+        numcliq = len([1 for c in cliques if v in c])
+    else:
+        numcliq = {}
+        for v in nodes:
+            numcliq[v] = len([1 for c in cliques if v in c])
+    return numcliq


 class MaxWeightClique:
@@ -451,19 +626,16 @@ class MaxWeightClique:
         self.G = G
         self.incumbent_nodes = []
         self.incumbent_weight = 0
+
         if weight is None:
-            self.node_weights = {v: (1) for v in G.nodes()}
+            self.node_weights = {v: 1 for v in G.nodes()}
         else:
             for v in G.nodes():
                 if weight not in G.nodes[v]:
-                    errmsg = (
-                        f'Node {v!r} does not have the requested weight field.'
-                        )
+                    errmsg = f"Node {v!r} does not have the requested weight field."
                     raise KeyError(errmsg)
                 if not isinstance(G.nodes[v][weight], int):
-                    errmsg = (
-                        f'The {weight!r} field of node {v!r} is not an integer.'
-                        )
+                    errmsg = f"The {weight!r} field of node {v!r} is not an integer."
                     raise ValueError(errmsg)
             self.node_weights = {v: G.nodes[v][weight] for v in G.nodes()}

@@ -472,32 +644,63 @@ class MaxWeightClique:

         C is assumed to be a clique.
         """
-        pass
+        if C_weight > self.incumbent_weight:
+            self.incumbent_nodes = C[:]
+            self.incumbent_weight = C_weight

     def greedily_find_independent_set(self, P):
         """Greedily find an independent set of nodes from a set of
         nodes P."""
-        pass
+        independent_set = []
+        P = P[:]
+        while P:
+            v = P[0]
+            independent_set.append(v)
+            P = [w for w in P if v != w and not self.G.has_edge(v, w)]
+        return independent_set

     def find_branching_nodes(self, P, target):
         """Find a set of nodes to branch on."""
-        pass
+        residual_wt = {v: self.node_weights[v] for v in P}
+        total_wt = 0
+        P = P[:]
+        while P:
+            independent_set = self.greedily_find_independent_set(P)
+            min_wt_in_class = min(residual_wt[v] for v in independent_set)
+            total_wt += min_wt_in_class
+            if total_wt > target:
+                break
+            for v in independent_set:
+                residual_wt[v] -= min_wt_in_class
+            P = [v for v in P if residual_wt[v] != 0]
+        return P

     def expand(self, C, C_weight, P):
         """Look for the best clique that contains all the nodes in C and zero or
         more of the nodes in P, backtracking if it can be shown that no such
         clique has greater weight than the incumbent.
         """
-        pass
+        self.update_incumbent_if_improved(C, C_weight)
+        branching_nodes = self.find_branching_nodes(P, self.incumbent_weight - C_weight)
+        while branching_nodes:
+            v = branching_nodes.pop()
+            P.remove(v)
+            new_C = C + [v]
+            new_C_weight = C_weight + self.node_weights[v]
+            new_P = [w for w in P if self.G.has_edge(v, w)]
+            self.expand(new_C, new_C_weight, new_P)

     def find_max_weight_clique(self):
         """Find a maximum weight clique."""
-        pass
+        # Sort nodes in reverse order of degree for speed
+        nodes = sorted(self.G.nodes(), key=lambda v: self.G.degree(v), reverse=True)
+        nodes = [v for v in nodes if self.node_weights[v] > 0]
+        self.expand([], 0, nodes)


-@not_implemented_for('directed')
-@nx._dispatchable(node_attrs='weight')
-def max_weight_clique(G, weight='weight'):
+@not_implemented_for("directed")
+@nx._dispatchable(node_attrs="weight")
+def max_weight_clique(G, weight="weight"):
     """Find a maximum weight clique in G.

     A *clique* in a graph is a set of nodes such that every two distinct nodes
@@ -545,4 +748,7 @@ def max_weight_clique(G, weight='weight'):
            for the Maximum Weight Independent Set Problem.  Technical Report,
            Texas A&M University (2016).
     """
-    pass
+
+    mwc = MaxWeightClique(G, weight)
+    mwc.find_max_weight_clique()
+    return mwc.incumbent_nodes, mwc.incumbent_weight
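
A doctest-style sketch (illustrative): K4 minus one edge has exactly two
maximal triangles, and with integer node weights the branch-and-bound search
returns the heavier one.

    >>> import networkx as nx
    >>> G = nx.complete_graph(4)
    >>> G.remove_edge(0, 1)
    >>> sorted(sorted(c) for c in nx.find_cliques(G))
    [[0, 2, 3], [1, 2, 3]]
    >>> nx.set_node_attributes(G, {0: 10, 1: 1, 2: 2, 3: 3}, "weight")
    >>> clique, weight = nx.max_weight_clique(G, "weight")
    >>> sorted(clique), weight
    ([0, 2, 3], 15)
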
diff --git a/networkx/algorithms/cluster.py b/networkx/algorithms/cluster.py
index 58bcbf073..6c91ad281 100644
--- a/networkx/algorithms/cluster.py
+++ b/networkx/algorithms/cluster.py
@@ -1,13 +1,22 @@
 """Algorithms to characterize the number of triangles in a graph."""
+
 from collections import Counter
 from itertools import chain, combinations
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['triangles', 'average_clustering', 'clustering', 'transitivity',
-    'square_clustering', 'generalized_degree']
+
+__all__ = [
+    "triangles",
+    "average_clustering",
+    "clustering",
+    "transitivity",
+    "square_clustering",
+    "generalized_degree",
+]


-@not_implemented_for('directed')
+@not_implemented_for("directed")
 @nx._dispatchable
 def triangles(G, nodes=None):
     """Compute the number of triangles.
@@ -45,10 +54,40 @@ def triangles(G, nodes=None):
     Self loops are ignored.

     """
-    pass
+    if nodes is not None:
+        # If `nodes` represents a single node, return only its number of triangles
+        if nodes in G:
+            return next(_triangles_and_degree_iter(G, nodes))[2] // 2
+
+        # if `nodes` is a container of nodes, then return a
+        # dictionary mapping node to number of triangles.
+        return {v: t // 2 for v, d, t, _ in _triangles_and_degree_iter(G, nodes)}
+
+    # if nodes is None, then compute triangles for the complete graph
+
+    # dict used to avoid visiting the same nodes twice
+    # this allows calculating/counting each triangle only once
+    later_nbrs = {}
+
+    # iterate over the nodes in a graph
+    for node, neighbors in G.adjacency():
+        later_nbrs[node] = {n for n in neighbors if n not in later_nbrs and n != node}
+
+    # instantiate Counter for each node to include isolated nodes
+    # add 1 to the count if a node's neighbor's neighbor is also a neighbor
+    triangle_counts = Counter(dict.fromkeys(G, 0))
+    for node1, neighbors in later_nbrs.items():
+        for node2 in neighbors:
+            third_nodes = neighbors & later_nbrs[node2]
+            m = len(third_nodes)
+            triangle_counts[node1] += m
+            triangle_counts[node2] += m
+            triangle_counts.update(third_nodes)
+
+    return dict(triangle_counts)


-@not_implemented_for('multigraph')
+@not_implemented_for("multigraph")
 def _triangles_and_degree_iter(G, nodes=None):
     """Return an iterator of (node, degree, triangles, generalized degree).

@@ -57,11 +96,20 @@ def _triangles_and_degree_iter(G, nodes=None):
     and details.

     """
-    pass
+    if nodes is None:
+        nodes_nbrs = G.adj.items()
+    else:
+        nodes_nbrs = ((n, G[n]) for n in G.nbunch_iter(nodes))

+    for v, v_nbrs in nodes_nbrs:
+        vs = set(v_nbrs) - {v}
+        gen_degree = Counter(len(vs & (set(G[w]) - {w})) for w in vs)
+        ntriangles = sum(k * val for k, val in gen_degree.items())
+        yield (v, len(vs), ntriangles, gen_degree)

-@not_implemented_for('multigraph')
-def _weighted_triangles_and_degree_iter(G, nodes=None, weight='weight'):
+
+@not_implemented_for("multigraph")
+def _weighted_triangles_and_degree_iter(G, nodes=None, weight="weight"):
     """Return an iterator of (node, degree, weighted_triangles).

     Used for weighted clustering.
@@ -70,10 +118,38 @@ def _weighted_triangles_and_degree_iter(G, nodes=None, weight='weight'):
     So you may want to divide by 2.

     """
-    pass
-
-
-@not_implemented_for('multigraph')
+    import numpy as np
+
+    if weight is None or G.number_of_edges() == 0:
+        max_weight = 1
+    else:
+        max_weight = max(d.get(weight, 1) for u, v, d in G.edges(data=True))
+    if nodes is None:
+        nodes_nbrs = G.adj.items()
+    else:
+        nodes_nbrs = ((n, G[n]) for n in G.nbunch_iter(nodes))
+
+    def wt(u, v):
+        return G[u][v].get(weight, 1) / max_weight
+
+    for i, nbrs in nodes_nbrs:
+        inbrs = set(nbrs) - {i}
+        weighted_triangles = 0
+        seen = set()
+        for j in inbrs:
+            seen.add(j)
+            # This avoids counting twice -- we double at the end.
+            jnbrs = set(G[j]) - seen
+            # Only compute the edge weight once, before the innermost loop.
+            wij = wt(i, j)
+            weighted_triangles += np.cbrt(
+                [(wij * wt(j, k) * wt(k, i)) for k in inbrs & jnbrs]
+            ).sum()
+        yield (i, len(inbrs), 2 * float(weighted_triangles))
+
+
+@not_implemented_for("multigraph")
 def _directed_triangles_and_degree_iter(G, nodes=None):
     """Return an iterator of
     (node, total_degree, reciprocal_degree, directed_triangles).
@@ -83,12 +159,32 @@ def _directed_triangles_and_degree_iter(G, nodes=None):
     directed triangles so does not count triangles twice.

     """
-    pass
-
-
-@not_implemented_for('multigraph')
-def _directed_weighted_triangles_and_degree_iter(G, nodes=None, weight='weight'
-    ):
+    nodes_nbrs = ((n, G._pred[n], G._succ[n]) for n in G.nbunch_iter(nodes))
+
+    for i, preds, succs in nodes_nbrs:
+        ipreds = set(preds) - {i}
+        isuccs = set(succs) - {i}
+
+        directed_triangles = 0
+        for j in chain(ipreds, isuccs):
+            jpreds = set(G._pred[j]) - {j}
+            jsuccs = set(G._succ[j]) - {j}
+            directed_triangles += sum(
+                1
+                for k in chain(
+                    (ipreds & jpreds),
+                    (ipreds & jsuccs),
+                    (isuccs & jpreds),
+                    (isuccs & jsuccs),
+                )
+            )
+        dtotal = len(ipreds) + len(isuccs)
+        dbidirectional = len(ipreds & isuccs)
+        yield (i, dtotal, dbidirectional, directed_triangles)
+
+
+@not_implemented_for("multigraph")
+def _directed_weighted_triangles_and_degree_iter(G, nodes=None, weight="weight"):
     """Return an iterator of
     (node, total_degree, reciprocal_degree, directed_weighted_triangles).

@@ -97,18 +193,69 @@ def _directed_weighted_triangles_and_degree_iter(G, nodes=None, weight='weight'
     directed triangles so does not count triangles twice.

     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
+    import numpy as np
+
+    if weight is None or G.number_of_edges() == 0:
+        max_weight = 1
+    else:
+        max_weight = max(d.get(weight, 1) for u, v, d in G.edges(data=True))
+
+    nodes_nbrs = ((n, G._pred[n], G._succ[n]) for n in G.nbunch_iter(nodes))
+
+    def wt(u, v):
+        return G[u][v].get(weight, 1) / max_weight
+
+    for i, preds, succs in nodes_nbrs:
+        ipreds = set(preds) - {i}
+        isuccs = set(succs) - {i}
+
+        directed_triangles = 0
+        for j in ipreds:
+            jpreds = set(G._pred[j]) - {j}
+            jsuccs = set(G._succ[j]) - {j}
+            directed_triangles += np.cbrt(
+                [(wt(j, i) * wt(k, i) * wt(k, j)) for k in ipreds & jpreds]
+            ).sum()
+            directed_triangles += np.cbrt(
+                [(wt(j, i) * wt(k, i) * wt(j, k)) for k in ipreds & jsuccs]
+            ).sum()
+            directed_triangles += np.cbrt(
+                [(wt(j, i) * wt(i, k) * wt(k, j)) for k in isuccs & jpreds]
+            ).sum()
+            directed_triangles += np.cbrt(
+                [(wt(j, i) * wt(i, k) * wt(j, k)) for k in isuccs & jsuccs]
+            ).sum()
+
+        for j in isuccs:
+            jpreds = set(G._pred[j]) - {j}
+            jsuccs = set(G._succ[j]) - {j}
+            directed_triangles += np.cbrt(
+                [(wt(i, j) * wt(k, i) * wt(k, j)) for k in ipreds & jpreds]
+            ).sum()
+            directed_triangles += np.cbrt(
+                [(wt(i, j) * wt(k, i) * wt(j, k)) for k in ipreds & jsuccs]
+            ).sum()
+            directed_triangles += np.cbrt(
+                [(wt(i, j) * wt(i, k) * wt(k, j)) for k in isuccs & jpreds]
+            ).sum()
+            directed_triangles += np.cbrt(
+                [(wt(i, j) * wt(i, k) * wt(j, k)) for k in isuccs & jsuccs]
+            ).sum()
+
+        dtotal = len(ipreds) + len(isuccs)
+        dbidirectional = len(ipreds & isuccs)
+        yield (i, dtotal, dbidirectional, float(directed_triangles))
+
+
+@nx._dispatchable(edge_attrs="weight")
 def average_clustering(G, nodes=None, weight=None, count_zeros=True):
-    """Compute the average clustering coefficient for the graph G.
+    r"""Compute the average clustering coefficient for the graph G.

     The clustering coefficient for the graph is the average,

     .. math::

-       C = \\frac{1}{n}\\sum_{v \\in G} c_v,
+       C = \frac{1}{n}\sum_{v \in G} c_v,

     where :math:`n` is the number of nodes in `G`.

@@ -154,19 +301,22 @@ def average_clustering(G, nodes=None, weight=None, count_zeros=True):
        nodes and leafs on clustering measures for small-world networks.
        https://arxiv.org/abs/0802.2512
     """
-    pass
+    c = clustering(G, nodes, weight=weight).values()
+    if not count_zeros:
+        c = [v for v in c if abs(v) > 0]
+    return sum(c) / len(c)
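
A minimal check of the restored `average_clustering` (illustrative):

    import networkx as nx

    G = nx.complete_graph(5)
    nx.average_clustering(G)  # 1.0: every node's neighborhood is a clique
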


-@nx._dispatchable(edge_attrs='weight')
+@nx._dispatchable(edge_attrs="weight")
 def clustering(G, nodes=None, weight=None):
-    """Compute the clustering coefficient for nodes.
+    r"""Compute the clustering coefficient for nodes.

     For unweighted graphs, the clustering of a node :math:`u`
     is the fraction of possible triangles through that node that exist,

     .. math::

-      c_u = \\frac{2 T(u)}{deg(u)(deg(u)-1)},
+      c_u = \frac{2 T(u)}{deg(u)(deg(u)-1)},

     where :math:`T(u)` is the number of triangles through node :math:`u` and
     :math:`deg(u)` is the degree of :math:`u`.
@@ -177,11 +327,11 @@ def clustering(G, nodes=None, weight=None):

     .. math::

-       c_u = \\frac{1}{deg(u)(deg(u)-1))}
-             \\sum_{vw} (\\hat{w}_{uv} \\hat{w}_{uw} \\hat{w}_{vw})^{1/3}.
+       c_u = \frac{1}{deg(u)(deg(u)-1)}
+             \sum_{vw} (\hat{w}_{uv} \hat{w}_{uw} \hat{w}_{vw})^{1/3}.

-    The edge weights :math:`\\hat{w}_{uv}` are normalized by the maximum weight
-    in the network :math:`\\hat{w}_{uv} = w_{uv}/\\max(w)`.
+    The edge weights :math:`\hat{w}_{uv}` are normalized by the maximum weight
+    in the network :math:`\hat{w}_{uv} = w_{uv}/\max(w)`.

     The value of :math:`c_u` is assigned to 0 if :math:`deg(u) < 2`.

@@ -193,11 +343,11 @@ def clustering(G, nodes=None, weight=None):

     .. math::

-       c_u = \\frac{T(u)}{2(deg^{tot}(u)(deg^{tot}(u)-1) - 2deg^{\\leftrightarrow}(u))},
+       c_u = \frac{T(u)}{2(deg^{tot}(u)(deg^{tot}(u)-1) - 2deg^{\leftrightarrow}(u))},

     where :math:`T(u)` is the number of directed triangles through node
     :math:`u`, :math:`deg^{tot}(u)` is the sum of in degree and out degree of
-    :math:`u` and :math:`deg^{\\leftrightarrow}(u)` is the reciprocal degree of
+    :math:`u` and :math:`deg^{\leftrightarrow}(u)` is the reciprocal degree of
     :math:`u`.


@@ -245,12 +395,36 @@ def clustering(G, nodes=None, weight=None):
     .. [4] Clustering in complex directed networks by G. Fagiolo,
        Physical Review E, 76(2), 026107 (2007).
     """
-    pass
+    if G.is_directed():
+        if weight is not None:
+            td_iter = _directed_weighted_triangles_and_degree_iter(G, nodes, weight)
+            clusterc = {
+                v: 0 if t == 0 else t / ((dt * (dt - 1) - 2 * db) * 2)
+                for v, dt, db, t in td_iter
+            }
+        else:
+            td_iter = _directed_triangles_and_degree_iter(G, nodes)
+            clusterc = {
+                v: 0 if t == 0 else t / ((dt * (dt - 1) - 2 * db) * 2)
+                for v, dt, db, t in td_iter
+            }
+    else:
+        # The formula 2*T/(d*(d-1)) from docs is t/(d*(d-1)) here b/c t==2*T
+        if weight is not None:
+            td_iter = _weighted_triangles_and_degree_iter(G, nodes, weight)
+            clusterc = {v: 0 if t == 0 else t / (d * (d - 1)) for v, d, t in td_iter}
+        else:
+            td_iter = _triangles_and_degree_iter(G, nodes)
+            clusterc = {v: 0 if t == 0 else t / (d * (d - 1)) for v, d, t, _ in td_iter}
+    if nodes in G:
+        # Return the value of the sole entry in the dictionary.
+        return clusterc[nodes]
+    return clusterc
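
A short sketch of the unweighted case handled above (illustrative values):

    import networkx as nx

    G = nx.complete_graph(5)
    nx.clustering(G, 0)  # 1.0: all C(4, 2) = 6 triangles through node 0 exist
    G.remove_edge(1, 2)
    nx.clustering(G, 0)  # 0.8333...: triangle (0, 1, 2) is gone, 5/6 remain
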


 @nx._dispatchable
 def transitivity(G):
-    """Compute graph transitivity, the fraction of all possible triangles
+    r"""Compute graph transitivity, the fraction of all possible triangles
     present in G.

     Possible triangles are identified by the number of "triads"
@@ -260,7 +434,7 @@ def transitivity(G):

     .. math::

-        T = 3\\frac{\\#triangles}{\\#triads}.
+        T = 3\frac{\#triangles}{\#triads}.

     Parameters
     ----------
@@ -281,25 +455,32 @@ def transitivity(G):
     >>> print(nx.transitivity(G))
     1.0
     """
-    pass
+    triangles_contri = [
+        (t, d * (d - 1)) for v, d, t, _ in _triangles_and_degree_iter(G)
+    ]
+    # If the graph is empty
+    if len(triangles_contri) == 0:
+        return 0
+    triangles, contri = map(sum, zip(*triangles_contri))
+    return 0 if triangles == 0 else triangles / contri


 @nx._dispatchable
 def square_clustering(G, nodes=None):
-    """Compute the squares clustering coefficient for nodes.
+    r"""Compute the squares clustering coefficient for nodes.

     For each node return the fraction of possible squares that exist at
     the node [1]_

     .. math::
-       C_4(v) = \\frac{ \\sum_{u=1}^{k_v}
-       \\sum_{w=u+1}^{k_v} q_v(u,w) }{ \\sum_{u=1}^{k_v}
-       \\sum_{w=u+1}^{k_v} [a_v(u,w) + q_v(u,w)]},
+       C_4(v) = \frac{ \sum_{u=1}^{k_v}
+       \sum_{w=u+1}^{k_v} q_v(u,w) }{ \sum_{u=1}^{k_v}
+       \sum_{w=u+1}^{k_v} [a_v(u,w) + q_v(u,w)]},

     where :math:`q_v(u,w)` are the number of common neighbors of :math:`u` and
     :math:`w` other than :math:`v` (ie squares), and :math:`a_v(u,w) = (k_u -
-    (1+q_v(u,w)+\\theta_{uv})) + (k_w - (1+q_v(u,w)+\\theta_{uw}))`, where
-    :math:`\\theta_{uw} = 1` if :math:`u` and :math:`w` are connected and 0
+    (1+q_v(u,w)+\theta_{uv})) + (k_w - (1+q_v(u,w)+\theta_{uw}))`, where
+    :math:`\theta_{uw} = 1` if :math:`u` and :math:`w` are connected and 0
     otherwise. [2]_

     Parameters
@@ -339,19 +520,39 @@ def square_clustering(G, nodes=None):
         Bipartite Networks. Physica A: Statistical Mechanics and its Applications 387.27 (2008): 6869–6875.
         https://arxiv.org/abs/0710.0117v1
     """
-    pass
-
-
-@not_implemented_for('directed')
+    if nodes is None:
+        node_iter = G
+    else:
+        node_iter = G.nbunch_iter(nodes)
+    clustering = {}
+    for v in node_iter:
+        clustering[v] = 0
+        potential = 0
+        for u, w in combinations(G[v], 2):
+            squares = len((set(G[u]) & set(G[w])) - {v})
+            clustering[v] += squares
+            degm = squares + 1
+            if w in G[u]:
+                degm += 1
+            potential += (len(G[u]) - degm) + (len(G[w]) - degm) + squares
+        if potential > 0:
+            clustering[v] /= potential
+    if nodes in G:
+        # Return the value of the sole entry in the dictionary.
+        return clustering[nodes]
+    return clustering
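
A minimal sketch of the restored `square_clustering` (illustrative):

    import networkx as nx

    G = nx.cycle_graph(4)       # the 4-cycle is a single square
    nx.square_clustering(G, 0)  # 1.0: the only possible square through 0 exists
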
+
+
+@not_implemented_for("directed")
 @nx._dispatchable
 def generalized_degree(G, nodes=None):
-    """Compute the generalized degree for nodes.
+    r"""Compute the generalized degree for nodes.

     For each node, the generalized degree shows how many edges of given
     triangle multiplicity the node is connected to. The triangle multiplicity
     of an edge is the number of triangles an edge participates in. The
     generalized degree of node :math:`i` can be written as a vector
-    :math:`\\mathbf{k}_i=(k_i^{(0)}, \\dotsc, k_i^{(N-2)})` where
+    :math:`\mathbf{k}_i=(k_i^{(0)}, \dotsc, k_i^{(N-2)})` where
     :math:`k_i^{(j)}` is the number of edges attached to node :math:`i` that
     participate in :math:`j` triangles.

@@ -393,8 +594,8 @@ def generalized_degree(G, nodes=None):
     particular triangle multiplicity are present.

     The number of triangles node :math:`i` is attached to can be recovered from
-    the generalized degree :math:`\\mathbf{k}_i=(k_i^{(0)}, \\dotsc,
-    k_i^{(N-2)})` by :math:`(k_i^{(1)}+2k_i^{(2)}+\\dotsc +(N-2)k_i^{(N-2)})/2`.
+    the generalized degree :math:`\mathbf{k}_i=(k_i^{(0)}, \dotsc,
+    k_i^{(N-2)})` by :math:`(k_i^{(1)}+2k_i^{(2)}+\dotsc +(N-2)k_i^{(N-2)})/2`.

     References
     ----------
@@ -403,4 +604,6 @@ def generalized_degree(G, nodes=None):
         Volume 97, Number 2 (2012).
         https://iopscience.iop.org/article/10.1209/0295-5075/97/28005
     """
-    pass
+    if nodes in G:
+        return next(_triangles_and_degree_iter(G, nodes))[3]
+    return {v: gd for v, d, t, gd in _triangles_and_degree_iter(G, nodes)}
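
A minimal sketch of the restored `generalized_degree` (illustrative):

    import networkx as nx

    G = nx.complete_graph(5)
    nx.generalized_degree(G, 0)
    # Counter({3: 4}): all 4 edges at node 0 participate in 3 triangles each
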
diff --git a/networkx/algorithms/coloring/equitable_coloring.py b/networkx/algorithms/coloring/equitable_coloring.py
index 05af03451..e464a0744 100644
--- a/networkx/algorithms/coloring/equitable_coloring.py
+++ b/networkx/algorithms/coloring/equitable_coloring.py
@@ -1,31 +1,115 @@
 """
 Equitable coloring of graphs with bounded degree.
 """
+
 from collections import defaultdict
+
 import networkx as nx
-__all__ = ['equitable_color']
+
+__all__ = ["equitable_color"]


 @nx._dispatchable
 def is_coloring(G, coloring):
     """Determine if the coloring is a valid coloring for the graph G."""
-    pass
+    # Verify that the coloring is valid.
+    return all(coloring[s] != coloring[d] for s, d in G.edges)


 @nx._dispatchable
 def is_equitable(G, coloring, num_colors=None):
     """Determines if the coloring is valid and equitable for the graph G."""
-    pass
+
+    if not is_coloring(G, coloring):
+        return False
+
+    # Verify whether it is equitable.
+    color_set_size = defaultdict(int)
+    for color in coloring.values():
+        color_set_size[color] += 1
+
+    if num_colors is not None:
+        for color in range(num_colors):
+            if color not in color_set_size:
+                # These colors do not have any vertices attached to them.
+                color_set_size[color] = 0
+
+    # If there are more than 2 distinct values, the coloring cannot be equitable
+    all_set_sizes = set(color_set_size.values())
+    if len(all_set_sizes) == 0 and num_colors is None:  # Was an empty graph
+        return True
+    elif len(all_set_sizes) == 1:
+        return True
+    elif len(all_set_sizes) == 2:
+        a, b = list(all_set_sizes)
+        return abs(a - b) <= 1
+    else:  # len(all_set_sizes) > 2:
+        return False
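
Both helpers are module-level rather than re-exported at the package top
level; a hedged sketch, assuming the import path below:

    import networkx as nx
    from networkx.algorithms.coloring.equitable_coloring import (
        is_coloring,
        is_equitable,
    )

    G = nx.path_graph(4)
    coloring = {0: 0, 1: 1, 2: 0, 3: 1}
    is_coloring(G, coloring)   # True: no edge joins same-colored nodes
    is_equitable(G, coloring)  # True: both color classes have size 2
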
+
+
+def make_C_from_F(F):
+    C = defaultdict(list)
+    for node, color in F.items():
+        C[color].append(node)
+
+    return C
+
+
+def make_N_from_L_C(L, C):
+    nodes = L.keys()
+    colors = C.keys()
+    return {
+        (node, color): sum(1 for v in L[node] if v in C[color])
+        for node in nodes
+        for color in colors
+    }
+
+
+def make_H_from_C_N(C, N):
+    return {
+        (c1, c2): sum(1 for node in C[c1] if N[(node, c2)] == 0) for c1 in C for c2 in C
+    }


 def change_color(u, X, Y, N, H, F, C, L):
     """Change the color of 'u' from X to Y and update N, H, F, C."""
-    pass
+    assert F[u] == X and X != Y
+
+    # Change the class of 'u' from X to Y
+    F[u] = Y
+
+    for k in C:
+        # 'u' witnesses an edge from k -> Y instead of from k -> X now.
+        if N[u, k] == 0:
+            H[(X, k)] -= 1
+            H[(Y, k)] += 1
+
+    for v in L[u]:
+        # 'v' has lost a neighbor in X and gained one in Y
+        N[(v, X)] -= 1
+        N[(v, Y)] += 1
+
+        if N[(v, X)] == 0:
+            # 'v' witnesses F[v] -> X
+            H[(F[v], X)] += 1
+
+        if N[(v, Y)] == 1:
+            # 'v' no longer witnesses F[v] -> Y
+            H[(F[v], Y)] -= 1
+
+    C[X].remove(u)
+    C[Y].append(u)


 def move_witnesses(src_color, dst_color, N, H, F, C, T_cal, L):
     """Move witness along a path from src_color to dst_color."""
-    pass
+    X = src_color
+    while X != dst_color:
+        Y = T_cal[X]
+        # Move _any_ witness from X to Y = T_cal[X]
+        w = next(x for x in C[X] if N[(x, Y)] == 0)
+        change_color(w, X, Y, N=N, H=H, F=F, C=C, L=L)
+        X = Y


 @nx._dispatchable(mutates_input=True)
@@ -37,12 +121,269 @@ def pad_graph(G, num_colors):

     Returns the number of nodes with each color.
     """
-    pass
+
+    n_ = len(G)
+    r = num_colors - 1
+
+    # Ensure that the number of nodes in G is a multiple of (r + 1)
+    s = n_ // (r + 1)
+    if n_ != s * (r + 1):
+        p = (r + 1) - n_ % (r + 1)
+        s += 1
+
+        # Complete graph K_p between (imaginary) nodes [n_, ... , n_ + p]
+        K = nx.relabel_nodes(nx.complete_graph(p), {idx: idx + n_ for idx in range(p)})
+        G.add_edges_from(K.edges)
+
+    return s


 def procedure_P(V_minus, V_plus, N, H, F, C, L, excluded_colors=None):
     """Procedure P as described in the paper."""
-    pass
+
+    if excluded_colors is None:
+        excluded_colors = set()
+
+    A_cal = set()
+    T_cal = {}
+    R_cal = []
+
+    # BFS to determine A_cal, i.e. colors reachable from V-
+    reachable = [V_minus]
+    marked = set(reachable)
+    idx = 0
+
+    while idx < len(reachable):
+        pop = reachable[idx]
+        idx += 1
+
+        A_cal.add(pop)
+        R_cal.append(pop)
+
+        # TODO: Checking whether a color has been visited can be made faster by
+        # using a look-up table instead of testing for membership in a set by a
+        # logarithmic factor.
+        next_layer = []
+        for k in C:
+            if (
+                H[(k, pop)] > 0
+                and k not in A_cal
+                and k not in excluded_colors
+                and k not in marked
+            ):
+                next_layer.append(k)
+
+        for dst in next_layer:
+            # Record that `dst` can reach `pop`
+            T_cal[dst] = pop
+
+        marked.update(next_layer)
+        reachable.extend(next_layer)
+
+    # Variables for the algorithm
+    b = len(C) - len(A_cal)
+
+    if V_plus in A_cal:
+        # Easy case: V+ is in A_cal
+        # Move one node from V+ to V- using T_cal to find the parents.
+        move_witnesses(V_plus, V_minus, N=N, H=H, F=F, C=C, T_cal=T_cal, L=L)
+    else:
+        # If there is a solo edge, we can resolve the situation by
+        # moving witnesses from B to A, making G[A] equitable and then
+        # recursively balancing G[B - w] with a different V_minus but
+        # the same V_plus.
+
+        A_0 = set()
+        A_cal_0 = set()
+        num_terminal_sets_found = 0
+        made_equitable = False
+
+        for W_1 in R_cal[::-1]:
+            for v in C[W_1]:
+                X = None
+
+                for U in C:
+                    if N[(v, U)] == 0 and U in A_cal and U != W_1:
+                        X = U
+
+                # v does not witness an edge in H[A_cal]
+                if X is None:
+                    continue
+
+                for U in C:
+                    # Note: Departing from the paper here.
+                    if N[(v, U)] >= 1 and U not in A_cal:
+                        X_prime = U
+                        w = v
+
+                        try:
+                            # Finding the solo neighbor of w in X_prime
+                            y = next(
+                                node
+                                for node in L[w]
+                                if F[node] == X_prime and N[(node, W_1)] == 1
+                            )
+                        except StopIteration:
+                            pass
+                        else:
+                            W = W_1
+
+                            # Move w from W to X, now X has one extra node.
+                            change_color(w, W, X, N=N, H=H, F=F, C=C, L=L)
+
+                            # Move witness from X to V_minus, making the coloring
+                            # equitable.
+                            move_witnesses(
+                                src_color=X,
+                                dst_color=V_minus,
+                                N=N,
+                                H=H,
+                                F=F,
+                                C=C,
+                                T_cal=T_cal,
+                                L=L,
+                            )
+
+                            # Move y from X_prime to W, making W the correct size.
+                            change_color(y, X_prime, W, N=N, H=H, F=F, C=C, L=L)
+
+                            # Then call the procedure on G[B - y]
+                            procedure_P(
+                                V_minus=X_prime,
+                                V_plus=V_plus,
+                                N=N,
+                                H=H,
+                                C=C,
+                                F=F,
+                                L=L,
+                                excluded_colors=excluded_colors.union(A_cal),
+                            )
+                            made_equitable = True
+                            break
+
+                if made_equitable:
+                    break
+            else:
+                # No node in W_1 was found such that
+                # it had a solo-neighbor.
+                A_cal_0.add(W_1)
+                A_0.update(C[W_1])
+                num_terminal_sets_found += 1
+
+            if num_terminal_sets_found == b:
+                # Otherwise, construct the maximal independent set and find
+                # a pair of z_1, z_2 as in Case II.
+
+                # BFS to determine B_cal': the set of colors reachable from V+
+                B_cal_prime = set()
+                T_cal_prime = {}
+
+                reachable = [V_plus]
+                marked = set(reachable)
+                idx = 0
+                while idx < len(reachable):
+                    pop = reachable[idx]
+                    idx += 1
+
+                    B_cal_prime.add(pop)
+
+                    # No need to check for excluded_colors here because
+                    # they only exclude colors from A_cal
+                    next_layer = [
+                        k
+                        for k in C
+                        if H[(pop, k)] > 0 and k not in B_cal_prime and k not in marked
+                    ]
+
+                    for dst in next_layer:
+                        T_cal_prime[pop] = dst
+
+                    marked.update(next_layer)
+                    reachable.extend(next_layer)
+
+                # Construct the independent set of G[B']
+                I_set = set()
+                I_covered = set()
+                W_covering = {}
+
+                B_prime = [node for k in B_cal_prime for node in C[k]]
+
+                # Add the nodes in V_plus to I first.
+                for z in C[V_plus] + B_prime:
+                    if z in I_covered or F[z] not in B_cal_prime:
+                        continue
+
+                    I_set.add(z)
+                    I_covered.add(z)
+                    I_covered.update(list(L[z]))
+
+                    for w in L[z]:
+                        if F[w] in A_cal_0 and N[(z, F[w])] == 1:
+                            if w not in W_covering:
+                                W_covering[w] = z
+                            else:
+                                # Found z1, z2 which have the same solo
+                                # neighbor in some W
+                                z_1 = W_covering[w]
+                                # z_2 = z
+
+                                Z = F[z_1]
+                                W = F[w]
+
+                                # shift nodes along W, V-
+                                move_witnesses(
+                                    W, V_minus, N=N, H=H, F=F, C=C, T_cal=T_cal, L=L
+                                )
+
+                                # shift nodes along V+ to Z
+                                move_witnesses(
+                                    V_plus,
+                                    Z,
+                                    N=N,
+                                    H=H,
+                                    F=F,
+                                    C=C,
+                                    T_cal=T_cal_prime,
+                                    L=L,
+                                )
+
+                                # change color of z_1 to W
+                                change_color(z_1, Z, W, N=N, H=H, F=F, C=C, L=L)
+
+                                # change color of w to some color in B_cal
+                                W_plus = next(
+                                    k for k in C if N[(w, k)] == 0 and k not in A_cal
+                                )
+                                change_color(w, W, W_plus, N=N, H=H, F=F, C=C, L=L)
+
+                                # recurse with G[B \cup W*]
+                                excluded_colors.update(
+                                    [k for k in C if k != W and k not in B_cal_prime]
+                                )
+                                procedure_P(
+                                    V_minus=W,
+                                    V_plus=W_plus,
+                                    N=N,
+                                    H=H,
+                                    C=C,
+                                    F=F,
+                                    L=L,
+                                    excluded_colors=excluded_colors,
+                                )
+
+                                made_equitable = True
+                                break
+
+                    if made_equitable:
+                        break
+                else:
+                    assert False, (
+                        "Must find a w which is the solo neighbor "
+                        "of two vertices in B_cal_prime."
+                    )
+
+            if made_equitable:
+                break


 @nx._dispatchable
@@ -86,4 +427,79 @@ def equitable_color(G, num_colors):
         (2010). A fast algorithm for equitable coloring. Combinatorica, 30(2),
         217-224.
     """
-    pass
+
+    # Map nodes to integers for simplicity later.
+    nodes_to_int = {}
+    int_to_nodes = {}
+
+    for idx, node in enumerate(G.nodes):
+        nodes_to_int[node] = idx
+        int_to_nodes[idx] = node
+
+    G = nx.relabel_nodes(G, nodes_to_int, copy=True)
+
+    # Basic graph statistics and sanity check.
+    if len(G.nodes) > 0:
+        r_ = max(G.degree(node) for node in G.nodes)
+    else:
+        r_ = 0
+
+    if r_ >= num_colors:
+        raise nx.NetworkXAlgorithmError(
+            f"Graph has maximum degree {r_}, needs "
+            f"{r_ + 1} (> {num_colors}) colors for guaranteed coloring."
+        )
+
+    # Ensure that the number of nodes in G is a multiple of (r + 1)
+    pad_graph(G, num_colors)
+
+    # Starting the algorithm.
+    # L = {node: list(G.neighbors(node)) for node in G.nodes}
+    L_ = {node: [] for node in G.nodes}
+
+    # Arbitrary equitable allocation of colors to nodes.
+    F = {node: idx % num_colors for idx, node in enumerate(G.nodes)}
+
+    C = make_C_from_F(F)
+
+    # The neighborhood is empty initially.
+    N = make_N_from_L_C(L_, C)
+
+    # Currently all nodes witness all edges.
+    H = make_H_from_C_N(C, N)
+
+    # Start of algorithm.
+    edges_seen = set()
+
+    for u in sorted(G.nodes):
+        for v in sorted(G.neighbors(u)):
+            # Do not double count edges if (v, u) has already been seen.
+            if (v, u) in edges_seen:
+                continue
+
+            edges_seen.add((u, v))
+
+            L_[u].append(v)
+            L_[v].append(u)
+
+            N[(u, F[v])] += 1
+            N[(v, F[u])] += 1
+
+            if F[u] != F[v]:
+                # Were 'u' and 'v' witnesses for F[u] -> F[v] or F[v] -> F[u]?
+                if N[(u, F[v])] == 1:
+                    H[F[u], F[v]] -= 1  # u cannot witness an edge between F[u], F[v]
+
+                if N[(v, F[u])] == 1:
+                    H[F[v], F[u]] -= 1  # v cannot witness an edge between F[v], F[u]
+
+        if N[(u, F[u])] != 0:
+            # Find the first color where 'u' does not have any neighbors.
+            Y = next(k for k in C if N[(u, k)] == 0)
+            X = F[u]
+            change_color(u, X, Y, N=N, H=H, F=F, C=C, L=L_)
+
+            # Procedure P
+            procedure_P(V_minus=X, V_plus=Y, N=N, H=H, F=F, C=C, L=L_)
+
+    return {int_to_nodes[x]: F[x] for x in int_to_nodes}
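
A minimal usage sketch (illustrative; exact labels may vary, the guarantees
are a proper coloring and near-equal class sizes):

    import networkx as nx

    G = nx.cycle_graph(4)
    nx.equitable_color(G, num_colors=3)
    # e.g. {0: 0, 1: 1, 2: 2, 3: 1}: a proper coloring whose color
    # classes have sizes 2, 1 and 1
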
diff --git a/networkx/algorithms/coloring/greedy_coloring.py b/networkx/algorithms/coloring/greedy_coloring.py
index 7927585ea..61bc95367 100644
--- a/networkx/algorithms/coloring/greedy_coloring.py
+++ b/networkx/algorithms/coloring/greedy_coloring.py
@@ -3,13 +3,21 @@ Greedy graph coloring using various strategies.
 """
 import itertools
 from collections import defaultdict, deque
+
 import networkx as nx
 from networkx.utils import arbitrary_element, py_random_state
-__all__ = ['greedy_color', 'strategy_connected_sequential',
-    'strategy_connected_sequential_bfs',
-    'strategy_connected_sequential_dfs', 'strategy_independent_set',
-    'strategy_largest_first', 'strategy_random_sequential',
-    'strategy_saturation_largest_first', 'strategy_smallest_last']
+
+__all__ = [
+    "greedy_color",
+    "strategy_connected_sequential",
+    "strategy_connected_sequential_bfs",
+    "strategy_connected_sequential_dfs",
+    "strategy_independent_set",
+    "strategy_largest_first",
+    "strategy_random_sequential",
+    "strategy_saturation_largest_first",
+    "strategy_smallest_last",
+]


 def strategy_largest_first(G, colors):
@@ -19,7 +27,7 @@ def strategy_largest_first(G, colors):
     ``G`` is a NetworkX graph. ``colors`` is ignored.

     """
-    pass
+    return sorted(G, key=G.degree, reverse=True)


 @py_random_state(2)
@@ -32,7 +40,9 @@ def strategy_random_sequential(G, colors, seed=None):
         Indicator of random number generation state.
         See :ref:`Randomness<randomness>`.
     """
-    pass
+    nodes = list(G)
+    seed.shuffle(nodes)
+    return nodes


 def strategy_smallest_last(G, colors):
@@ -54,7 +64,42 @@ def strategy_smallest_last(G, colors):
     maximal independent set.

     """
-    pass
+    H = G.copy()
+    result = deque()
+
+    # Build initial degree list (i.e. the bucket queue data structure)
+    degrees = defaultdict(set)  # set(), for fast random-access removals
+    lbound = float("inf")
+    for node, d in H.degree():
+        degrees[d].add(node)
+        lbound = min(lbound, d)  # Lower bound on min-degree.
+
+    def find_min_degree():
+        # Save time by starting the iterator at `lbound`, not 0.
+        # The value that we find will be our new `lbound`, which we set later.
+        return next(d for d in itertools.count(lbound) if d in degrees)
+
+    for _ in G:
+        # Pop a min-degree node and add it to the list.
+        min_degree = find_min_degree()
+        u = degrees[min_degree].pop()
+        if not degrees[min_degree]:  # Clean up the degree list.
+            del degrees[min_degree]
+        result.appendleft(u)
+
+        # Update degrees of removed node's neighbors.
+        for v in H[u]:
+            degree = H.degree(v)
+            degrees[degree].remove(v)
+            if not degrees[degree]:  # Clean up the degree list.
+                del degrees[degree]
+            degrees[degree - 1].add(v)
+
+        # Finally, remove the node.
+        H.remove_node(u)
+        lbound = min_degree - 1  # Subtract 1 in case of tied neighbors.
+
+    return result


 def _maximal_independent_set(G):
@@ -63,7 +108,14 @@ def _maximal_independent_set(G):
     subgraph of unchosen nodes).

     """
-    pass
+    result = set()
+    remaining = set(G)
+    while remaining:
+        G = G.subgraph(remaining)
+        v = min(remaining, key=G.degree)
+        result.add(v)
+        remaining -= set(G[v]) | {v}
+    return result


 def strategy_independent_set(G, colors):
@@ -83,7 +135,11 @@ def strategy_independent_set(G, colors):
     instead of a maximal independent set.

     """
-    pass
+    remaining_nodes = set(G)
+    while len(remaining_nodes) > 0:
+        nodes = _maximal_independent_set(G.subgraph(remaining_nodes))
+        remaining_nodes -= nodes
+        yield from nodes


 def strategy_connected_sequential_bfs(G, colors):
@@ -96,7 +152,7 @@ def strategy_connected_sequential_bfs(G, colors):
     ``G`` is a NetworkX graph. ``colors`` is ignored.

     """
-    pass
+    return strategy_connected_sequential(G, colors, "bfs")


 def strategy_connected_sequential_dfs(G, colors):
@@ -109,10 +165,10 @@ def strategy_connected_sequential_dfs(G, colors):
     ``G`` is a NetworkX graph. ``colors`` is ignored.

     """
-    pass
+    return strategy_connected_sequential(G, colors, "dfs")


-def strategy_connected_sequential(G, colors, traversal='bfs'):
+def strategy_connected_sequential(G, colors, traversal="bfs"):
     """Returns an iterable over nodes in ``G`` in the order given by a
     breadth-first or depth-first traversal.

@@ -126,7 +182,22 @@ def strategy_connected_sequential(G, colors, traversal='bfs'):
     ``G`` is a NetworkX graph. ``colors`` is ignored.

     """
-    pass
+    if traversal == "bfs":
+        traverse = nx.bfs_edges
+    elif traversal == "dfs":
+        traverse = nx.dfs_edges
+    else:
+        raise nx.NetworkXError(
+            "Please specify one of the strings 'bfs' or"
+            " 'dfs' for connected sequential ordering"
+        )
+    for component in nx.connected_components(G):
+        source = arbitrary_element(component)
+        # Yield the source node, then all the nodes in the specified
+        # traversal order.
+        yield source
+        for _, end in traverse(G.subgraph(component), source):
+            yield end


 def strategy_saturation_largest_first(G, colors):
@@ -137,21 +208,61 @@ def strategy_saturation_largest_first(G, colors):
     ``G`` to colors, for those nodes that have already been colored.

     """
-    pass
-
-
-STRATEGIES = {'largest_first': strategy_largest_first, 'random_sequential':
-    strategy_random_sequential, 'smallest_last': strategy_smallest_last,
-    'independent_set': strategy_independent_set, 'connected_sequential_bfs':
-    strategy_connected_sequential_bfs, 'connected_sequential_dfs':
-    strategy_connected_sequential_dfs, 'connected_sequential':
-    strategy_connected_sequential, 'saturation_largest_first':
-    strategy_saturation_largest_first, 'DSATUR':
-    strategy_saturation_largest_first}
+    distinct_colors = {v: set() for v in G}
+
+    # Add the node color assignments given in colors to the
+    # distinct colors set for each neighbor of that node
+    for node, color in colors.items():
+        for neighbor in G[node]:
+            distinct_colors[neighbor].add(color)
+
+    # Check that the color assignments in colors are valid
+    # i.e. no neighboring nodes have the same color
+    if len(colors) >= 2:
+        for node, color in colors.items():
+            if color in distinct_colors[node]:
+                raise nx.NetworkXError("Neighboring nodes must have different colors")
+
+    # If 0 nodes have been colored, simply choose the node of highest degree.
+    if not colors:
+        node = max(G, key=G.degree)
+        yield node
+        # Add the color 0 to the distinct colors set for each
+        # neighbor of that node.
+        for v in G[node]:
+            distinct_colors[v].add(0)
+
+    while len(G) != len(colors):
+        # Update the distinct color sets for the neighbors.
+        for node, color in colors.items():
+            for neighbor in G[node]:
+                distinct_colors[neighbor].add(color)
+
+        # Compute the maximum saturation and the set of nodes that
+        # achieve that saturation.
+        saturation = {v: len(c) for v, c in distinct_colors.items() if v not in colors}
+        # Yield the node with the highest saturation, and break ties by
+        # degree.
+        node = max(saturation, key=lambda v: (saturation[v], G.degree(v)))
+        yield node
+
+
+#: Dictionary mapping name of a strategy as a string to the strategy function.
+STRATEGIES = {
+    "largest_first": strategy_largest_first,
+    "random_sequential": strategy_random_sequential,
+    "smallest_last": strategy_smallest_last,
+    "independent_set": strategy_independent_set,
+    "connected_sequential_bfs": strategy_connected_sequential_bfs,
+    "connected_sequential_dfs": strategy_connected_sequential_dfs,
+    "connected_sequential": strategy_connected_sequential,
+    "saturation_largest_first": strategy_saturation_largest_first,
+    "DSATUR": strategy_saturation_largest_first,
+}


 @nx._dispatchable
-def greedy_color(G, strategy='largest_first', interchange=False):
+def greedy_color(G, strategy="largest_first", interchange=False):
     """Color a graph using various strategies of greedy graph coloring.

     Attempts to color a graph using as few colors as possible, where no
@@ -230,11 +341,42 @@ def greedy_color(G, strategy='largest_first', interchange=False):
        ISBN 0-486-45353-7.

     """
-    pass
-
-
+    if len(G) == 0:
+        return {}
+    # Determine the strategy provided by the caller.
+    strategy = STRATEGIES.get(strategy, strategy)
+    if not callable(strategy):
+        raise nx.NetworkXError(
+            f"strategy must be callable or a valid string. {strategy} not valid."
+        )
+    # Perform some validation on the arguments before executing any
+    # strategy functions.
+    if interchange:
+        if strategy is strategy_independent_set:
+            msg = "interchange cannot be used with independent_set"
+            raise nx.NetworkXPointlessConcept(msg)
+        if strategy is strategy_saturation_largest_first:
+            msg = "interchange cannot be used with" " saturation_largest_first"
+            raise nx.NetworkXPointlessConcept(msg)
+    colors = {}
+    nodes = strategy(G, colors)
+    if interchange:
+        return _greedy_coloring_with_interchange(G, nodes)
+    for u in nodes:
+        # Set to keep track of colors of neighbors
+        nbr_colors = {colors[v] for v in G[u] if v in colors}
+        # Find the first unused color.
+        for color in itertools.count():
+            if color not in nbr_colors:
+                break
+        # Assign the new color to the current node.
+        colors[u] = color
+    return colors
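
A minimal sketch of the restored `greedy_color` with two of the strategies
registered above (illustrative):

    import networkx as nx

    G = nx.cycle_graph(4)
    nx.greedy_color(G, strategy="largest_first")
    # {0: 0, 1: 1, 2: 0, 3: 1}: two colors suffice on an even cycle
    nx.greedy_color(G, strategy="DSATUR")  # alias for saturation_largest_first
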
+
+
+# Tools for coloring with interchanges
 class _Node:
-    __slots__ = ['node_id', 'color', 'adj_list', 'adj_color']
+    __slots__ = ["node_id", "color", "adj_list", "adj_color"]

     def __init__(self, node_id, n):
         self.node_id = node_id
@@ -244,12 +386,40 @@ class _Node:

     def __repr__(self):
         return (
-            f'Node_id: {self.node_id}, Color: {self.color}, Adj_list: ({self.adj_list}), adj_color: ({self.adj_color})'
-            )
+            f"Node_id: {self.node_id}, Color: {self.color}, "
+            f"Adj_list: ({self.adj_list}), adj_color: ({self.adj_color})"
+        )
+
+    def assign_color(self, adj_entry, color):
+        adj_entry.col_prev = None
+        adj_entry.col_next = self.adj_color[color]
+        self.adj_color[color] = adj_entry
+        if adj_entry.col_next is not None:
+            adj_entry.col_next.col_prev = adj_entry
+
+    def clear_color(self, adj_entry, color):
+        if adj_entry.col_prev is None:
+            self.adj_color[color] = adj_entry.col_next
+        else:
+            adj_entry.col_prev.col_next = adj_entry.col_next
+        if adj_entry.col_next is not None:
+            adj_entry.col_next.col_prev = adj_entry.col_prev
+
+    def iter_neighbors(self):
+        adj_node = self.adj_list
+        while adj_node is not None:
+            yield adj_node
+            adj_node = adj_node.next
+
+    def iter_neighbors_color(self, color):
+        adj_color_node = self.adj_color[color]
+        while adj_color_node is not None:
+            yield adj_color_node.node_id
+            adj_color_node = adj_color_node.col_next


 class _AdjEntry:
-    __slots__ = ['node_id', 'next', 'mate', 'col_next', 'col_prev']
+    __slots__ = ["node_id", "next", "mate", "col_next", "col_prev"]

     def __init__(self, node_id):
         self.node_id = node_id
@@ -262,8 +432,10 @@ class _AdjEntry:
         col_next = None if self.col_next is None else self.col_next.node_id
         col_prev = None if self.col_prev is None else self.col_prev.node_id
         return (
-            f'Node_id: {self.node_id}, Next: ({self.next}), Mate: ({self.mate.node_id}), col_next: ({col_next}), col_prev: ({col_prev})'
-            )
+            f"Node_id: {self.node_id}, Next: ({self.next}), "
+            f"Mate: ({self.mate.node_id}), "
+            f"col_next: ({col_next}), col_prev: ({col_prev})"
+        )


 def _greedy_coloring_with_interchange(G, nodes):
@@ -295,4 +467,98 @@ def _greedy_coloring_with_interchange(G, nodes):
        Discrete Optimization Algorithms with Pascal Programs, 415-424, 1983.
        ISBN 0-486-45353-7.
     """
-    pass
+    n = len(G)
+
+    graph = {node: _Node(node, n) for node in G}
+
+    for node1, node2 in G.edges():
+        adj_entry1 = _AdjEntry(node2)
+        adj_entry2 = _AdjEntry(node1)
+        adj_entry1.mate = adj_entry2
+        adj_entry2.mate = adj_entry1
+        node1_head = graph[node1].adj_list
+        adj_entry1.next = node1_head
+        graph[node1].adj_list = adj_entry1
+        node2_head = graph[node2].adj_list
+        adj_entry2.next = node2_head
+        graph[node2].adj_list = adj_entry2
+
+    k = 0
+    for node in nodes:
+        # Find the smallest possible, unused color
+        neighbors = graph[node].iter_neighbors()
+        col_used = {graph[adj_node.node_id].color for adj_node in neighbors}
+        col_used.discard(-1)
+        k1 = next(itertools.dropwhile(lambda x: x in col_used, itertools.count()))
+
+        # k1 is now the lowest available color
+        if k1 > k:
+            connected = True
+            visited = set()
+            col1 = -1
+            col2 = -1
+            while connected and col1 < k:
+                col1 += 1
+                neighbor_cols = graph[node].iter_neighbors_color(col1)
+                col1_adj = list(neighbor_cols)
+
+                col2 = col1
+                while connected and col2 < k:
+                    col2 += 1
+                    visited = set(col1_adj)
+                    frontier = list(col1_adj)
+                    i = 0
+                    while i < len(frontier):
+                        search_node = frontier[i]
+                        i += 1
+                        col_opp = col2 if graph[search_node].color == col1 else col1
+                        neighbor_cols = graph[search_node].iter_neighbors_color(col_opp)
+
+                        for neighbor in neighbor_cols:
+                            if neighbor not in visited:
+                                visited.add(neighbor)
+                                frontier.append(neighbor)
+
+                    # 'node' stays connected if any of its col2-colored
+                    # neighbors lies in the visited component
+                    connected = (
+                        len(
+                            visited.intersection(graph[node].iter_neighbors_color(col2))
+                        )
+                        > 0
+                    )
+
+            # If not connected, the two colors can be swapped in this component
+            if not connected:
+                # Update all the nodes in the component
+                for search_node in visited:
+                    graph[search_node].color = (
+                        col2 if graph[search_node].color == col1 else col1
+                    )
+                    col2_adj = graph[search_node].adj_color[col2]
+                    graph[search_node].adj_color[col2] = graph[search_node].adj_color[
+                        col1
+                    ]
+                    graph[search_node].adj_color[col1] = col2_adj
+
+                # Update all the neighboring nodes
+                for search_node in visited:
+                    col = graph[search_node].color
+                    col_opp = col1 if col == col2 else col2
+                    for adj_node in graph[search_node].iter_neighbors():
+                        if graph[adj_node.node_id].color != col_opp:
+                            # Direct reference to entry
+                            adj_mate = adj_node.mate
+                            graph[adj_node.node_id].clear_color(adj_mate, col_opp)
+                            graph[adj_node.node_id].assign_color(adj_mate, col)
+                k1 = col1
+
+        # We can color this node color k1
+        graph[node].color = k1
+        k = max(k1, k)
+
+        # Update the neighbors of this node
+        for adj_node in graph[node].iter_neighbors():
+            adj_mate = adj_node.mate
+            graph[adj_node.node_id].assign_color(adj_mate, k1)
+
+    return {node.node_id: node.color for node in graph.values()}
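
Interchange is opt-in through the `interchange` flag of `greedy_color`; a
hedged sketch (the graph choice is illustrative):

    import networkx as nx

    G = nx.petersen_graph()
    colors = nx.greedy_color(G, strategy="largest_first", interchange=True)
    max(colors.values()) + 1  # colors used; interchange may save a color
                              # compared to the plain greedy pass
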
diff --git a/networkx/algorithms/communicability_alg.py b/networkx/algorithms/communicability_alg.py
index 15d50b5a3..07316dc3a 100644
--- a/networkx/algorithms/communicability_alg.py
+++ b/networkx/algorithms/communicability_alg.py
@@ -3,14 +3,15 @@ Communicability.
 """
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['communicability', 'communicability_exp']

+__all__ = ["communicability", "communicability_exp"]

-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def communicability(G):
-    """Returns communicability between all pairs of nodes in G.
+    r"""Returns communicability between all pairs of nodes in G.

     The communicability between pairs of nodes in G is the sum of
     walks of different lengths starting at node u and ending at node v.
@@ -47,11 +48,11 @@ def communicability(G):
     is [1]_

     .. math::
-        C(u,v)=\\sum_{j=1}^{n}\\phi_{j}(u)\\phi_{j}(v)e^{\\lambda_{j}},
+        C(u,v)=\sum_{j=1}^{n}\phi_{j}(u)\phi_{j}(v)e^{\lambda_{j}},

-    where `\\phi_{j}(u)` is the `u\\rm{th}` element of the `j\\rm{th}` orthonormal
+    where `\phi_{j}(u)` is the `u\rm{th}` element of the `j\rm{th}` orthonormal
     eigenvector of the adjacency matrix associated with the eigenvalue
-    `\\lambda_{j}`.
+    `\lambda_{j}`.

     References
     ----------
@@ -65,14 +66,34 @@ def communicability(G):
     >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)])
     >>> c = nx.communicability(G)
     """
-    pass
-
-
-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+    import numpy as np
+
+    nodelist = list(G)  # ordering of nodes in matrix
+    A = nx.to_numpy_array(G, nodelist)
+    # convert to 0-1 matrix
+    A[A != 0.0] = 1
+    w, vec = np.linalg.eigh(A)
+    expw = np.exp(w)
+    mapping = dict(zip(nodelist, range(len(nodelist))))
+    c = {}
+    # computing communicabilities
+    for u in G:
+        c[u] = {}
+        for v in G:
+            s = 0
+            p = mapping[u]
+            q = mapping[v]
+            for j in range(len(nodelist)):
+                s += vec[:, j][p] * vec[:, j][q] * expw[j]
+            c[u][v] = float(s)
+    return c
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def communicability_exp(G):
-    """Returns communicability between all pairs of nodes in G.
+    r"""Returns communicability between all pairs of nodes in G.

     Communicability between pair of node (u,v) of node in G is the sum of
     walks of different lengths starting at node u and ending at node v.
@@ -124,4 +145,18 @@ def communicability_exp(G):
     >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)])
     >>> c = nx.communicability_exp(G)
     """
-    pass
+    import scipy as sp
+
+    nodelist = list(G)  # ordering of nodes in matrix
+    A = nx.to_numpy_array(G, nodelist)
+    # convert to 0-1 matrix
+    A[A != 0.0] = 1
+    # communicability matrix
+    expA = sp.linalg.expm(A)
+    mapping = dict(zip(nodelist, range(len(nodelist))))
+    c = {}
+    for u in G:
+        c[u] = {}
+        for v in G:
+            c[u][v] = float(expA[mapping[u], mapping[v]])
+    return c
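
Both functions compute entries of e^A, one via the spectral decomposition
and one via `scipy.linalg.expm`, so they agree up to floating-point error;
a small consistency sketch:

    import networkx as nx

    G = nx.path_graph(4)
    c_spec = nx.communicability(G)
    c_expm = nx.communicability_exp(G)
    all(abs(c_spec[u][v] - c_expm[u][v]) < 1e-9 for u in G for v in G)  # True
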
diff --git a/networkx/algorithms/community/asyn_fluid.py b/networkx/algorithms/community/asyn_fluid.py
index ef356e67b..fea72c1bf 100644
--- a/networkx/algorithms/community/asyn_fluid.py
+++ b/networkx/algorithms/community/asyn_fluid.py
@@ -1,14 +1,17 @@
 """Asynchronous Fluid Communities algorithm for community detection."""
+
 from collections import Counter
+
 import networkx as nx
 from networkx.algorithms.components import is_connected
 from networkx.exception import NetworkXError
 from networkx.utils import groups, not_implemented_for, py_random_state
-__all__ = ['asyn_fluidc']
+
+__all__ = ["asyn_fluidc"]


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @py_random_state(3)
 @nx._dispatchable
 def asyn_fluidc(G, k, max_iter=100, seed=None):
@@ -64,4 +67,85 @@ def asyn_fluidc(G, k, max_iter=100, seed=None):
        Competitive and Highly Scalable Community Detection Algorithm".
        [https://arxiv.org/pdf/1703.09307.pdf].
     """
-    pass
+    # Initial checks
+    if not isinstance(k, int):
+        raise NetworkXError("k must be an integer.")
+    if not k > 0:
+        raise NetworkXError("k must be greater than 0.")
+    if not is_connected(G):
+        raise NetworkXError("Fluid Communities require connected Graphs.")
+    if len(G) < k:
+        raise NetworkXError("k cannot be bigger than the number of nodes.")
+    # Initialization
+    max_density = 1.0
+    vertices = list(G)
+    seed.shuffle(vertices)
+    communities = {n: i for i, n in enumerate(vertices[:k])}
+    density = {}
+    com_to_numvertices = {}
+    for vertex in communities:
+        com_to_numvertices[communities[vertex]] = 1
+        density[communities[vertex]] = max_density
+    # Set up control variables and start iterating
+    iter_count = 0
+    cont = True
+    while cont:
+        cont = False
+        iter_count += 1
+        # Loop over all vertices in graph in a random order
+        vertices = list(G)
+        seed.shuffle(vertices)
+        for vertex in vertices:
+            # Updating rule
+            com_counter = Counter()
+            # Take the vertex's own community into account
+            try:
+                com_counter.update({communities[vertex]: density[communities[vertex]]})
+            except KeyError:
+                pass
+            # Gather neighbor vertex communities
+            for v in G[vertex]:
+                try:
+                    com_counter.update({communities[v]: density[communities[v]]})
+                except KeyError:
+                    continue
+            # Check which is the community with highest density
+            new_com = -1
+            if len(com_counter.keys()) > 0:
+                max_freq = max(com_counter.values())
+                best_communities = [
+                    com
+                    for com, freq in com_counter.items()
+                    if (max_freq - freq) < 0.0001
+                ]
+                # If the vertex's current community is among the best, keep it
+                try:
+                    if communities[vertex] in best_communities:
+                        new_com = communities[vertex]
+                except KeyError:
+                    pass
+                # If vertex community changes...
+                if new_com == -1:
+                    # Set flag of non-convergence
+                    cont = True
+                    # Randomly choose a new community from candidates
+                    new_com = seed.choice(best_communities)
+                    # Update previous community status
+                    try:
+                        com_to_numvertices[communities[vertex]] -= 1
+                        density[communities[vertex]] = (
+                            max_density / com_to_numvertices[communities[vertex]]
+                        )
+                    except KeyError:
+                        pass
+                    # Update new community status
+                    communities[vertex] = new_com
+                    com_to_numvertices[communities[vertex]] += 1
+                    density[communities[vertex]] = (
+                        max_density / com_to_numvertices[communities[vertex]]
+                    )
+        # If maximum iterations reached, stop and output current results
+        if iter_count > max_iter:
+            break
+    # Return results by grouping communities as list of vertices
+    return iter(groups(communities).values())
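
A minimal usage sketch (illustrative; community membership depends on the
seed, but for typical runs the k sets cover the graph):

    import networkx as nx
    from networkx.algorithms.community import asyn_fluidc

    G = nx.karate_club_graph()
    communities = list(asyn_fluidc(G, k=2, seed=7))
    len(communities)  # 2
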
diff --git a/networkx/algorithms/community/centrality.py b/networkx/algorithms/community/centrality.py
index a1b112283..43281701d 100644
--- a/networkx/algorithms/community/centrality.py
+++ b/networkx/algorithms/community/centrality.py
@@ -1,9 +1,11 @@
 """Functions for computing communities based on centrality notions."""
+
 import networkx as nx
-__all__ = ['girvan_newman']
+
+__all__ = ["girvan_newman"]


-@nx._dispatchable(preserve_edge_attrs='most_valuable_edge')
+@nx._dispatchable(preserve_edge_attrs="most_valuable_edge")
 def girvan_newman(G, most_valuable_edge=None):
     """Finds communities in a graph using the Girvan–Newman method.

@@ -118,7 +120,32 @@ def girvan_newman(G, most_valuable_edge=None):
     result can be depicted as a dendrogram.

     """
-    pass
+    # If the graph is already empty, simply return its connected
+    # components.
+    if G.number_of_edges() == 0:
+        yield tuple(nx.connected_components(G))
+        return
+    # If no function is provided for computing the most valuable edge,
+    # use the edge betweenness centrality.
+    if most_valuable_edge is None:
+
+        def most_valuable_edge(G):
+            """Returns the edge with the highest betweenness centrality
+            in the graph `G`.
+
+            """
+            # We have guaranteed that the graph is non-empty, so this
+            # dictionary will never be empty.
+            betweenness = nx.edge_betweenness_centrality(G)
+            return max(betweenness, key=betweenness.get)
+
+    # The copy of G here must include the edge weight data.
+    g = G.copy().to_undirected()
+    # Self-loops are removed first: deleting a self-loop never changes
+    # the connected components, so they would only waste removal steps.
+    g.remove_edges_from(nx.selfloop_edges(g))
+    while g.number_of_edges() > 0:
+        yield _without_most_central_edges(g, most_valuable_edge)
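
A minimal sketch of the first split on a path graph, where the central edge
has the highest betweenness:

    import networkx as nx
    from networkx.algorithms.community import girvan_newman

    G = nx.path_graph(10)
    comp = girvan_newman(G)
    tuple(sorted(c) for c in next(comp))
    # ([0, 1, 2, 3, 4], [5, 6, 7, 8, 9])
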


 def _without_most_central_edges(G, most_valuable_edge):
@@ -134,4 +161,11 @@ def _without_most_central_edges(G, most_valuable_edge):
     until the number of connected components in the graph increases.

     """
-    pass
+    original_num_components = nx.number_connected_components(G)
+    num_new_components = original_num_components
+    while num_new_components <= original_num_components:
+        edge = most_valuable_edge(G)
+        G.remove_edge(*edge)
+        new_components = tuple(nx.connected_components(G))
+        num_new_components = len(new_components)
+    return new_components
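(Not part of the patch.) A minimal sketch of the generator implemented above; `itertools.islice` caps the number of dendrogram levels:

    import itertools
    import networkx as nx

    G = nx.path_graph(6)
    # Each level removes central edges until the component count grows.
    for level in itertools.islice(nx.community.girvan_newman(G), 2):
        print(sorted(map(sorted, level)))  # [[0, 1, 2], [3, 4, 5]], then 3 parts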
diff --git a/networkx/algorithms/community/community_utils.py b/networkx/algorithms/community/community_utils.py
index fc2cf463b..b57cd9881 100644
--- a/networkx/algorithms/community/community_utils.py
+++ b/networkx/algorithms/community/community_utils.py
@@ -1,6 +1,7 @@
 """Helper functions for community-finding algorithms."""
 import networkx as nx
-__all__ = ['is_partition']
+
+__all__ = ["is_partition"]


 @nx._dispatchable
@@ -19,4 +20,10 @@ def is_partition(G, communities):
         If it is an iterator it is exhausted.

     """
-    pass
+    # Alternate implementation:
+    # return all(sum(1 if v in c else 0 for c in communities) == 1 for v in G)
+    if not isinstance(communities, list):
+        communities = list(communities)
+    nodes = {n for c in communities for n in c if n in G}
+
+    return len(G) == len(nodes) == sum(len(c) for c in communities)
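(Not part of the patch.) A quick check of the helper above: every node must appear in exactly one block:

    import networkx as nx

    G = nx.path_graph(4)
    print(nx.community.is_partition(G, [{0, 1}, {2, 3}]))     # True
    print(nx.community.is_partition(G, [{0, 1}, {1, 2, 3}]))  # False: node 1 repeated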
diff --git a/networkx/algorithms/community/divisive.py b/networkx/algorithms/community/divisive.py
index 7fc0c5cb3..1fc395946 100644
--- a/networkx/algorithms/community/divisive.py
+++ b/networkx/algorithms/community/divisive.py
@@ -1,10 +1,14 @@
 import functools
+
 import networkx as nx
-__all__ = ['edge_betweenness_partition',
-    'edge_current_flow_betweenness_partition']
+
+__all__ = [
+    "edge_betweenness_partition",
+    "edge_current_flow_betweenness_partition",
+]


-@nx._dispatchable(edge_attrs='weight')
+@nx._dispatchable(edge_attrs="weight")
 def edge_betweenness_partition(G, number_of_sets, *, weight=None):
     """Partition created by iteratively removing the highest edge betweenness edge.

@@ -61,10 +65,26 @@ def edge_betweenness_partition(G, number_of_sets, *, weight=None):
        Volume 486, Issue 3-5 p. 75-174
        http://arxiv.org/abs/0906.0612
     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
+    if number_of_sets <= 0:
+        raise nx.NetworkXError("number_of_sets must be >0")
+    if number_of_sets == 1:
+        return [set(G)]
+    if number_of_sets == len(G):
+        return [{n} for n in G]
+    if number_of_sets > len(G):
+        raise nx.NetworkXError("number_of_sets must be <= len(G)")
+
+    H = G.copy()
+    partition = list(nx.connected_components(H))
+    while len(partition) < number_of_sets:
+        ranking = nx.edge_betweenness_centrality(H, weight=weight)
+        edge = max(ranking, key=ranking.get)
+        H.remove_edge(*edge)
+        partition = list(nx.connected_components(H))
+    return partition
+
+
+@nx._dispatchable(edge_attrs="weight")
 def edge_current_flow_betweenness_partition(G, number_of_sets, *, weight=None):
     """Partition created by removing the highest edge current flow betweenness edge.

@@ -121,4 +141,56 @@ def edge_current_flow_betweenness_partition(G, number_of_sets, *, weight=None):
        Volume 486, Issue 3-5 p. 75-174
        http://arxiv.org/abs/0906.0612
     """
-    pass
+    if number_of_sets <= 0:
+        raise nx.NetworkXError("number_of_sets must be >0")
+    elif number_of_sets == 1:
+        return [set(G)]
+    elif number_of_sets == len(G):
+        return [{n} for n in G]
+    elif number_of_sets > len(G):
+        raise nx.NetworkXError("number_of_sets must be <= len(G)")
+
+    rank = functools.partial(
+        nx.edge_current_flow_betweenness_centrality, normalized=False, weight=weight
+    )
+
+    # current flow requires a connected network so we track the components explicitly
+    H = G.copy()
+    partition = list(nx.connected_components(H))
+    if len(partition) > 1:
+        Hcc_subgraphs = [H.subgraph(cc).copy() for cc in partition]
+    else:
+        Hcc_subgraphs = [H]
+
+    ranking = {}
+    for Hcc in Hcc_subgraphs:
+        ranking.update(rank(Hcc))
+
+    while len(partition) < number_of_sets:
+        edge = max(ranking, key=ranking.get)
+        for cc, Hcc in zip(partition, Hcc_subgraphs):
+            if edge[0] in cc:
+                Hcc.remove_edge(*edge)
+                del ranking[edge]
+                splitcc_list = list(nx.connected_components(Hcc))
+                if len(splitcc_list) > 1:
+                    # there are 2 connected components. split off smaller one
+                    cc_new = min(splitcc_list, key=len)
+                    Hcc_new = Hcc.subgraph(cc_new).copy()
+                    # update edge rankings for Hcc_new
+                    newranks = rank(Hcc_new)
+                    for e, r in newranks.items():
+                        ranking[e if e in ranking else e[::-1]] = r
+                    # append new cc and Hcc to their lists.
+                    partition.append(cc_new)
+                    Hcc_subgraphs.append(Hcc_new)
+
+                    # leave existing cc and Hcc in their lists, but shrink them
+                    Hcc.remove_nodes_from(cc_new)
+                    cc.difference_update(cc_new)
+                # update edge rankings for Hcc whether it was split or not
+                newranks = rank(Hcc)
+                for e, r in newranks.items():
+                    ranking[e if e in ranking else e[::-1]] = r
+                break
+    return partition
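(Not part of the patch.) A sketch for the divisive partition functions above; the current-flow variant additionally needs scipy, so only the betweenness-based one is exercised here:

    import networkx as nx

    G = nx.barbell_graph(3, 0)  # two triangles joined by one bridge edge
    parts = nx.community.edge_betweenness_partition(G, 2)
    print(sorted(map(sorted, parts)))  # [[0, 1, 2], [3, 4, 5]]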
diff --git a/networkx/algorithms/community/kclique.py b/networkx/algorithms/community/kclique.py
index d9a8035c6..c72491042 100644
--- a/networkx/algorithms/community/kclique.py
+++ b/networkx/algorithms/community/kclique.py
@@ -1,6 +1,8 @@
 from collections import defaultdict
+
 import networkx as nx
-__all__ = ['k_clique_communities']
+
+__all__ = ["k_clique_communities"]


 @nx._dispatchable
@@ -42,4 +44,36 @@ def k_clique_communities(G, k, cliques=None):
        in nature and society Nature 435, 814-818, 2005,
        doi:10.1038/nature03607
     """
-    pass
+    if k < 2:
+        raise nx.NetworkXError(f"k={k}, k must be greater than 1.")
+    if cliques is None:
+        cliques = nx.find_cliques(G)
+    cliques = [frozenset(c) for c in cliques if len(c) >= k]
+
+    # First index which nodes are in which cliques
+    membership_dict = defaultdict(list)
+    for clique in cliques:
+        for node in clique:
+            membership_dict[node].append(clique)
+
+    # For each clique, see which adjacent cliques percolate
+    perc_graph = nx.Graph()
+    perc_graph.add_nodes_from(cliques)
+    for clique in cliques:
+        for adj_clique in _get_adjacent_cliques(clique, membership_dict):
+            if len(clique.intersection(adj_clique)) >= (k - 1):
+                perc_graph.add_edge(clique, adj_clique)
+
+    # Connected components of clique graph with perc edges
+    # are the percolated cliques
+    for component in nx.connected_components(perc_graph):
+        yield (frozenset.union(*component))
+
+
+def _get_adjacent_cliques(clique, membership_dict):
+    adjacent_cliques = set()
+    for n in clique:
+        for adj_clique in membership_dict[n]:
+            if clique != adj_clique:
+                adjacent_cliques.add(adj_clique)
+    return adjacent_cliques
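(Not part of the patch.) A sketch of clique percolation with the function above: two K4s sharing an edge (two nodes, i.e. k-1 for k=3) merge into a single community:

    import networkx as nx

    G = nx.complete_graph(4)  # K4 on {0, 1, 2, 3}
    G.add_edges_from([(2, 4), (2, 5), (3, 4), (3, 5), (4, 5)])  # K4 on {2, 3, 4, 5}
    comms = list(nx.community.k_clique_communities(G, 3))
    print([sorted(c) for c in comms])  # [[0, 1, 2, 3, 4, 5]]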
diff --git a/networkx/algorithms/community/kernighan_lin.py b/networkx/algorithms/community/kernighan_lin.py
index 893673e41..f6397d82b 100644
--- a/networkx/algorithms/community/kernighan_lin.py
+++ b/networkx/algorithms/community/kernighan_lin.py
@@ -1,9 +1,12 @@
 """Functions for computing the Kernighan–Lin bipartition algorithm."""
+
 from itertools import count
+
 import networkx as nx
 from networkx.algorithms.community.community_utils import is_partition
 from networkx.utils import BinaryHeap, not_implemented_for, py_random_state
-__all__ = ['kernighan_lin_bisection']
+
+__all__ = ["kernighan_lin_bisection"]


 def _kernighan_lin_sweep(edges, side):
@@ -12,14 +15,35 @@ def _kernighan_lin_sweep(edges, side):
     time, alternating between sides to keep the bisection balanced.  We keep
     two min-heaps of swap costs to make optimal-next-move selection fast.
     """
-    pass
+    costs0, costs1 = costs = BinaryHeap(), BinaryHeap()
+    for u, side_u, edges_u in zip(count(), side, edges):
+        cost_u = sum(w if side[v] else -w for v, w in edges_u)
+        costs[side_u].insert(u, cost_u if side_u else -cost_u)
+
+    def _update_costs(costs_x, x):
+        for y, w in edges[x]:
+            costs_y = costs[side[y]]
+            cost_y = costs_y.get(y)
+            if cost_y is not None:
+                cost_y += 2 * (-w if costs_x is costs_y else w)
+                costs_y.insert(y, cost_y, True)
+
+    i = 0
+    totcost = 0
+    while costs0 and costs1:
+        u, cost_u = costs0.pop()
+        _update_costs(costs0, u)
+        v, cost_v = costs1.pop()
+        _update_costs(costs1, v)
+        totcost += cost_u + cost_v
+        i += 1
+        yield totcost, i, (u, v)


-@not_implemented_for('directed')
+@not_implemented_for("directed")
 @py_random_state(4)
-@nx._dispatchable(edge_attrs='weight')
-def kernighan_lin_bisection(G, partition=None, max_iter=10, weight='weight',
-    seed=None):
+@nx._dispatchable(edge_attrs="weight")
+def kernighan_lin_bisection(G, partition=None, max_iter=10, weight="weight", seed=None):
     """Partition a graph into two blocks using the Kernighan–Lin
     algorithm.

@@ -69,4 +93,47 @@ def kernighan_lin_bisection(G, partition=None, max_iter=10, weight='weight',
        Oxford University Press 2011.

     """
-    pass
+    n = len(G)
+    labels = list(G)
+    seed.shuffle(labels)
+    index = {v: i for i, v in enumerate(labels)}
+
+    if partition is None:
+        side = [0] * (n // 2) + [1] * ((n + 1) // 2)
+    else:
+        try:
+            A, B = partition
+        except (TypeError, ValueError) as err:
+            raise nx.NetworkXError("partition must be two sets") from err
+        if not is_partition(G, (A, B)):
+            raise nx.NetworkXError("partition invalid")
+        side = [0] * n
+        for a in A:
+            side[index[a]] = 1
+
+    if G.is_multigraph():
+        edges = [
+            [
+                (index[u], sum(e.get(weight, 1) for e in d.values()))
+                for u, d in G[v].items()
+            ]
+            for v in labels
+        ]
+    else:
+        edges = [
+            [(index[u], e.get(weight, 1)) for u, e in G[v].items()] for v in labels
+        ]
+
+    for i in range(max_iter):
+        costs = list(_kernighan_lin_sweep(edges, side))
+        min_cost, min_i, _ = min(costs)
+        if min_cost >= 0:
+            break
+
+        for _, _, (u, v) in costs[:min_i]:
+            side[u] = 1
+            side[v] = 0
+
+    A = {u for u, s in zip(labels, side) if s == 0}
+    B = {u for u, s in zip(labels, side) if s == 1}
+    return A, B
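(Not part of the patch.) A sketch for the bisection above; `seed` fixes the shuffled node order, so the result is reproducible:

    import networkx as nx

    G = nx.barbell_graph(4, 0)  # two K4s joined by a bridge
    A, B = nx.community.kernighan_lin_bisection(G, seed=42)
    print(sorted(A), sorted(B))  # typically the two K4s, one per side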
diff --git a/networkx/algorithms/community/label_propagation.py b/networkx/algorithms/community/label_propagation.py
index 06e9bef37..869085576 100644
--- a/networkx/algorithms/community/label_propagation.py
+++ b/networkx/algorithms/community/label_propagation.py
@@ -2,14 +2,19 @@
 Label propagation community detection algorithms.
 """
 from collections import Counter, defaultdict, deque
+
 import networkx as nx
 from networkx.utils import groups, not_implemented_for, py_random_state
-__all__ = ['label_propagation_communities', 'asyn_lpa_communities',
-    'fast_label_propagation_communities']
+
+__all__ = [
+    "label_propagation_communities",
+    "asyn_lpa_communities",
+    "fast_label_propagation_communities",
+]


-@py_random_state('seed')
-@nx._dispatchable(edge_attrs='weight')
+@py_random_state("seed")
+@nx._dispatchable(edge_attrs="weight")
 def fast_label_propagation_communities(G, *, weight=None, seed=None):
     """Returns communities in `G` as detected by fast label propagation.

@@ -57,7 +62,43 @@ def fast_label_propagation_communities(G, *, weight=None, seed=None):
        fast label propagation." Scientific Reports 13 (2023): 2701.
        https://doi.org/10.1038/s41598-023-29610-z
     """
-    pass
+
+    # Queue of nodes to be processed.
+    nodes_queue = deque(G)
+    seed.shuffle(nodes_queue)
+
+    # Set of nodes in the queue.
+    nodes_set = set(G)
+
+    # Assign unique label to each node.
+    comms = {node: i for i, node in enumerate(G)}
+
+    while nodes_queue:
+        # Remove next node from the queue to process.
+        node = nodes_queue.popleft()
+        nodes_set.remove(node)
+
+        # Isolated nodes retain their initial label.
+        if G.degree(node) > 0:
+            # Compute frequency of labels in node's neighborhood.
+            label_freqs = _fast_label_count(G, comms, node, weight)
+            max_freq = max(label_freqs.values())
+
+            # Always sample new label from most frequent labels.
+            comm = seed.choice(
+                [comm for comm in label_freqs if label_freqs[comm] == max_freq]
+            )
+
+            if comms[node] != comm:
+                comms[node] = comm
+
+                # Add neighbors with a different label to the queue.
+                for nbr in nx.all_neighbors(G, node):
+                    if comms[nbr] != comm and nbr not in nodes_set:
+                        nodes_queue.append(nbr)
+                        nodes_set.add(nbr)
+
+    yield from groups(comms).values()


 def _fast_label_count(G, comms, node, weight=None):
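(Not part of the patch; placed between hunks.) A sketch for `fast_label_propagation_communities` above; results depend on the seed:

    import networkx as nx

    G = nx.barbell_graph(5, 0)
    comms = list(nx.community.fast_label_propagation_communities(G, seed=1))
    print([sorted(c) for c in comms])  # typically the two bells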
@@ -65,11 +106,38 @@ def _fast_label_count(G, comms, node, weight=None):

     Returns a dictionary keyed by label to the frequency of that label.
     """
-    pass
+
+    if weight is None:
+        # Unweighted (un)directed simple graph.
+        if not G.is_multigraph():
+            label_freqs = Counter(map(comms.get, nx.all_neighbors(G, node)))
+
+        # Unweighted (un)directed multigraph.
+        else:
+            label_freqs = defaultdict(int)
+            for nbr in G[node]:
+                label_freqs[comms[nbr]] += len(G[node][nbr])
+
+            if G.is_directed():
+                for nbr in G.pred[node]:
+                    label_freqs[comms[nbr]] += len(G.pred[node][nbr])
+
+    else:
+        # Weighted undirected simple/multigraph.
+        label_freqs = defaultdict(float)
+        for _, nbr, w in G.edges(node, data=weight, default=1):
+            label_freqs[comms[nbr]] += w
+
+        # Weighted directed simple/multigraph.
+        if G.is_directed():
+            for nbr, _, w in G.in_edges(node, data=weight, default=1):
+                label_freqs[comms[nbr]] += w
+
+    return label_freqs


 @py_random_state(2)
-@nx._dispatchable(edge_attrs='weight')
+@nx._dispatchable(edge_attrs="weight")
 def asyn_lpa_communities(G, weight=None, seed=None):
     """Returns communities in `G` as detected by asynchronous label
     propagation.
@@ -119,10 +187,52 @@ def asyn_lpa_communities(G, weight=None, seed=None):
            linear time algorithm to detect community structures in large-scale
            networks." Physical Review E 76.3 (2007): 036106.
     """
-    pass
-

-@not_implemented_for('directed')
+    labels = {n: i for i, n in enumerate(G)}
+    cont = True
+
+    while cont:
+        cont = False
+        nodes = list(G)
+        seed.shuffle(nodes)
+
+        for node in nodes:
+            if not G[node]:
+                continue
+
+            # Get label frequencies among adjacent nodes.
+            # Depending on the order they are processed in,
+            # some nodes will be in iteration t and others in t-1,
+            # making the algorithm asynchronous.
+            if weight is None:
+                # initialising a Counter from an iterator of labels is
+                # faster for getting unweighted label frequencies
+                label_freq = Counter(map(labels.get, G[node]))
+            else:
+                # updating a defaultdict is substantially faster
+                # for getting weighted label frequencies
+                label_freq = defaultdict(float)
+                for _, v, wt in G.edges(node, data=weight, default=1):
+                    label_freq[labels[v]] += wt
+
+            # Get the labels that appear with maximum frequency.
+            max_freq = max(label_freq.values())
+            best_labels = [
+                label for label, freq in label_freq.items() if freq == max_freq
+            ]
+
+            # If the node does not have one of the maximum frequency labels,
+            # randomly choose one of them and update the node's label.
+            # Continue the iteration as long as at least one node
+            # doesn't have a maximum frequency label.
+            if labels[node] not in best_labels:
+                labels[node] = seed.choice(best_labels)
+                cont = True
+
+    yield from groups(labels).values()
+
+
+@not_implemented_for("directed")
 @nx._dispatchable
 def label_propagation_communities(G):
     """Generates community sets determined by label propagation
@@ -153,7 +263,19 @@ def label_propagation_communities(G):
        Applications of Social Network Analysis (BASNA), 2010 IEEE International
        Workshop on (pp. 1-8). IEEE.
     """
-    pass
+    coloring = _color_network(G)
+    # Create a unique label for each node in the graph
+    labeling = {v: k for k, v in enumerate(G)}
+    while not _labeling_complete(labeling, G):
+        # Update the labels of every node with the same color.
+        for color, nodes in coloring.items():
+            for n in nodes:
+                _update_label(n, labeling, G)
+
+    clusters = defaultdict(set)
+    for node, label in labeling.items():
+        clusters[label].add(node)
+    return clusters.values()


 def _color_network(G):
@@ -161,7 +283,14 @@ def _color_network(G):

     Returns a dict keyed by color to a set of nodes with that color.
     """
-    pass
+    coloring = {}  # color => set(node)
+    colors = nx.coloring.greedy_color(G)
+    for node, color in colors.items():
+        if color in coloring:
+            coloring[color].add(node)
+        else:
+            coloring[color] = {node}
+    return coloring


 def _labeling_complete(labeling, G):
@@ -172,7 +301,9 @@ def _labeling_complete(labeling, G):

     Nodes with no neighbors are considered complete.
     """
-    pass
+    return all(
+        labeling[v] in _most_frequent_labels(v, labeling, G) for v in G if len(G[v]) > 0
+    )


 def _most_frequent_labels(node, labeling, G):
@@ -180,7 +311,15 @@ def _most_frequent_labels(node, labeling, G):

     Input `labeling` should be a dict keyed by node to labels.
     """
-    pass
+    if not G[node]:
+        # Nodes with no neighbors are themselves a community and are labeled
+        # accordingly, hence the immediate if statement.
+        return {labeling[node]}
+
+    # Compute the frequencies of all neighbors of node
+    freqs = Counter(labeling[q] for q in G[node])
+    max_freq = max(freqs.values())
+    return {label for label, freq in freqs.items() if freq == max_freq}


 def _update_label(node, labeling, G):
@@ -189,4 +328,10 @@ def _update_label(node, labeling, G):
     The algorithm is explained in: 'Community Detection via Semi-Synchronous
     Label Propagation Algorithms' Cordasco and Gargano, 2011
     """
-    pass
+    high_labels = _most_frequent_labels(node, labeling, G)
+    if len(high_labels) == 1:
+        labeling[node] = high_labels.pop()
+    elif len(high_labels) > 1:
+        # Prec-Max
+        if labeling[node] not in high_labels:
+            labeling[node] = max(high_labels)
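(Not part of the patch.) Two of the public entry points above side by side; the semi-synchronous variant needs no seed (its coloring is deterministic), while the asynchronous one takes one:

    import networkx as nx

    G = nx.karate_club_graph()
    sync = list(nx.community.label_propagation_communities(G))
    asyn = list(nx.community.asyn_lpa_communities(G, seed=7))
    print(len(sync), len(asyn))  # community counts; asyn varies with the seed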
diff --git a/networkx/algorithms/community/louvain.py b/networkx/algorithms/community/louvain.py
index c95008941..959c93a51 100644
--- a/networkx/algorithms/community/louvain.py
+++ b/networkx/algorithms/community/louvain.py
@@ -1,18 +1,22 @@
 """Function for detecting communities based on Louvain Community Detection
 Algorithm"""
+
 import itertools
 from collections import defaultdict, deque
+
 import networkx as nx
 from networkx.algorithms.community import modularity
 from networkx.utils import py_random_state
-__all__ = ['louvain_communities', 'louvain_partitions']
+
+__all__ = ["louvain_communities", "louvain_partitions"]


-@py_random_state('seed')
-@nx._dispatchable(edge_attrs='weight')
-def louvain_communities(G, weight='weight', resolution=1, threshold=1e-07,
-    max_level=None, seed=None):
-    """Find the best partition of a graph using the Louvain Community Detection
+@py_random_state("seed")
+@nx._dispatchable(edge_attrs="weight")
+def louvain_communities(
+    G, weight="weight", resolution=1, threshold=0.0000001, max_level=None, seed=None
+):
+    r"""Find the best partition of a graph using the Louvain Community Detection
     Algorithm.

     Louvain Community Detection Algorithm is a simple method to extract the community
@@ -27,21 +31,21 @@ def louvain_communities(G, weight='weight', resolution=1, threshold=1e-07,
     easily be calculated by the following formula (combining [1]_ [2]_ and some algebra):

     .. math::
-        \\Delta Q = \\frac{k_{i,in}}{2m} - \\gamma\\frac{ \\Sigma_{tot} \\cdot k_i}{2m^2}
+        \Delta Q = \frac{k_{i,in}}{2m} - \gamma\frac{ \Sigma_{tot} \cdot k_i}{2m^2}

     where $m$ is the size of the graph, $k_{i,in}$ is the sum of the weights of the links
     from $i$ to nodes in $C$, $k_i$ is the sum of the weights of the links incident to node $i$,
-    $\\Sigma_{tot}$ is the sum of the weights of the links incident to nodes in $C$ and $\\gamma$
+    $\Sigma_{tot}$ is the sum of the weights of the links incident to nodes in $C$ and $\gamma$
     is the resolution parameter.

     For the directed case the modularity gain can be computed using this formula according to [3]_

     .. math::
-        \\Delta Q = \\frac{k_{i,in}}{m}
-        - \\gamma\\frac{k_i^{out} \\cdot\\Sigma_{tot}^{in} + k_i^{in} \\cdot \\Sigma_{tot}^{out}}{m^2}
+        \Delta Q = \frac{k_{i,in}}{m}
+        - \gamma\frac{k_i^{out} \cdot\Sigma_{tot}^{in} + k_i^{in} \cdot \Sigma_{tot}^{out}}{m^2}

     where $k_i^{out}$, $k_i^{in}$ are the outer and inner weighted degrees of node $i$ and
-    $\\Sigma_{tot}^{in}$, $\\Sigma_{tot}^{out}$ are the sum of in-going and out-going links incident
+    $\Sigma_{tot}^{in}$, $\Sigma_{tot}^{out}$ are the sum of in-going and out-going links incident
     to nodes in $C$.

     The first phase continues until no individual move can improve the modularity.
@@ -115,13 +119,21 @@ def louvain_communities(G, weight='weight', resolution=1, threshold=1e-07,
     --------
     louvain_partitions
     """
-    pass

+    partitions = louvain_partitions(G, weight, resolution, threshold, seed)
+    if max_level is not None:
+        if max_level <= 0:
+            raise ValueError("max_level argument must be a positive integer or None")
+        partitions = itertools.islice(partitions, max_level)
+    final_partition = deque(partitions, maxlen=1)
+    return final_partition.pop()

-@py_random_state('seed')
-@nx._dispatchable(edge_attrs='weight')
-def louvain_partitions(G, weight='weight', resolution=1, threshold=1e-07,
-    seed=None):
+
+@py_random_state("seed")
+@nx._dispatchable(edge_attrs="weight")
+def louvain_partitions(
+    G, weight="weight", resolution=1, threshold=0.0000001, seed=None
+):
     """Yields partitions for each level of the Louvain Community Detection Algorithm

     Louvain Community Detection Algorithm is a simple method to extract the community
@@ -176,7 +188,38 @@ def louvain_partitions(G, weight='weight', resolution=1, threshold=1e-07,
     --------
     louvain_communities
     """
-    pass
+
+    partition = [{u} for u in G.nodes()]
+    if nx.is_empty(G):
+        yield partition
+        return
+    mod = modularity(G, partition, resolution=resolution, weight=weight)
+    is_directed = G.is_directed()
+    if G.is_multigraph():
+        graph = _convert_multigraph(G, weight, is_directed)
+    else:
+        graph = G.__class__()
+        graph.add_nodes_from(G)
+        graph.add_weighted_edges_from(G.edges(data=weight, default=1))
+
+    m = graph.size(weight="weight")
+    partition, inner_partition, improvement = _one_level(
+        graph, m, partition, resolution, is_directed, seed
+    )
+    improvement = True  # ensure the first level is yielded at least once
+    while improvement:
+        # gh-5901 protect the sets in the yielded list from further manipulation here
+        yield [s.copy() for s in partition]
+        new_mod = modularity(
+            graph, inner_partition, resolution=resolution, weight="weight"
+        )
+        if new_mod - mod <= threshold:
+            return
+        mod = new_mod
+        graph = _gen_graph(graph, inner_partition)
+        partition, inner_partition, improvement = _one_level(
+            graph, m, partition, resolution, is_directed, seed
+        )


 def _one_level(G, m, partition, resolution=1, is_directed=False, seed=None):
@@ -199,7 +242,92 @@ def _one_level(G, m, partition, resolution=1, is_directed=False, seed=None):
         See :ref:`Randomness<randomness>`.

     """
-    pass
+    node2com = {u: i for i, u in enumerate(G.nodes())}
+    inner_partition = [{u} for u in G.nodes()]
+    if is_directed:
+        in_degrees = dict(G.in_degree(weight="weight"))
+        out_degrees = dict(G.out_degree(weight="weight"))
+        Stot_in = list(in_degrees.values())
+        Stot_out = list(out_degrees.values())
+        # Calculate weights for both in and out neighbors without considering self-loops
+        nbrs = {}
+        for u in G:
+            nbrs[u] = defaultdict(float)
+            for _, n, wt in G.out_edges(u, data="weight"):
+                if u != n:
+                    nbrs[u][n] += wt
+            for n, _, wt in G.in_edges(u, data="weight"):
+                if u != n:
+                    nbrs[u][n] += wt
+    else:
+        degrees = dict(G.degree(weight="weight"))
+        Stot = list(degrees.values())
+        nbrs = {u: {v: data["weight"] for v, data in G[u].items() if v != u} for u in G}
+    rand_nodes = list(G.nodes)
+    seed.shuffle(rand_nodes)
+    nb_moves = 1
+    improvement = False
+    while nb_moves > 0:
+        nb_moves = 0
+        for u in rand_nodes:
+            best_mod = 0
+            best_com = node2com[u]
+            weights2com = _neighbor_weights(nbrs[u], node2com)
+            if is_directed:
+                in_degree = in_degrees[u]
+                out_degree = out_degrees[u]
+                Stot_in[best_com] -= in_degree
+                Stot_out[best_com] -= out_degree
+                remove_cost = (
+                    -weights2com[best_com] / m
+                    + resolution
+                    * (out_degree * Stot_in[best_com] + in_degree * Stot_out[best_com])
+                    / m**2
+                )
+            else:
+                degree = degrees[u]
+                Stot[best_com] -= degree
+                remove_cost = -weights2com[best_com] / m + resolution * (
+                    Stot[best_com] * degree
+                ) / (2 * m**2)
+            for nbr_com, wt in weights2com.items():
+                if is_directed:
+                    gain = (
+                        remove_cost
+                        + wt / m
+                        - resolution
+                        * (
+                            out_degree * Stot_in[nbr_com]
+                            + in_degree * Stot_out[nbr_com]
+                        )
+                        / m**2
+                    )
+                else:
+                    gain = (
+                        remove_cost
+                        + wt / m
+                        - resolution * (Stot[nbr_com] * degree) / (2 * m**2)
+                    )
+                if gain > best_mod:
+                    best_mod = gain
+                    best_com = nbr_com
+            if is_directed:
+                Stot_in[best_com] += in_degree
+                Stot_out[best_com] += out_degree
+            else:
+                Stot[best_com] += degree
+            if best_com != node2com[u]:
+                com = G.nodes[u].get("nodes", {u})
+                partition[node2com[u]].difference_update(com)
+                inner_partition[node2com[u]].remove(u)
+                partition[best_com].update(com)
+                inner_partition[best_com].add(u)
+                improvement = True
+                nb_moves += 1
+                node2com[u] = best_com
+    partition = list(filter(len, partition))
+    inner_partition = list(filter(len, inner_partition))
+    return partition, inner_partition, improvement


 def _neighbor_weights(nbrs, node2com):
@@ -213,14 +341,42 @@ def _neighbor_weights(nbrs, node2com):
            Dictionary with all graph's nodes as keys and their community index as value.

     """
-    pass
+    weights = defaultdict(float)
+    for nbr, wt in nbrs.items():
+        weights[node2com[nbr]] += wt
+    return weights


 def _gen_graph(G, partition):
     """Generate a new graph based on the partitions of a given graph"""
-    pass
+    H = G.__class__()
+    node2com = {}
+    for i, part in enumerate(partition):
+        nodes = set()
+        for node in part:
+            node2com[node] = i
+            nodes.update(G.nodes[node].get("nodes", {node}))
+        H.add_node(i, nodes=nodes)
+
+    for node1, node2, wt in G.edges(data=True):
+        wt = wt["weight"]
+        com1 = node2com[node1]
+        com2 = node2com[node2]
+        temp = H.get_edge_data(com1, com2, {"weight": 0})["weight"]
+        H.add_edge(com1, com2, weight=wt + temp)
+    return H


 def _convert_multigraph(G, weight, is_directed):
     """Convert a Multigraph to normal Graph"""
-    pass
+    if is_directed:
+        H = nx.DiGraph()
+    else:
+        H = nx.Graph()
+    H.add_nodes_from(G)
+    for u, v, wt in G.edges(data=weight, default=1):
+        if H.has_edge(u, v):
+            H[u][v]["weight"] += wt
+        else:
+            H.add_edge(u, v, weight=wt)
+    return H
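(Not part of the patch.) A sketch tying the two public functions above together; `louvain_partitions` yields one partition per level and `louvain_communities` keeps only the last one:

    import networkx as nx

    G = nx.karate_club_graph()
    levels = list(nx.community.louvain_partitions(G, seed=123))
    best = nx.community.louvain_communities(G, seed=123)
    print(len(levels), [len(c) for c in best])  # same seed, same final level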
diff --git a/networkx/algorithms/community/lukes.py b/networkx/algorithms/community/lukes.py
index 4c1395b60..08dd7cd52 100644
--- a/networkx/algorithms/community/lukes.py
+++ b/networkx/algorithms/community/lukes.py
@@ -1,19 +1,31 @@
 """Lukes Algorithm for exact optimal weighted tree partitioning."""
+
 from copy import deepcopy
 from functools import lru_cache
 from random import choice
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['lukes_partitioning']
-D_EDGE_W = 'weight'
+
+__all__ = ["lukes_partitioning"]
+
+D_EDGE_W = "weight"
 D_EDGE_VALUE = 1.0
-D_NODE_W = 'weight'
+D_NODE_W = "weight"
 D_NODE_VALUE = 1
-PKEY = 'partitions'
+PKEY = "partitions"
 CLUSTER_EVAL_CACHE_SIZE = 2048


-@nx._dispatchable(node_attrs='node_weight', edge_attrs='edge_weight')
+def _split_n_from(n, min_size_of_first_part):
+    # splits j in two parts of which the first is at least
+    # the second argument
+    assert n >= min_size_of_first_part
+    for p1 in range(min_size_of_first_part, n + 1):
+        yield p1, n - p1
+
+
+@nx._dispatchable(node_attrs="node_weight", edge_attrs="edge_weight")
 def lukes_partitioning(G, max_size, node_weight=None, edge_weight=None):
     """Optimal partitioning of a weighted tree using the Lukes algorithm.

@@ -59,4 +71,157 @@ def lukes_partitioning(G, max_size, node_weight=None, edge_weight=None):
        IBM Journal of Research and Development, 18(3), 217–224.

     """
-    pass
+    # First sanity check and tree preparation
+    if not nx.is_tree(G):
+        raise nx.NotATree("lukes_partitioning works only on trees")
+    else:
+        if nx.is_directed(G):
+            root = [n for n, d in G.in_degree() if d == 0]
+            assert len(root) == 1
+            root = root[0]
+            t_G = deepcopy(G)
+        else:
+            root = choice(list(G.nodes))
+            # this has the desirable side effect of not inheriting attributes
+            t_G = nx.dfs_tree(G, root)
+
+    # Since we do not want to screw up the original graph,
+    # if we have a blank attribute, we make a deepcopy
+    if edge_weight is None or node_weight is None:
+        safe_G = deepcopy(G)
+        if edge_weight is None:
+            nx.set_edge_attributes(safe_G, D_EDGE_VALUE, D_EDGE_W)
+            edge_weight = D_EDGE_W
+        if node_weight is None:
+            nx.set_node_attributes(safe_G, D_NODE_VALUE, D_NODE_W)
+            node_weight = D_NODE_W
+    else:
+        safe_G = G
+
+    # Second sanity check
+    # The values of node_weight MUST BE int.
+    # I cannot see any room for duck typing without incurring serious
+    # danger of subtle bugs.
+    all_n_attr = nx.get_node_attributes(safe_G, node_weight).values()
+    for x in all_n_attr:
+        if not isinstance(x, int):
+            raise TypeError(
+                "lukes_partitioning needs integer "
+                f"values for node_weight ({node_weight})"
+            )
+
+    # SUBROUTINES -----------------------
+    # these functions are defined here for two reasons:
+    # - brevity: we can leverage global "safe_G"
+    # - caching: signatures are hashable
+
+    @not_implemented_for("undirected")
+    # this is intended to be called only on t_G
+    def _leaves(gr):
+        for x in gr.nodes:
+            if not nx.descendants(gr, x):
+                yield x
+
+    @not_implemented_for("undirected")
+    def _a_parent_of_leaves_only(gr):
+        tleaves = set(_leaves(gr))
+        for n in set(gr.nodes) - tleaves:
+            if all(x in tleaves for x in nx.descendants(gr, n)):
+                return n
+
+    @lru_cache(CLUSTER_EVAL_CACHE_SIZE)
+    def _value_of_cluster(cluster):
+        valid_edges = [e for e in safe_G.edges if e[0] in cluster and e[1] in cluster]
+        return sum(safe_G.edges[e][edge_weight] for e in valid_edges)
+
+    def _value_of_partition(partition):
+        return sum(_value_of_cluster(frozenset(c)) for c in partition)
+
+    @lru_cache(CLUSTER_EVAL_CACHE_SIZE)
+    def _weight_of_cluster(cluster):
+        return sum(safe_G.nodes[n][node_weight] for n in cluster)
+
+    def _pivot(partition, node):
+        ccx = [c for c in partition if node in c]
+        assert len(ccx) == 1
+        return ccx[0]
+
+    def _concatenate_or_merge(partition_1, partition_2, x, i, ref_weight):
+        ccx = _pivot(partition_1, x)
+        cci = _pivot(partition_2, i)
+        merged_xi = ccx.union(cci)
+
+        # We first check if we can do the merge.
+        # If so, we do the actual calculations, otherwise we concatenate
+        if _weight_of_cluster(frozenset(merged_xi)) <= ref_weight:
+            cp1 = list(filter(lambda x: x != ccx, partition_1))
+            cp2 = list(filter(lambda x: x != cci, partition_2))
+
+            option_2 = [merged_xi] + cp1 + cp2
+            return option_2, _value_of_partition(option_2)
+        else:
+            option_1 = partition_1 + partition_2
+            return option_1, _value_of_partition(option_1)
+
+    # INITIALIZATION -----------------------
+    leaves = set(_leaves(t_G))
+    for lv in leaves:
+        t_G.nodes[lv][PKEY] = {}
+        slot = safe_G.nodes[lv][node_weight]
+        t_G.nodes[lv][PKEY][slot] = [{lv}]
+        t_G.nodes[lv][PKEY][0] = [{lv}]
+
+    for inner in [x for x in t_G.nodes if x not in leaves]:
+        t_G.nodes[inner][PKEY] = {}
+        slot = safe_G.nodes[inner][node_weight]
+        t_G.nodes[inner][PKEY][slot] = [{inner}]
+    nx._clear_cache(t_G)
+
+    # CORE ALGORITHM -----------------------
+    while True:
+        x_node = _a_parent_of_leaves_only(t_G)
+        weight_of_x = safe_G.nodes[x_node][node_weight]
+        best_value = 0
+        best_partition = None
+        bp_buffer = {}
+        x_descendants = nx.descendants(t_G, x_node)
+        for i_node in x_descendants:
+            for j in range(weight_of_x, max_size + 1):
+                for a, b in _split_n_from(j, weight_of_x):
+                    if (
+                        a not in t_G.nodes[x_node][PKEY]
+                        or b not in t_G.nodes[i_node][PKEY]
+                    ):
+                        # it's not possible to form this particular weight sum
+                        continue
+
+                    part1 = t_G.nodes[x_node][PKEY][a]
+                    part2 = t_G.nodes[i_node][PKEY][b]
+                    part, value = _concatenate_or_merge(part1, part2, x_node, i_node, j)
+
+                    if j not in bp_buffer or bp_buffer[j][1] < value:
+                        # we annotate in the buffer the best partition for j
+                        bp_buffer[j] = part, value
+
+                    # we also keep track of the overall best partition
+                    if best_value <= value:
+                        best_value = value
+                        best_partition = part
+
+            # as illustrated in Lukes, once we have finished a child, we can
+            # discharge the partitions we found into the graph
+            # (the key phrase is "make all x == x'")
+            # so that they are used by the subsequent children
+            for w, (best_part_for_vl, vl) in bp_buffer.items():
+                t_G.nodes[x_node][PKEY][w] = best_part_for_vl
+            bp_buffer.clear()
+
+        # the absolute best partition for this node
+        # across all weights has to be stored at 0
+        t_G.nodes[x_node][PKEY][0] = best_partition
+        t_G.remove_nodes_from(x_descendants)
+
+        if x_node == root:
+            # the 0-labeled partition of root
+            # is the optimal one for the whole tree
+            return t_G.nodes[root][PKEY][0]
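(Not part of the patch.) A sketch for the tree partitioning above; with no attributes given, every node weighs 1 and every edge 1.0, so `max_size` simply caps cluster sizes:

    import networkx as nx

    G = nx.path_graph(7)  # a path is a tree
    parts = nx.community.lukes_partitioning(G, max_size=3)
    print(sorted(map(sorted, parts)))  # clusters of at most 3 nodes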
diff --git a/networkx/algorithms/community/modularity_max.py b/networkx/algorithms/community/modularity_max.py
index 24913548c..f465e01c6 100644
--- a/networkx/algorithms/community/modularity_max.py
+++ b/networkx/algorithms/community/modularity_max.py
@@ -1,15 +1,20 @@
 """Functions for detecting communities based on modularity."""
+
 from collections import defaultdict
+
 import networkx as nx
 from networkx.algorithms.community.quality import modularity
 from networkx.utils import not_implemented_for
 from networkx.utils.mapped_queue import MappedQueue
-__all__ = ['greedy_modularity_communities',
-    'naive_greedy_modularity_communities']
+
+__all__ = [
+    "greedy_modularity_communities",
+    "naive_greedy_modularity_communities",
+]


 def _greedy_modularity_communities_generator(G, weight=None, resolution=1):
-    """Yield community partitions of G and the modularity change at each step.
+    r"""Yield community partitions of G and the modularity change at each step.

     This function performs Clauset-Newman-Moore greedy modularity maximization [2]_
     At each step of the process it yields the change in modularity that will occur in
@@ -20,7 +25,7 @@ def _greedy_modularity_communities_generator(G, weight=None, resolution=1):
     modularity until one community contains all nodes (the partition has one set).

     This function maximizes the generalized modularity, where `resolution`
-    is the resolution parameter, often expressed as $\\gamma$.
+    is the resolution parameter, often expressed as $\gamma$.
     See :func:`~networkx.algorithms.community.quality.modularity`.

     Parameters
@@ -65,13 +70,168 @@ def _greedy_modularity_communities_generator(G, weight=None, resolution=1):
     .. [4] Newman, M. E. J."Analysis of weighted networks"
        Physical Review E 70(5 Pt 2):056131, 2004.
     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
-def greedy_modularity_communities(G, weight=None, resolution=1, cutoff=1,
-    best_n=None):
-    """Find communities in G using greedy modularity maximization.
+    directed = G.is_directed()
+    N = G.number_of_nodes()
+
+    # Count edges (or the sum of edge-weights for weighted graphs)
+    m = G.size(weight)
+    q0 = 1 / m
+
+    # Calculate degrees (notation from the papers)
+    # a : the fraction of (weighted) out-degree for each node
+    # b : the fraction of (weighted) in-degree for each node
+    if directed:
+        a = {node: deg_out * q0 for node, deg_out in G.out_degree(weight=weight)}
+        b = {node: deg_in * q0 for node, deg_in in G.in_degree(weight=weight)}
+    else:
+        a = b = {node: deg * q0 * 0.5 for node, deg in G.degree(weight=weight)}
+
+    # this preliminary step collects the edge weights for each node pair
+    # It handles multigraph and digraph and works fine for graph.
+    dq_dict = defaultdict(lambda: defaultdict(float))
+    for u, v, wt in G.edges(data=weight, default=1):
+        if u == v:
+            continue
+        dq_dict[u][v] += wt
+        dq_dict[v][u] += wt
+
+    # now scale and subtract the expected edge-weights term
+    for u, nbrdict in dq_dict.items():
+        for v, wt in nbrdict.items():
+            dq_dict[u][v] = q0 * wt - resolution * (a[u] * b[v] + b[u] * a[v])
+
+    # Use -dq to get a max_heap instead of a min_heap
+    # dq_heap holds a heap for each node's neighbors
+    dq_heap = {u: MappedQueue({(u, v): -dq for v, dq in dq_dict[u].items()}) for u in G}
+    # H -> all_dq_heap holds a heap with the best items for each node
+    H = MappedQueue([dq_heap[n].heap[0] for n in G if len(dq_heap[n]) > 0])
+
+    # Initialize single-node communities
+    communities = {n: frozenset([n]) for n in G}
+    yield communities.values()
+
+    # Merge the two communities that lead to the largest modularity
+    while len(H) > 1:
+        # Find best merge
+        # Remove from heap of row maxes
+        # Ties will be broken by choosing the pair with lowest min community id
+        try:
+            negdq, u, v = H.pop()
+        except IndexError:
+            break
+        dq = -negdq
+        yield dq
+        # Remove best merge from row u heap
+        dq_heap[u].pop()
+        # Push new row max onto H
+        if len(dq_heap[u]) > 0:
+            H.push(dq_heap[u].heap[0])
+        # If this element was also at the root of row v, we need to remove the
+        # duplicate entry from H
+        if dq_heap[v].heap[0] == (v, u):
+            H.remove((v, u))
+            # Remove best merge from row v heap
+            dq_heap[v].remove((v, u))
+            # Push new row max onto H
+            if len(dq_heap[v]) > 0:
+                H.push(dq_heap[v].heap[0])
+        else:
+            # Duplicate wasn't in H, just remove from row v heap
+            dq_heap[v].remove((v, u))
+
+        # Perform merge
+        communities[v] = frozenset(communities[u] | communities[v])
+        del communities[u]
+
+        # Get neighbor communities connected to the merged communities
+        u_nbrs = set(dq_dict[u])
+        v_nbrs = set(dq_dict[v])
+        all_nbrs = (u_nbrs | v_nbrs) - {u, v}
+        both_nbrs = u_nbrs & v_nbrs
+        # Update dq for merge of u into v
+        for w in all_nbrs:
+            # Calculate new dq value
+            if w in both_nbrs:
+                dq_vw = dq_dict[v][w] + dq_dict[u][w]
+            elif w in v_nbrs:
+                dq_vw = dq_dict[v][w] - resolution * (a[u] * b[w] + a[w] * b[u])
+            else:  # w in u_nbrs
+                dq_vw = dq_dict[u][w] - resolution * (a[v] * b[w] + a[w] * b[v])
+            # Update rows v and w
+            for row, col in [(v, w), (w, v)]:
+                dq_heap_row = dq_heap[row]
+                # Update dict for v,w only (u is removed below)
+                dq_dict[row][col] = dq_vw
+                # Save old max of per-row heap
+                if len(dq_heap_row) > 0:
+                    d_oldmax = dq_heap_row.heap[0]
+                else:
+                    d_oldmax = None
+                # Add/update heaps
+                d = (row, col)
+                d_negdq = -dq_vw
+                # Save old value for finding heap index
+                if w in v_nbrs:
+                    # Update existing element in per-row heap
+                    dq_heap_row.update(d, d, priority=d_negdq)
+                else:
+                    # We're creating a new nonzero element, add to heap
+                    dq_heap_row.push(d, priority=d_negdq)
+                # Update heap of row maxes if necessary
+                if d_oldmax is None:
+                    # No entries previously in this row, push new max
+                    H.push(d, priority=d_negdq)
+                else:
+                    # We've updated an entry in this row, has the max changed?
+                    row_max = dq_heap_row.heap[0]
+                    if d_oldmax != row_max or d_oldmax.priority != row_max.priority:
+                        H.update(d_oldmax, row_max)
+
+        # Remove row/col u from dq_dict matrix
+        for w in dq_dict[u]:
+            # Remove from dict
+            dq_old = dq_dict[w][u]
+            del dq_dict[w][u]
+            # Remove from heaps if we haven't already
+            if w != v:
+                # Remove both row and column
+                for row, col in [(w, u), (u, w)]:
+                    dq_heap_row = dq_heap[row]
+                    # Check if replaced dq is row max
+                    d_old = (row, col)
+                    if dq_heap_row.heap[0] == d_old:
+                        # Update per-row heap and heap of row maxes
+                        dq_heap_row.remove(d_old)
+                        H.remove(d_old)
+                        # Update row max
+                        if len(dq_heap_row) > 0:
+                            H.push(dq_heap_row.heap[0])
+                    else:
+                        # Only update per-row heap
+                        dq_heap_row.remove(d_old)
+
+        del dq_dict[u]
+        # Mark row u as deleted, but keep placeholder
+        dq_heap[u] = MappedQueue()
+        # Merge u into v and update a
+        a[v] += a[u]
+        a[u] = 0
+        if directed:
+            b[v] += b[u]
+            b[u] = 0
+
+        yield communities.values()
+
+
+@nx._dispatchable(edge_attrs="weight")
+def greedy_modularity_communities(
+    G,
+    weight=None,
+    resolution=1,
+    cutoff=1,
+    best_n=None,
+):
+    r"""Find communities in G using greedy modularity maximization.

     This function uses Clauset-Newman-Moore greedy modularity maximization [2]_
     to find the community partition with the largest modularity.
@@ -87,7 +247,7 @@ def greedy_modularity_communities(G, weight=None, resolution=1, cutoff=1,
     for more. To obtain exactly n communities, set both `cutoff` and `best_n` to n.

     This function maximizes the generalized modularity, where `resolution`
-    is the resolution parameter, often expressed as $\\gamma$.
+    is the resolution parameter, often expressed as $\gamma$.
     See :func:`~networkx.algorithms.community.quality.modularity`.

     Parameters
@@ -149,14 +309,56 @@ def greedy_modularity_communities(G, weight=None, resolution=1, cutoff=1,
     .. [4] Newman, M. E. J."Analysis of weighted networks"
        Physical Review E 70(5 Pt 2):056131, 2004.
     """
-    pass
-
-
-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
-@nx._dispatchable(edge_attrs='weight')
+    if not G.size():
+        return [{n} for n in G]
+
+    if (cutoff < 1) or (cutoff > G.number_of_nodes()):
+        raise ValueError(f"cutoff must be between 1 and {len(G)}. Got {cutoff}.")
+    if best_n is not None:
+        if (best_n < 1) or (best_n > G.number_of_nodes()):
+            raise ValueError(f"best_n must be between 1 and {len(G)}. Got {best_n}.")
+        if best_n < cutoff:
+            raise ValueError(f"Must have best_n >= cutoff. Got {best_n} < {cutoff}")
+        if best_n == 1:
+            return [set(G)]
+    else:
+        best_n = G.number_of_nodes()
+
+    # retrieve generator object to construct output
+    community_gen = _greedy_modularity_communities_generator(
+        G, weight=weight, resolution=resolution
+    )
+
+    # construct the first best community
+    communities = next(community_gen)
+
+    # continue merging communities until one of the breaking criteria is satisfied
+    while len(communities) > cutoff:
+        try:
+            dq = next(community_gen)
+        # StopIteration occurs when communities are the connected components
+        except StopIteration:
+            communities = sorted(communities, key=len, reverse=True)
+            # if best_n requires more merging, merge big sets for highest modularity
+            while len(communities) > best_n:
+                comm1, comm2, *rest = communities
+                communities = [comm1 ^ comm2]  # disjoint sets, so ^ is their union
+                communities.extend(rest)
+            return communities
+
+        # keep going unless max_mod is reached or best_n says to merge more
+        if dq < 0 and len(communities) <= best_n:
+            break
+        communities = next(community_gen)
+
+    return sorted(communities, key=len, reverse=True)
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
 def naive_greedy_modularity_communities(G, resolution=1, weight=None):
-    """Find communities in G using greedy modularity maximization.
+    r"""Find communities in G using greedy modularity maximization.

     This implementation is O(n^4), much slower than alternatives, but it is
     provided as an easy-to-understand reference implementation.
@@ -166,7 +368,7 @@ def naive_greedy_modularity_communities(G, resolution=1, weight=None):
     such pair exists.

     This function maximizes the generalized modularity, where `resolution`
-    is the resolution parameter, often expressed as $\\gamma$.
+    is the resolution parameter, often expressed as $\gamma$.
     See :func:`~networkx.algorithms.community.quality.modularity`.

     Parameters
@@ -201,4 +403,49 @@ def naive_greedy_modularity_communities(G, resolution=1, weight=None):
     greedy_modularity_communities
     modularity
     """
-    pass
+    # First create one community for each node
+    communities = [frozenset([u]) for u in G.nodes()]
+    # Track merges
+    merges = []
+    # Greedily merge communities until no improvement is possible
+    old_modularity = None
+    new_modularity = modularity(G, communities, resolution=resolution, weight=weight)
+    while old_modularity is None or new_modularity > old_modularity:
+        # Save modularity for comparison
+        old_modularity = new_modularity
+        # Find best pair to merge
+        trial_communities = list(communities)
+        to_merge = None
+        for i, u in enumerate(communities):
+            for j, v in enumerate(communities):
+                # Skip i==j and empty communities
+                if j <= i or len(u) == 0 or len(v) == 0:
+                    continue
+                # Merge communities u and v
+                trial_communities[j] = u | v
+                trial_communities[i] = frozenset([])
+                trial_modularity = modularity(
+                    G, trial_communities, resolution=resolution, weight=weight
+                )
+                if trial_modularity >= new_modularity:
+                    # Check if strictly better or tie
+                    if trial_modularity > new_modularity:
+                        # Found new best, save modularity and group indexes
+                        new_modularity = trial_modularity
+                        to_merge = (i, j, new_modularity - old_modularity)
+                    elif to_merge and min(i, j) < min(to_merge[0], to_merge[1]):
+                        # Break ties by choosing pair with lowest min id
+                        new_modularity = trial_modularity
+                        to_merge = (i, j, new_modularity - old_modularity)
+                # Un-merge
+                trial_communities[i] = u
+                trial_communities[j] = v
+        if to_merge is not None:
+            # If the best merge improves modularity, use it
+            merges.append(to_merge)
+            i, j, dq = to_merge
+            u, v = communities[i], communities[j]
+            communities[j] = u | v
+            communities[i] = frozenset([])
+    # Remove empty communities and sort
+    return sorted((c for c in communities if len(c) > 0), key=len, reverse=True)
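(Not part of the patch.) A sketch for the CNM routine above; `cutoff` and `best_n` pin the number of communities exactly:

    import networkx as nx

    G = nx.karate_club_graph()
    comms = nx.community.greedy_modularity_communities(G)
    print([len(c) for c in comms])  # sizes, largest first
    two = nx.community.greedy_modularity_communities(G, cutoff=2, best_n=2)
    print(len(two))  # 2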
diff --git a/networkx/algorithms/community/quality.py b/networkx/algorithms/community/quality.py
index d58afd00f..f09a6d454 100644
--- a/networkx/algorithms/community/quality.py
+++ b/networkx/algorithms/community/quality.py
@@ -2,19 +2,22 @@
 communities).

 """
+
 from itertools import combinations
+
 import networkx as nx
 from networkx import NetworkXError
 from networkx.algorithms.community.community_utils import is_partition
 from networkx.utils.decorators import argmap
-__all__ = ['modularity', 'partition_quality']
+
+__all__ = ["modularity", "partition_quality"]


 class NotAPartition(NetworkXError):
     """Raised if a given collection is not a partition."""

     def __init__(self, G, collection):
-        msg = f'{collection} is not a valid partition of the graph {G}'
+        msg = f"{collection} is not a valid partition of the graph {G}"
         super().__init__(msg)


@@ -47,7 +50,9 @@ def _require_partition(G, partition):
         networkx.exception.NetworkXError: `partition` is not a valid partition of the nodes of G

     """
-    pass
+    if is_partition(G, partition):
+        return G, partition
+    raise nx.NetworkXError("`partition` is not a valid partition of the nodes of G")


 require_partition = argmap(_require_partition, (0, 1))
@@ -68,7 +73,7 @@ def intra_community_edges(G, partition):
     in the same block of the partition.

     """
-    pass
+    return sum(G.subgraph(block).size() for block in partition)


 @nx._dispatchable
@@ -91,7 +96,16 @@ def inter_community_edges(G, partition):
     that may require the same amount of memory as that of `G`.

     """
-    pass
+    # Alternate implementation that does not require constructing a new
+    # graph object (but does require constructing an affiliation
+    # dictionary):
+    #
+    #     aff = dict(chain.from_iterable(((v, block) for v in block)
+    #                                    for block in partition))
+    #     return sum(1 for u, v in G.edges() if aff[u] != aff[v])
+    #
+    MG = nx.MultiDiGraph if G.is_directed() else nx.MultiGraph
+    return nx.quotient_graph(G, partition, create_using=MG).size()


 @nx._dispatchable
@@ -116,34 +130,42 @@ def inter_community_non_edges(G, partition):
     store `G`.

     """
-    pass
+    # Alternate implementation that does not require constructing two
+    # new graph objects (but does require constructing an affiliation
+    # dictionary):
+    #
+    #     aff = dict(chain.from_iterable(((v, block) for v in block)
+    #                                    for block in partition))
+    #     return sum(1 for u, v in nx.non_edges(G) if aff[u] != aff[v])
+    #
+    return inter_community_edges(nx.complement(G), partition)


-@nx._dispatchable(edge_attrs='weight')
-def modularity(G, communities, weight='weight', resolution=1):
-    """Returns the modularity of the given partition of the graph.
+@nx._dispatchable(edge_attrs="weight")
+def modularity(G, communities, weight="weight", resolution=1):
+    r"""Returns the modularity of the given partition of the graph.

     Modularity is defined in [1]_ as

     .. math::
-        Q = \\frac{1}{2m} \\sum_{ij} \\left( A_{ij} - \\gamma\\frac{k_ik_j}{2m}\\right)
-            \\delta(c_i,c_j)
+        Q = \frac{1}{2m} \sum_{ij} \left( A_{ij} - \gamma\frac{k_ik_j}{2m}\right)
+            \delta(c_i,c_j)

     where $m$ is the number of edges (or sum of all edge weights as in [5]_),
     $A$ is the adjacency matrix of `G`, $k_i$ is the (weighted) degree of $i$,
-    $\\gamma$ is the resolution parameter, and $\\delta(c_i, c_j)$ is 1 if $i$ and
+    $\gamma$ is the resolution parameter, and $\delta(c_i, c_j)$ is 1 if $i$ and
     $j$ are in the same community else 0.

     According to [2]_ (and verified by some algebra) this can be reduced to

     .. math::
-       Q = \\sum_{c=1}^{n}
-       \\left[ \\frac{L_c}{m} - \\gamma\\left( \\frac{k_c}{2m} \\right) ^2 \\right]
+       Q = \sum_{c=1}^{n}
+       \left[ \frac{L_c}{m} - \gamma\left( \frac{k_c}{2m} \right) ^2 \right]

     where the sum iterates over all communities $c$, $m$ is the number of edges,
     $L_c$ is the number of intra-community links for community $c$,
     $k_c$ is the sum of degrees of the nodes in community $c$,
-    and $\\gamma$ is the resolution parameter.
+    and $\gamma$ is the resolution parameter.

     The resolution parameter sets an arbitrary tradeoff between intra-group
     edges and inter-group edges. More complex grouping patterns can be
@@ -204,7 +226,33 @@ def modularity(G, communities, weight='weight', resolution=1):
        networks" J. Stat. Mech 10008, 1-12 (2008).
        https://doi.org/10.1088/1742-5468/2008/10/P10008
     """
-    pass
+    if not isinstance(communities, list):
+        communities = list(communities)
+    if not is_partition(G, communities):
+        raise NotAPartition(G, communities)
+
+    directed = G.is_directed()
+    if directed:
+        out_degree = dict(G.out_degree(weight=weight))
+        in_degree = dict(G.in_degree(weight=weight))
+        m = sum(out_degree.values())
+        norm = 1 / m**2
+    else:
+        out_degree = in_degree = dict(G.degree(weight=weight))
+        deg_sum = sum(out_degree.values())
+        m = deg_sum / 2
+        norm = 1 / deg_sum**2
+
+    def community_contribution(community):
+        comm = set(community)
+        L_c = sum(wt for u, v, wt in G.edges(comm, data=weight, default=1) if v in comm)
+
+        out_degree_sum = sum(out_degree[u] for u in comm)
+        in_degree_sum = sum(in_degree[u] for u in comm) if directed else out_degree_sum
+
+        return L_c / m - resolution * out_degree_sum * in_degree_sum * norm
+
+    return sum(map(community_contribution, communities))
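
A worked instance of the reduced formula (a sketch, assuming the barbell graph below; the numbers follow directly from $L_c$, $k_c$ and $m$):

    import networkx as nx

    G = nx.barbell_graph(3, 0)  # m = 7 edges
    # Each triangle community has L_c = 3 and k_c = 2 + 2 + 3 = 7, so
    # Q = 2 * (3/7 - (7/14)**2) = 5/14
    Q = nx.community.modularity(G, [{0, 1, 2}, {3, 4, 5}])
    assert abs(Q - 5 / 14) < 1e-12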


 @require_partition
@@ -253,4 +301,46 @@ def partition_quality(G, partition):
            *Physical Reports*, Volume 486, Issue 3--5 pp. 75--174
            <https://arxiv.org/abs/0906.0612>
     """
-    pass
+
+    node_community = {}
+    for i, community in enumerate(partition):
+        for node in community:
+            node_community[node] = i
+
+    # `performance` is not defined for multigraphs
+    if not G.is_multigraph():
+    # Iterate over all pairs of communities (quadratic in their number) to calculate `possible_inter_community_edges`
+        possible_inter_community_edges = sum(
+            len(p1) * len(p2) for p1, p2 in combinations(partition, 2)
+        )
+
+        if G.is_directed():
+            possible_inter_community_edges *= 2
+    else:
+        possible_inter_community_edges = 0
+
+    # Compute the number of edges in the complete graph -- `n` nodes,
+    # directed or undirected, depending on `G`
+    n = len(G)
+    total_pairs = n * (n - 1)
+    if not G.is_directed():
+        total_pairs //= 2
+
+    intra_community_edges = 0
+    inter_community_non_edges = possible_inter_community_edges
+
+    # Iterate over the links to count `intra_community_edges` and `inter_community_non_edges`
+    for e in G.edges():
+        if node_community[e[0]] == node_community[e[1]]:
+            intra_community_edges += 1
+        else:
+            inter_community_non_edges -= 1
+
+    coverage = intra_community_edges / len(G.edges)
+
+    if G.is_multigraph():
+        performance = -1.0
+    else:
+        performance = (intra_community_edges + inter_community_non_edges) / total_pairs
+
+    return coverage, performance
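
On the same toy graph, both quality measures can be checked by hand (illustrative sketch; the barbell graph is an assumption):

    import networkx as nx

    G = nx.barbell_graph(3, 0)
    coverage, performance = nx.community.partition_quality(G, [{0, 1, 2}, {3, 4, 5}])
    assert abs(coverage - 6 / 7) < 1e-12  # 6 of the 7 edges are intra-community
    # (6 intra-community edges + 8 inter-community non-edges) / C(6, 2)
    assert abs(performance - 14 / 15) < 1e-12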
diff --git a/networkx/algorithms/components/attracting.py b/networkx/algorithms/components/attracting.py
index a77cebd2f..305c69635 100644
--- a/networkx/algorithms/components/attracting.py
+++ b/networkx/algorithms/components/attracting.py
@@ -1,11 +1,15 @@
 """Attracting components."""
 import networkx as nx
 from networkx.utils.decorators import not_implemented_for
-__all__ = ['number_attracting_components', 'attracting_components',
-    'is_attracting_component']

+__all__ = [
+    "number_attracting_components",
+    "attracting_components",
+    "is_attracting_component",
+]

-@not_implemented_for('undirected')
+
+@not_implemented_for("undirected")
 @nx._dispatchable
 def attracting_components(G):
     """Generates the attracting components in `G`.
@@ -42,10 +46,14 @@ def attracting_components(G):
     is_attracting_component

     """
-    pass
+    scc = list(nx.strongly_connected_components(G))
+    cG = nx.condensation(G, scc)
+    for n in cG:
+        if cG.out_degree(n) == 0:
+            yield scc[n]


-@not_implemented_for('undirected')
+@not_implemented_for("undirected")
 @nx._dispatchable
 def number_attracting_components(G):
     """Returns the number of attracting components in `G`.
@@ -71,10 +79,10 @@ def number_attracting_components(G):
     is_attracting_component

     """
-    pass
+    return sum(1 for ac in attracting_components(G))


-@not_implemented_for('undirected')
+@not_implemented_for("undirected")
 @nx._dispatchable
 def is_attracting_component(G):
     """Returns True if `G` consists of a single attracting component.
@@ -100,4 +108,7 @@ def is_attracting_component(G):
     number_attracting_components

     """
-    pass
+    ac = list(attracting_components(G))
+    if len(ac) == 1:
+        return len(ac[0]) == len(G)
+    return False
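
A small digraph with a single sink SCC exercises all three helpers (a sketch; the toy graph is an assumption):

    import networkx as nx

    G = nx.DiGraph([(1, 2), (2, 1), (2, 3), (3, 4), (4, 3)])
    # {3, 4} is the only sink SCC: once entered, it is never left.
    assert list(nx.attracting_components(G)) == [{3, 4}]
    assert nx.number_attracting_components(G) == 1
    assert not nx.is_attracting_component(G)  # {3, 4} is not all of G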
diff --git a/networkx/algorithms/components/biconnected.py b/networkx/algorithms/components/biconnected.py
index 011a81e1e..0d2f06975 100644
--- a/networkx/algorithms/components/biconnected.py
+++ b/networkx/algorithms/components/biconnected.py
@@ -1,12 +1,18 @@
 """Biconnected components and articulation points."""
 from itertools import chain
+
 import networkx as nx
 from networkx.utils.decorators import not_implemented_for
-__all__ = ['biconnected_components', 'biconnected_component_edges',
-    'is_biconnected', 'articulation_points']
+
+__all__ = [
+    "biconnected_components",
+    "biconnected_component_edges",
+    "is_biconnected",
+    "articulation_points",
+]


-@not_implemented_for('directed')
+@not_implemented_for("directed")
 @nx._dispatchable
 def is_biconnected(G):
     """Returns True if the graph is biconnected, False otherwise.
@@ -71,10 +77,23 @@ def is_biconnected(G):
        Communications of the ACM 16: 372–378. doi:10.1145/362248.362272

     """
-    pass
-
-
-@not_implemented_for('directed')
+    bccs = biconnected_components(G)
+    try:
+        bcc = next(bccs)
+    except StopIteration:
+        # No bicomponents (empty graph?)
+        return False
+    try:
+        next(bccs)
+    except StopIteration:
+        # Only one bicomponent
+        return len(bcc) == len(G)
+    else:
+        # Multiple bicomponents
+        return False
+
+
+@not_implemented_for("directed")
 @nx._dispatchable
 def biconnected_component_edges(G):
     """Returns a generator of lists of edges, one list for each biconnected
@@ -144,10 +163,10 @@ def biconnected_component_edges(G):
            Communications of the ACM 16: 372–378. doi:10.1145/362248.362272

     """
-    pass
+    yield from _biconnected_dfs(G, components=True)


-@not_implemented_for('directed')
+@not_implemented_for("directed")
 @nx._dispatchable
 def biconnected_components(G):
     """Returns a generator of sets of nodes, one set for each biconnected
@@ -236,10 +255,11 @@ def biconnected_components(G):
            Communications of the ACM 16: 372–378. doi:10.1145/362248.362272

     """
-    pass
+    for comp in _biconnected_dfs(G, components=True):
+        yield set(chain.from_iterable(comp))


-@not_implemented_for('directed')
+@not_implemented_for("directed")
 @nx._dispatchable
 def articulation_points(G):
     """Yield the articulation points, or cut vertices, of a graph.
@@ -307,4 +327,67 @@ def articulation_points(G):
            Communications of the ACM 16: 372–378. doi:10.1145/362248.362272

     """
-    pass
+    seen = set()
+    for articulation in _biconnected_dfs(G, components=False):
+        if articulation not in seen:
+            seen.add(articulation)
+            yield articulation
+
+
+@not_implemented_for("directed")
+def _biconnected_dfs(G, components=True):
+    # depth-first search algorithm to generate articulation points
+    # and biconnected components
+    visited = set()
+    for start in G:
+        if start in visited:
+            continue
+        discovery = {start: 0}  # time of first discovery of node during search
+        low = {start: 0}
+        root_children = 0
+        visited.add(start)
+        edge_stack = []
+        stack = [(start, start, iter(G[start]))]
+        edge_index = {}
+        while stack:
+            grandparent, parent, children = stack[-1]
+            try:
+                child = next(children)
+                if grandparent == child:
+                    continue
+                if child in visited:
+                    if discovery[child] <= discovery[parent]:  # back edge
+                        low[parent] = min(low[parent], discovery[child])
+                        if components:
+                            edge_index[parent, child] = len(edge_stack)
+                            edge_stack.append((parent, child))
+                else:
+                    low[child] = discovery[child] = len(discovery)
+                    visited.add(child)
+                    stack.append((parent, child, iter(G[child])))
+                    if components:
+                        edge_index[parent, child] = len(edge_stack)
+                        edge_stack.append((parent, child))
+
+            except StopIteration:
+                stack.pop()
+                if len(stack) > 1:
+                    if low[parent] >= discovery[grandparent]:
+                        if components:
+                            ind = edge_index[grandparent, parent]
+                            yield edge_stack[ind:]
+                            del edge_stack[ind:]
+
+                        else:
+                            yield grandparent
+                    low[grandparent] = min(low[parent], low[grandparent])
+                elif stack:  # length 1 so grandparent is root
+                    root_children += 1
+                    if components:
+                        ind = edge_index[grandparent, parent]
+                        yield edge_stack[ind:]
+                        del edge_stack[ind:]
+        if not components:
+            # root node is articulation point if it has more than 1 child
+            if root_children > 1:
+                yield start
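
The DFS above can be exercised on two triangles glued at a single node (a sketch; the toy graph is an assumption):

    import networkx as nx

    # Two triangles sharing node 2, so 2 is the only articulation point.
    G = nx.Graph([(0, 1), (1, 2), (2, 0), (2, 3), (3, 4), (4, 2)])
    assert sorted(map(sorted, nx.biconnected_components(G))) == [[0, 1, 2], [2, 3, 4]]
    assert list(nx.articulation_points(G)) == [2]
    assert not nx.is_biconnected(G)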
diff --git a/networkx/algorithms/components/connected.py b/networkx/algorithms/components/connected.py
index 725e6fd5a..ad3e0155a 100644
--- a/networkx/algorithms/components/connected.py
+++ b/networkx/algorithms/components/connected.py
@@ -1,12 +1,18 @@
 """Connected components."""
 import networkx as nx
 from networkx.utils.decorators import not_implemented_for
+
 from ...utils import arbitrary_element
-__all__ = ['number_connected_components', 'connected_components',
-    'is_connected', 'node_connected_component']
+
+__all__ = [
+    "number_connected_components",
+    "connected_components",
+    "is_connected",
+    "node_connected_component",
+]


-@not_implemented_for('directed')
+@not_implemented_for("directed")
 @nx._dispatchable
 def connected_components(G):
     """Generate connected components.
@@ -54,10 +60,15 @@ def connected_components(G):
     For undirected graphs only.

     """
-    pass
+    seen = set()
+    for v in G:
+        if v not in seen:
+            c = _plain_bfs(G, v)
+            seen.update(c)
+            yield c


-@not_implemented_for('directed')
+@not_implemented_for("directed")
 @nx._dispatchable
 def number_connected_components(G):
     """Returns the number of connected components.
@@ -94,10 +105,10 @@ def number_connected_components(G):
     For undirected graphs only.

     """
-    pass
+    return sum(1 for cc in connected_components(G))


-@not_implemented_for('directed')
+@not_implemented_for("directed")
 @nx._dispatchable
 def is_connected(G):
     """Returns True if the graph is connected, False otherwise.
@@ -136,10 +147,14 @@ def is_connected(G):
     For undirected graphs only.

     """
-    pass
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept(
+            "Connectivity is undefined for the null graph."
+        )
+    return sum(1 for node in _plain_bfs(G, arbitrary_element(G))) == len(G)


-@not_implemented_for('directed')
+@not_implemented_for("directed")
 @nx._dispatchable
 def node_connected_component(G, n):
     """Returns the set of nodes in the component of graph containing node n.
@@ -177,9 +192,23 @@ def node_connected_component(G, n):
     For undirected graphs only.

     """
-    pass
+    return _plain_bfs(G, n)


 def _plain_bfs(G, source):
     """A fast BFS node generator"""
-    pass
+    adj = G._adj
+    n = len(adj)
+    seen = {source}
+    nextlevel = [source]
+    while nextlevel:
+        thislevel = nextlevel
+        nextlevel = []
+        for v in thislevel:
+            for w in adj[v]:
+                if w not in seen:
+                    seen.add(w)
+                    nextlevel.append(w)
+            if len(seen) == n:
+                return seen
+    return seen
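
A minimal exercise of the four public functions in this module (a sketch; the toy graph is an assumption):

    import networkx as nx

    G = nx.Graph([(0, 1), (1, 2), (10, 11)])
    G.add_node(20)  # isolated nodes form their own components
    assert sorted(map(sorted, nx.connected_components(G))) == [[0, 1, 2], [10, 11], [20]]
    assert nx.number_connected_components(G) == 3
    assert nx.node_connected_component(G, 11) == {10, 11}
    assert not nx.is_connected(G)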
diff --git a/networkx/algorithms/components/semiconnected.py b/networkx/algorithms/components/semiconnected.py
index d6838ed1b..13cfa988a 100644
--- a/networkx/algorithms/components/semiconnected.py
+++ b/networkx/algorithms/components/semiconnected.py
@@ -1,13 +1,14 @@
 """Semiconnectedness."""
 import networkx as nx
 from networkx.utils import not_implemented_for, pairwise
-__all__ = ['is_semiconnected']

+__all__ = ["is_semiconnected"]

-@not_implemented_for('undirected')
+
+@not_implemented_for("undirected")
 @nx._dispatchable
 def is_semiconnected(G):
-    """Returns True if the graph is semiconnected, False otherwise.
+    r"""Returns True if the graph is semiconnected, False otherwise.

     A graph is semiconnected if and only if for any pair of nodes, either one
     is reachable from the other, or they are mutually reachable.
@@ -18,7 +19,7 @@ def is_semiconnected(G):
     semiconnected by condensing the graph: i.e. constructing a new graph `H`
     with nodes being the strongly connected components of `G`, and edges
     (scc_1, scc_2) if there is an edge $(v_1, v_2)$ in `G` for some
-    $v_1 \\in scc_1$ and $v_2 \\in scc_2$. That results in a DAG, so we compute
+    $v_1 \in scc_1$ and $v_2 \in scc_2$. That results in a DAG, so we compute
     the topological sort of `H` and check if for every $n$ there is an edge
     $(scc_n, scc_{n+1})$.

@@ -56,4 +57,14 @@ def is_semiconnected(G):
     is_connected
     is_biconnected
     """
-    pass
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept(
+            "Connectivity is undefined for the null graph."
+        )
+
+    if not nx.is_weakly_connected(G):
+        return False
+
+    H = nx.condensation(G)
+
+    return all(H.has_edge(u, v) for u, v in pairwise(nx.topological_sort(H)))
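
Two tiny digraphs illustrate the condensation-based test (a sketch; both graphs are assumptions):

    import networkx as nx

    assert nx.is_semiconnected(nx.path_graph(4, create_using=nx.DiGraph))
    # Two sources feeding one sink: 0 and 2 cannot reach each other.
    assert not nx.is_semiconnected(nx.DiGraph([(0, 1), (2, 1)]))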
diff --git a/networkx/algorithms/components/strongly_connected.py b/networkx/algorithms/components/strongly_connected.py
index e1dc2128a..febd1b9b5 100644
--- a/networkx/algorithms/components/strongly_connected.py
+++ b/networkx/algorithms/components/strongly_connected.py
@@ -1,13 +1,18 @@
 """Strongly connected components."""
 import networkx as nx
 from networkx.utils.decorators import not_implemented_for
-__all__ = ['number_strongly_connected_components',
-    'strongly_connected_components', 'is_strongly_connected',
-    'strongly_connected_components_recursive',
-    'kosaraju_strongly_connected_components', 'condensation']

+__all__ = [
+    "number_strongly_connected_components",
+    "strongly_connected_components",
+    "is_strongly_connected",
+    "strongly_connected_components_recursive",
+    "kosaraju_strongly_connected_components",
+    "condensation",
+]

-@not_implemented_for('undirected')
+
+@not_implemented_for("undirected")
 @nx._dispatchable
 def strongly_connected_components(G):
     """Generate nodes in strongly connected components of graph.
@@ -63,10 +68,47 @@ def strongly_connected_components(G):
       Information Processing Letters 49(1): 9-14, (1994).

     """
-    pass
-
-
-@not_implemented_for('undirected')
+    preorder = {}
+    lowlink = {}
+    scc_found = set()
+    scc_queue = []
+    i = 0  # Preorder counter
+    neighbors = {v: iter(G[v]) for v in G}
+    for source in G:
+        if source not in scc_found:
+            queue = [source]
+            while queue:
+                v = queue[-1]
+                if v not in preorder:
+                    i = i + 1
+                    preorder[v] = i
+                done = True
+                for w in neighbors[v]:
+                    if w not in preorder:
+                        queue.append(w)
+                        done = False
+                        break
+                if done:
+                    lowlink[v] = preorder[v]
+                    for w in G[v]:
+                        if w not in scc_found:
+                            if preorder[w] > preorder[v]:
+                                lowlink[v] = min([lowlink[v], lowlink[w]])
+                            else:
+                                lowlink[v] = min([lowlink[v], preorder[w]])
+                    queue.pop()
+                    if lowlink[v] == preorder[v]:
+                        scc = {v}
+                        while scc_queue and preorder[scc_queue[-1]] > preorder[v]:
+                            k = scc_queue.pop()
+                            scc.add(k)
+                        scc_found.update(scc)
+                        yield scc
+                    else:
+                        scc_queue.append(v)
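
The nonrecursive scheme above can be spot-checked on a 3-cycle with a tail (a sketch; the toy graph is an assumption):

    import networkx as nx

    G = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4)])
    assert sorted(map(sorted, nx.strongly_connected_components(G))) == [[1, 2, 3], [4]]
    assert nx.number_strongly_connected_components(G) == 2
    assert not nx.is_strongly_connected(G)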
+
+
+@not_implemented_for("undirected")
 @nx._dispatchable
 def kosaraju_strongly_connected_components(G, source=None):
     """Generate nodes in strongly connected components of graph.
@@ -115,10 +157,20 @@ def kosaraju_strongly_connected_components(G, source=None):
     Uses Kosaraju's algorithm.

     """
-    pass
+    post = list(nx.dfs_postorder_nodes(G.reverse(copy=False), source=source))
+
+    seen = set()
+    while post:
+        r = post.pop()
+        if r in seen:
+            continue
+        c = nx.dfs_preorder_nodes(G, r)
+        new = {v for v in c if v not in seen}
+        seen.update(new)
+        yield new


-@not_implemented_for('undirected')
+@not_implemented_for("undirected")
 @nx._dispatchable
 def strongly_connected_components_recursive(G):
     """Generate nodes in strongly connected components of graph.
@@ -186,10 +238,21 @@ def strongly_connected_components_recursive(G):
       Information Processing Letters 49(1): 9-14, (1994).

     """
-    pass
+    import warnings

+    warnings.warn(
+        (
+            "\n\nstrongly_connected_components_recursive is deprecated and will be\n"
+            "removed in the future. Use strongly_connected_components instead."
+        ),
+        category=DeprecationWarning,
+        stacklevel=2,
+    )

-@not_implemented_for('undirected')
+    yield from strongly_connected_components(G)
+
+
+@not_implemented_for("undirected")
 @nx._dispatchable
 def number_strongly_connected_components(G):
     """Returns number of strongly connected components in graph.
@@ -227,10 +290,10 @@ def number_strongly_connected_components(G):
     -----
     For directed graphs only.
     """
-    pass
+    return sum(1 for scc in strongly_connected_components(G))


-@not_implemented_for('undirected')
+@not_implemented_for("undirected")
 @nx._dispatchable
 def is_strongly_connected(G):
     """Test directed graph for strong connectivity.
@@ -274,10 +337,15 @@ def is_strongly_connected(G):
     -----
     For directed graphs only.
     """
-    pass
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept(
+            """Connectivity is undefined for the null graph."""
+        )
+
+    return len(next(strongly_connected_components(G))) == len(G)


-@not_implemented_for('undirected')
+@not_implemented_for("undirected")
 @nx._dispatchable(returns_graph=True)
 def condensation(G, scc=None):
     """Returns the condensation of G.
@@ -340,4 +408,23 @@ def condensation(G, scc=None):
     the resulting graph is a directed acyclic graph.

     """
-    pass
+    if scc is None:
+        scc = nx.strongly_connected_components(G)
+    mapping = {}
+    members = {}
+    C = nx.DiGraph()
+    # Add mapping dict as graph attribute
+    C.graph["mapping"] = mapping
+    if len(G) == 0:
+        return C
+    for i, component in enumerate(scc):
+        members[i] = component
+        mapping.update((n, i) for n in component)
+    number_of_components = i + 1
+    C.add_nodes_from(range(number_of_components))
+    C.add_edges_from(
+        (mapping[u], mapping[v]) for u, v in G.edges() if mapping[u] != mapping[v]
+    )
+    # Add a list of members (ie original nodes) to each node (ie scc) in C.
+    nx.set_node_attributes(C, members, "members")
+    return C
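
A two-SCC digraph shows the shape of the condensation and its `mapping` attribute (a sketch; the toy graph is an assumption):

    import networkx as nx

    G = nx.DiGraph([(1, 2), (2, 1), (2, 3), (3, 4), (4, 3)])
    C = nx.condensation(G)
    assert C.number_of_nodes() == 2 and C.number_of_edges() == 1
    assert C.graph["mapping"][3] == C.graph["mapping"][4]  # same SCC, same node in C
    assert nx.is_directed_acyclic_graph(C)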
diff --git a/networkx/algorithms/components/weakly_connected.py b/networkx/algorithms/components/weakly_connected.py
index 3926c7239..499c2ba74 100644
--- a/networkx/algorithms/components/weakly_connected.py
+++ b/networkx/algorithms/components/weakly_connected.py
@@ -1,11 +1,15 @@
 """Weakly connected components."""
 import networkx as nx
 from networkx.utils.decorators import not_implemented_for
-__all__ = ['number_weakly_connected_components',
-    'weakly_connected_components', 'is_weakly_connected']

+__all__ = [
+    "number_weakly_connected_components",
+    "weakly_connected_components",
+    "is_weakly_connected",
+]

-@not_implemented_for('undirected')
+
+@not_implemented_for("undirected")
 @nx._dispatchable
 def weakly_connected_components(G):
     """Generate weakly connected components of G.
@@ -50,10 +54,15 @@ def weakly_connected_components(G):
     For directed graphs only.

     """
-    pass
+    seen = set()
+    for v in G:
+        if v not in seen:
+            c = set(_plain_bfs(G, v))
+            seen.update(c)
+            yield c


-@not_implemented_for('undirected')
+@not_implemented_for("undirected")
 @nx._dispatchable
 def number_weakly_connected_components(G):
     """Returns the number of weakly connected components in G.
@@ -90,10 +99,10 @@ def number_weakly_connected_components(G):
     For directed graphs only.

     """
-    pass
+    return sum(1 for wcc in weakly_connected_components(G))


-@not_implemented_for('undirected')
+@not_implemented_for("undirected")
 @nx._dispatchable
 def is_weakly_connected(G):
     """Test directed graph for weak connectivity.
@@ -143,7 +152,12 @@ def is_weakly_connected(G):
     For directed graphs only.

     """
-    pass
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept(
+            """Connectivity is undefined for the null graph."""
+        )
+
+    return len(next(weakly_connected_components(G))) == len(G)


 def _plain_bfs(G, source):
@@ -154,4 +168,26 @@ def _plain_bfs(G, source):
     For directed graphs only.

     """
-    pass
+    n = len(G)
+    Gsucc = G._succ
+    Gpred = G._pred
+    seen = {source}
+    nextlevel = [source]
+
+    yield source
+    while nextlevel:
+        thislevel = nextlevel
+        nextlevel = []
+        for v in thislevel:
+            for w in Gsucc[v]:
+                if w not in seen:
+                    seen.add(w)
+                    nextlevel.append(w)
+                    yield w
+            for w in Gpred[v]:
+                if w not in seen:
+                    seen.add(w)
+                    nextlevel.append(w)
+                    yield w
+            if len(seen) == n:
+                return
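
A quick exercise of the weak-connectivity functions (a sketch; the toy digraph is an assumption):

    import networkx as nx

    G = nx.DiGraph([(0, 1), (2, 1), (3, 4)])  # edge direction is ignored here
    assert sorted(map(sorted, nx.weakly_connected_components(G))) == [[0, 1, 2], [3, 4]]
    assert nx.number_weakly_connected_components(G) == 2
    assert not nx.is_weakly_connected(G)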
diff --git a/networkx/algorithms/connectivity/connectivity.py b/networkx/algorithms/connectivity/connectivity.py
index 3f2ded06b..8ccca88d2 100644
--- a/networkx/algorithms/connectivity/connectivity.py
+++ b/networkx/algorithms/connectivity/connectivity.py
@@ -1,22 +1,41 @@
 """
 Flow based connectivity algorithms
 """
+
 import itertools
 from operator import itemgetter
+
 import networkx as nx
-from networkx.algorithms.flow import boykov_kolmogorov, build_residual_network, dinitz, edmonds_karp, shortest_augmenting_path
+
+# Define the default maximum flow function to use in all flow based
+# connectivity algorithms.
+from networkx.algorithms.flow import (
+    boykov_kolmogorov,
+    build_residual_network,
+    dinitz,
+    edmonds_karp,
+    shortest_augmenting_path,
+)
+
 default_flow_func = edmonds_karp
+
 from .utils import build_auxiliary_edge_connectivity, build_auxiliary_node_connectivity
-__all__ = ['average_node_connectivity', 'local_node_connectivity',
-    'node_connectivity', 'local_edge_connectivity', 'edge_connectivity',
-    'all_pairs_node_connectivity']
+
+__all__ = [
+    "average_node_connectivity",
+    "local_node_connectivity",
+    "node_connectivity",
+    "local_edge_connectivity",
+    "edge_connectivity",
+    "all_pairs_node_connectivity",
+]


-@nx._dispatchable(graphs={'G': 0, 'auxiliary?': 4}, preserve_graph_attrs={
-    'auxiliary'})
-def local_node_connectivity(G, s, t, flow_func=None, auxiliary=None,
-    residual=None, cutoff=None):
-    """Computes local node connectivity for nodes s and t.
+@nx._dispatchable(graphs={"G": 0, "auxiliary?": 4}, preserve_graph_attrs={"auxiliary"})
+def local_node_connectivity(
+    G, s, t, flow_func=None, auxiliary=None, residual=None, cutoff=None
+):
+    r"""Computes local node connectivity for nodes s and t.

     Local node connectivity for two non adjacent nodes s and t is the
     minimum number of nodes that must be removed (along with their incident
@@ -163,12 +182,35 @@ def local_node_connectivity(G, s, t, flow_func=None, auxiliary=None,
         http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf

     """
-    pass
+    if flow_func is None:
+        flow_func = default_flow_func
+
+    if auxiliary is None:
+        H = build_auxiliary_node_connectivity(G)
+    else:
+        H = auxiliary
+
+    mapping = H.graph.get("mapping", None)
+    if mapping is None:
+        raise nx.NetworkXError("Invalid auxiliary digraph.")
+
+    kwargs = {"flow_func": flow_func, "residual": residual}
+    if flow_func is shortest_augmenting_path:
+        kwargs["cutoff"] = cutoff
+        kwargs["two_phase"] = True
+    elif flow_func is edmonds_karp:
+        kwargs["cutoff"] = cutoff
+    elif flow_func is dinitz:
+        kwargs["cutoff"] = cutoff
+    elif flow_func is boykov_kolmogorov:
+        kwargs["cutoff"] = cutoff
+
+    return nx.maximum_flow_value(H, f"{mapping[s]}B", f"{mapping[t]}A", **kwargs)
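
A spot check of the flow-based local connectivity (a sketch; the Petersen graph and the subpackage import are assumptions):

    import networkx as nx
    from networkx.algorithms.connectivity import local_node_connectivity

    G = nx.petersen_graph()  # 3-regular and 3-connected
    # 0 and 2 are nonadjacent, so 3 internally node-disjoint paths join them.
    assert local_node_connectivity(G, 0, 2) == 3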


 @nx._dispatchable
 def node_connectivity(G, s=None, t=None, flow_func=None):
-    """Returns node connectivity for a graph or digraph G.
+    r"""Returns node connectivity for a graph or digraph G.

     Node connectivity is equal to the minimum number of nodes that
     must be removed to disconnect G or render it trivial. If source
@@ -235,8 +277,8 @@ def node_connectivity(G, s=None, t=None, flow_func=None):
     Notes
     -----
     This is a flow based implementation of node connectivity. The
-    algorithm works by solving $O((n-\\delta-1+\\delta(\\delta-1)/2))$
-    maximum flow problems on an auxiliary digraph. Where $\\delta$
+    algorithm works by solving $O((n-\delta-1+\delta(\delta-1)/2))$
+    maximum flow problems on an auxiliary digraph. Where $\delta$
     is the minimum degree of G. For details about the auxiliary
     digraph and the computation of local node connectivity see
     :meth:`local_node_connectivity`. This implementation is based
@@ -257,19 +299,66 @@ def node_connectivity(G, s=None, t=None, flow_func=None):
         http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf

     """
-    pass
+    if (s is not None and t is None) or (s is None and t is not None):
+        raise nx.NetworkXError("Both source and target must be specified.")
+
+    # Local node connectivity
+    if s is not None and t is not None:
+        if s not in G:
+            raise nx.NetworkXError(f"node {s} not in graph")
+        if t not in G:
+            raise nx.NetworkXError(f"node {t} not in graph")
+        return local_node_connectivity(G, s, t, flow_func=flow_func)
+
+    # Global node connectivity
+    if G.is_directed():
+        if not nx.is_weakly_connected(G):
+            return 0
+        iter_func = itertools.permutations
+        # It is necessary to consider both predecessors
+        # and successors for directed graphs
+
+        def neighbors(v):
+            return itertools.chain.from_iterable([G.predecessors(v), G.successors(v)])
+
+    else:
+        if not nx.is_connected(G):
+            return 0
+        iter_func = itertools.combinations
+        neighbors = G.neighbors
+
+    # Reuse the auxiliary digraph and the residual network
+    H = build_auxiliary_node_connectivity(G)
+    R = build_residual_network(H, "capacity")
+    kwargs = {"flow_func": flow_func, "auxiliary": H, "residual": R}
+
+    # Pick a node with minimum degree
+    # Node connectivity is bounded by degree.
+    v, K = min(G.degree(), key=itemgetter(1))
+    # compute local node connectivity between v and all its non-neighbor nodes
+    for w in set(G) - set(neighbors(v)) - {v}:
+        kwargs["cutoff"] = K
+        K = min(K, local_node_connectivity(G, v, w, **kwargs))
+    # Also for non adjacent pairs of neighbors of v
+    for x, y in iter_func(neighbors(v), 2):
+        if y in G[x]:
+            continue
+        kwargs["cutoff"] = K
+        K = min(K, local_node_connectivity(G, x, y, **kwargs))
+
+    return K
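
The global routine matches the known connectivity of a 5-regular example (a sketch; the icosahedral graph is an assumption):

    import networkx as nx

    G = nx.icosahedral_graph()  # 5-regular
    assert nx.node_connectivity(G) == 5  # no cut smaller than a full neighborhood
    G.remove_node(0)
    assert nx.node_connectivity(G) < 5  # the survivors now have degree 4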


 @nx._dispatchable
 def average_node_connectivity(G, flow_func=None):
-    """Returns the average connectivity of a graph G.
+    r"""Returns the average connectivity of a graph G.

-    The average connectivity `\\bar{\\kappa}` of a graph G is the average
+    The average connectivity `\bar{\kappa}` of a graph G is the average
     of local node connectivity over all pairs of nodes of G [1]_ .

     .. math::

-        \\bar{\\kappa}(G) = \\frac{\\sum_{u,v} \\kappa_{G}(u,v)}{{n \\choose 2}}
+        \bar{\kappa}(G) = \frac{\sum_{u,v} \kappa_{G}(u,v)}{{n \choose 2}}

     Parameters
     ----------
@@ -309,7 +398,24 @@ def average_node_connectivity(G, flow_func=None):
             http://www.sciencedirect.com/science/article/pii/S0012365X01001807

     """
-    pass
+    if G.is_directed():
+        iter_func = itertools.permutations
+    else:
+        iter_func = itertools.combinations
+
+    # Reuse the auxiliary digraph and the residual network
+    H = build_auxiliary_node_connectivity(G)
+    R = build_residual_network(H, "capacity")
+    kwargs = {"flow_func": flow_func, "auxiliary": H, "residual": R}
+
+    num, den = 0, 0
+    for u, v in iter_func(G, 2):
+        num += local_node_connectivity(G, u, v, **kwargs)
+        den += 1
+
+    if den == 0:  # Null Graph
+        return 0
+    return num / den


 @nx._dispatchable
@@ -352,13 +458,39 @@ def all_pairs_node_connectivity(G, nbunch=None, flow_func=None):
     :meth:`shortest_augmenting_path`

     """
-    pass
+    if nbunch is None:
+        nbunch = G
+    else:
+        nbunch = set(nbunch)
+
+    directed = G.is_directed()
+    if directed:
+        iter_func = itertools.permutations
+    else:
+        iter_func = itertools.combinations
+
+    all_pairs = {n: {} for n in nbunch}
+
+    # Reuse auxiliary digraph and residual network
+    H = build_auxiliary_node_connectivity(G)
+    mapping = H.graph["mapping"]
+    R = build_residual_network(H, "capacity")
+    kwargs = {"flow_func": flow_func, "auxiliary": H, "residual": R}

+    for u, v in iter_func(nbunch, 2):
+        K = local_node_connectivity(G, u, v, **kwargs)
+        all_pairs[u][v] = K
+        if not directed:
+            all_pairs[v][u] = K

-@nx._dispatchable(graphs={'G': 0, 'auxiliary?': 4})
-def local_edge_connectivity(G, s, t, flow_func=None, auxiliary=None,
-    residual=None, cutoff=None):
-    """Returns local edge connectivity for nodes s and t in G.
+    return all_pairs
+
+
+@nx._dispatchable(graphs={"G": 0, "auxiliary?": 4})
+def local_edge_connectivity(
+    G, s, t, flow_func=None, auxiliary=None, residual=None, cutoff=None
+):
+    r"""Returns local edge connectivity for nodes s and t in G.

     Local edge connectivity for two nodes s and t is the minimum number
     of edges that must be removed to disconnect them.
@@ -494,12 +626,31 @@ def local_edge_connectivity(G, s, t, flow_func=None, auxiliary=None,
         http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf

     """
-    pass
+    if flow_func is None:
+        flow_func = default_flow_func
+
+    if auxiliary is None:
+        H = build_auxiliary_edge_connectivity(G)
+    else:
+        H = auxiliary
+
+    kwargs = {"flow_func": flow_func, "residual": residual}
+    if flow_func is shortest_augmenting_path:
+        kwargs["cutoff"] = cutoff
+        kwargs["two_phase"] = True
+    elif flow_func is edmonds_karp:
+        kwargs["cutoff"] = cutoff
+    elif flow_func is dinitz:
+        kwargs["cutoff"] = cutoff
+    elif flow_func is boykov_kolmogorov:
+        kwargs["cutoff"] = cutoff
+
+    return nx.maximum_flow_value(H, s, t, **kwargs)


 @nx._dispatchable
 def edge_connectivity(G, s=None, t=None, flow_func=None, cutoff=None):
-    """Returns the edge connectivity of the graph or digraph G.
+    r"""Returns the edge connectivity of the graph or digraph G.

     The edge connectivity is equal to the minimum number of edges that
     must be removed to disconnect G or render it trivial. If source
@@ -598,4 +749,68 @@ def edge_connectivity(G, s=None, t=None, flow_func=None, cutoff=None):
         http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf

     """
-    pass
+    if (s is not None and t is None) or (s is None and t is not None):
+        raise nx.NetworkXError("Both source and target must be specified.")
+
+    # Local edge connectivity
+    if s is not None and t is not None:
+        if s not in G:
+            raise nx.NetworkXError(f"node {s} not in graph")
+        if t not in G:
+            raise nx.NetworkXError(f"node {t} not in graph")
+        return local_edge_connectivity(G, s, t, flow_func=flow_func, cutoff=cutoff)
+
+    # Global edge connectivity
+    # reuse auxiliary digraph and residual network
+    H = build_auxiliary_edge_connectivity(G)
+    R = build_residual_network(H, "capacity")
+    kwargs = {"flow_func": flow_func, "auxiliary": H, "residual": R}
+
+    if G.is_directed():
+        # Algorithm 8 in [1]
+        if not nx.is_weakly_connected(G):
+            return 0
+
+        # initial value for \lambda is minimum degree
+        L = min(d for n, d in G.degree())
+        nodes = list(G)
+        n = len(nodes)
+
+        if cutoff is not None:
+            L = min(cutoff, L)
+
+        for i in range(n):
+            kwargs["cutoff"] = L
+            try:
+                L = min(L, local_edge_connectivity(G, nodes[i], nodes[i + 1], **kwargs))
+            except IndexError:  # last node!
+                L = min(L, local_edge_connectivity(G, nodes[i], nodes[0], **kwargs))
+        return L
+    else:  # undirected
+        # Algorithm 6 in [1]
+        if not nx.is_connected(G):
+            return 0
+
+        # initial value for \lambda is minimum degree
+        L = min(d for n, d in G.degree())
+
+        if cutoff is not None:
+            L = min(cutoff, L)
+
+        # A dominating set is \lambda-covering
+        # We need a dominating set with at least two nodes
+        for node in G:
+            D = nx.dominating_set(G, start_with=node)
+            v = D.pop()
+            if D:
+                break
+        else:
+            # in complete graphs a dominating set is always a single node,
+            # thus we return the minimum degree
+            return L
+
+        for w in D:
+            kwargs["cutoff"] = L
+            L = min(L, local_edge_connectivity(G, v, w, **kwargs))
+
+        return L
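
Spot checks for the global, local, and cutoff-capped cases (a sketch; the example graphs are assumptions):

    import networkx as nx

    assert nx.edge_connectivity(nx.cycle_graph(5)) == 2  # any cut needs two edges
    assert nx.edge_connectivity(nx.cycle_graph(5), 0, 2) == 2
    # `cutoff` stops the underlying max-flow computations early and caps the answer.
    assert nx.edge_connectivity(nx.complete_graph(6), cutoff=3) == 3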
diff --git a/networkx/algorithms/connectivity/cuts.py b/networkx/algorithms/connectivity/cuts.py
index bce929eda..117004406 100644
--- a/networkx/algorithms/connectivity/cuts.py
+++ b/networkx/algorithms/connectivity/cuts.py
@@ -2,19 +2,31 @@
 Flow based cut algorithms
 """
 import itertools
+
 import networkx as nx
+
+# Define the default maximum flow function to use in all flow based
+# cut algorithms.
 from networkx.algorithms.flow import build_residual_network, edmonds_karp
+
 default_flow_func = edmonds_karp
+
 from .utils import build_auxiliary_edge_connectivity, build_auxiliary_node_connectivity
-__all__ = ['minimum_st_node_cut', 'minimum_node_cut', 'minimum_st_edge_cut',
-    'minimum_edge_cut']

+__all__ = [
+    "minimum_st_node_cut",
+    "minimum_node_cut",
+    "minimum_st_edge_cut",
+    "minimum_edge_cut",
+]

-@nx._dispatchable(graphs={'G': 0, 'auxiliary?': 4}, preserve_edge_attrs={
-    'auxiliary': {'capacity': float('inf')}}, preserve_graph_attrs={
-    'auxiliary'})
-def minimum_st_edge_cut(G, s, t, flow_func=None, auxiliary=None, residual=None
-    ):
+
+@nx._dispatchable(
+    graphs={"G": 0, "auxiliary?": 4},
+    preserve_edge_attrs={"auxiliary": {"capacity": float("inf")}},
+    preserve_graph_attrs={"auxiliary"},
+)
+def minimum_st_edge_cut(G, s, t, flow_func=None, auxiliary=None, residual=None):
     """Returns the edges of the cut-set of a minimum (s, t)-cut.

     This function returns the set of edges of minimum cardinality that,
@@ -125,14 +137,34 @@ def minimum_st_edge_cut(G, s, t, flow_func=None, auxiliary=None, residual=None
     5

     """
-    pass
-
-
-@nx._dispatchable(graphs={'G': 0, 'auxiliary?': 4}, preserve_node_attrs={
-    'auxiliary': {'id': None}}, preserve_graph_attrs={'auxiliary'})
-def minimum_st_node_cut(G, s, t, flow_func=None, auxiliary=None, residual=None
-    ):
-    """Returns a set of nodes of minimum cardinality that disconnect source
+    if flow_func is None:
+        flow_func = default_flow_func
+
+    if auxiliary is None:
+        H = build_auxiliary_edge_connectivity(G)
+    else:
+        H = auxiliary
+
+    kwargs = {"capacity": "capacity", "flow_func": flow_func, "residual": residual}
+
+    cut_value, partition = nx.minimum_cut(H, s, t, **kwargs)
+    reachable, non_reachable = partition
+    # Any edge in the original graph linking the two sets in the
+    # partition is part of the edge cutset
+    cutset = set()
+    for u, nbrs in ((n, G[n]) for n in reachable):
+        cutset.update((u, v) for v in nbrs if v in non_reachable)
+
+    return cutset
+
+
+@nx._dispatchable(
+    graphs={"G": 0, "auxiliary?": 4},
+    preserve_node_attrs={"auxiliary": {"id": None}},
+    preserve_graph_attrs={"auxiliary"},
+)
+def minimum_st_node_cut(G, s, t, flow_func=None, auxiliary=None, residual=None):
+    r"""Returns a set of nodes of minimum cardinality that disconnect source
     from target in G.

     This function returns the set of nodes of minimum cardinality that,
@@ -249,12 +281,29 @@ def minimum_st_node_cut(G, s, t, flow_func=None, auxiliary=None, residual=None
         http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf

     """
-    pass
+    if auxiliary is None:
+        H = build_auxiliary_node_connectivity(G)
+    else:
+        H = auxiliary
+
+    mapping = H.graph.get("mapping", None)
+    if mapping is None:
+        raise nx.NetworkXError("Invalid auxiliary digraph.")
+    if G.has_edge(s, t) or G.has_edge(t, s):
+        return {}
+    kwargs = {"flow_func": flow_func, "residual": residual, "auxiliary": H}
+
+    # The edge cut in the auxiliary digraph corresponds to the node cut in the
+    # original graph.
+    edge_cut = minimum_st_edge_cut(H, f"{mapping[s]}B", f"{mapping[t]}A", **kwargs)
+    # Each node in the original graph maps to two nodes of the auxiliary graph
+    node_cut = {H.nodes[node]["id"] for edge in edge_cut for node in edge}
+    return node_cut - {s, t}


 @nx._dispatchable
 def minimum_node_cut(G, s=None, t=None, flow_func=None):
-    """Returns a set of nodes of minimum cardinality that disconnects G.
+    r"""Returns a set of nodes of minimum cardinality that disconnects G.

     If source and target nodes are provided, this function returns the
     set of nodes of minimum cardinality that, if removed, would destroy
@@ -346,12 +395,61 @@ def minimum_node_cut(G, s=None, t=None, flow_func=None):
         http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf

     """
-    pass
+    if (s is not None and t is None) or (s is None and t is not None):
+        raise nx.NetworkXError("Both source and target must be specified.")
+
+    # Local minimum node cut.
+    if s is not None and t is not None:
+        if s not in G:
+            raise nx.NetworkXError(f"node {s} not in graph")
+        if t not in G:
+            raise nx.NetworkXError(f"node {t} not in graph")
+        return minimum_st_node_cut(G, s, t, flow_func=flow_func)
+
+    # Global minimum node cut.
+    # Analog to the algorithm 11 for global node connectivity in [1].
+    if G.is_directed():
+        if not nx.is_weakly_connected(G):
+            raise nx.NetworkXError("Input graph is not connected")
+        iter_func = itertools.permutations
+
+        def neighbors(v):
+            return itertools.chain.from_iterable([G.predecessors(v), G.successors(v)])
+
+    else:
+        if not nx.is_connected(G):
+            raise nx.NetworkXError("Input graph is not connected")
+        iter_func = itertools.combinations
+        neighbors = G.neighbors
+
+    # Reuse the auxiliary digraph and the residual network.
+    H = build_auxiliary_node_connectivity(G)
+    R = build_residual_network(H, "capacity")
+    kwargs = {"flow_func": flow_func, "auxiliary": H, "residual": R}
+
+    # Choose a node with minimum degree.
+    v = min(G, key=G.degree)
+    # Initial node cutset is all neighbors of the node with minimum degree.
+    min_cut = set(G[v])
+    # Compute st node cuts between v and all its non-neighbor nodes in G.
+    for w in set(G) - set(neighbors(v)) - {v}:
+        this_cut = minimum_st_node_cut(G, v, w, **kwargs)
+        if len(min_cut) >= len(this_cut):
+            min_cut = this_cut
+    # Also for non adjacent pairs of neighbors of v.
+    for x, y in iter_func(neighbors(v), 2):
+        if y in G[x]:
+            continue
+        this_cut = minimum_st_node_cut(G, x, y, **kwargs)
+        if len(min_cut) >= len(this_cut):
+            min_cut = this_cut
+
+    return min_cut
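
Removing a returned minimum node cut must disconnect the graph (a sketch; the Petersen graph is an assumption):

    import networkx as nx

    G = nx.petersen_graph()  # node connectivity 3
    cut = nx.minimum_node_cut(G)
    assert len(cut) == 3
    H = G.copy()
    H.remove_nodes_from(cut)
    assert not nx.is_connected(H)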


 @nx._dispatchable
 def minimum_edge_cut(G, s=None, t=None, flow_func=None):
-    """Returns a set of edges of minimum cardinality that disconnects G.
+    r"""Returns a set of edges of minimum cardinality that disconnects G.

     If source and target nodes are provided, this function returns the
     set of edges of minimum cardinality that, if removed, would break
@@ -445,4 +543,69 @@ def minimum_edge_cut(G, s=None, t=None, flow_func=None):
         http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf

     """
-    pass
+    if (s is not None and t is None) or (s is None and t is not None):
+        raise nx.NetworkXError("Both source and target must be specified.")
+
+    # reuse auxiliary digraph and residual network
+    H = build_auxiliary_edge_connectivity(G)
+    R = build_residual_network(H, "capacity")
+    kwargs = {"flow_func": flow_func, "residual": R, "auxiliary": H}
+
+    # Local minimum edge cut if s and t are not None
+    if s is not None and t is not None:
+        if s not in G:
+            raise nx.NetworkXError(f"node {s} not in graph")
+        if t not in G:
+            raise nx.NetworkXError(f"node {t} not in graph")
+        return minimum_st_edge_cut(H, s, t, **kwargs)
+
+    # Global minimum edge cut
+    # Analog to the algorithm for global edge connectivity
+    if G.is_directed():
+        # Based on algorithm 8 in [1]
+        if not nx.is_weakly_connected(G):
+            raise nx.NetworkXError("Input graph is not connected")
+
+        # Initial cutset is all edges of a node with minimum degree
+        node = min(G, key=G.degree)
+        min_cut = set(G.edges(node))
+        nodes = list(G)
+        n = len(nodes)
+        for i in range(n):
+            try:
+                this_cut = minimum_st_edge_cut(H, nodes[i], nodes[i + 1], **kwargs)
+                if len(this_cut) <= len(min_cut):
+                    min_cut = this_cut
+            except IndexError:  # Last node!
+                this_cut = minimum_st_edge_cut(H, nodes[i], nodes[0], **kwargs)
+                if len(this_cut) <= len(min_cut):
+                    min_cut = this_cut
+
+        return min_cut
+
+    else:  # undirected
+        # Based on algorithm 6 in [1]
+        if not nx.is_connected(G):
+            raise nx.NetworkXError("Input graph is not connected")
+
+        # Initial cutset is all edges of a node with minimum degree
+        node = min(G, key=G.degree)
+        min_cut = set(G.edges(node))
+        # A dominating set is \lambda-covering
+        # We need a dominating set with at least two nodes
+        for node in G:
+            D = nx.dominating_set(G, start_with=node)
+            v = D.pop()
+            if D:
+                break
+        else:
+            # in complete graphs a dominating set is always a single node,
+            # thus we return min_cut, which now contains the edges of a node
+            # with minimum degree
+            return min_cut
+        for w in D:
+            this_cut = minimum_st_edge_cut(H, v, w, **kwargs)
+            if len(this_cut) <= len(min_cut):
+                min_cut = this_cut
+
+        return min_cut
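
Removing a returned minimum edge cut must separate s from t (a sketch; the 5-cycle is an assumption):

    import networkx as nx

    G = nx.cycle_graph(5)
    cut = nx.minimum_edge_cut(G, 0, 2)
    assert len(cut) == 2  # one edge from each arc of the cycle
    G.remove_edges_from(cut)
    assert not nx.has_path(G, 0, 2)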
diff --git a/networkx/algorithms/connectivity/disjoint_paths.py b/networkx/algorithms/connectivity/disjoint_paths.py
index ebe417b6b..e4634e7dd 100644
--- a/networkx/algorithms/connectivity/disjoint_paths.py
+++ b/networkx/algorithms/connectivity/disjoint_paths.py
@@ -1,17 +1,31 @@
 """Flow based node and edge disjoint paths."""
 import networkx as nx
-from networkx.algorithms.flow import edmonds_karp, preflow_push, shortest_augmenting_path
+
+# Define the default maximum flow function to use for the underlying
+# maximum flow computations
+from networkx.algorithms.flow import (
+    edmonds_karp,
+    preflow_push,
+    shortest_augmenting_path,
+)
 from networkx.exception import NetworkXNoPath
+
 default_flow_func = edmonds_karp
 from itertools import filterfalse as _filterfalse
+
+# Functions to build auxiliary data structures.
 from .utils import build_auxiliary_edge_connectivity, build_auxiliary_node_connectivity
-__all__ = ['edge_disjoint_paths', 'node_disjoint_paths']

+__all__ = ["edge_disjoint_paths", "node_disjoint_paths"]

-@nx._dispatchable(graphs={'G': 0, 'auxiliary?': 5}, preserve_edge_attrs={
-    'auxiliary': {'capacity': float('inf')}})
-def edge_disjoint_paths(G, s, t, flow_func=None, cutoff=None, auxiliary=
-    None, residual=None):
+
+@nx._dispatchable(
+    graphs={"G": 0, "auxiliary?": 5},
+    preserve_edge_attrs={"auxiliary": {"capacity": float("inf")}},
+)
+def edge_disjoint_paths(
+    G, s, t, flow_func=None, cutoff=None, auxiliary=None, residual=None
+):
     """Returns the edges disjoint paths between source and target.

     Edge disjoint paths are paths that do not share any edge. The
@@ -138,14 +152,93 @@ def edge_disjoint_paths(G, s, t, flow_func=None, cutoff=None, auxiliary=
     package.

     """
-    pass
-
-
-@nx._dispatchable(graphs={'G': 0, 'auxiliary?': 5}, preserve_node_attrs={
-    'auxiliary': {'id': None}}, preserve_graph_attrs={'auxiliary'})
-def node_disjoint_paths(G, s, t, flow_func=None, cutoff=None, auxiliary=
-    None, residual=None):
-    """Computes node disjoint paths between source and target.
+    if s not in G:
+        raise nx.NetworkXError(f"node {s} not in graph")
+    if t not in G:
+        raise nx.NetworkXError(f"node {t} not in graph")
+
+    if flow_func is None:
+        flow_func = default_flow_func
+
+    if auxiliary is None:
+        H = build_auxiliary_edge_connectivity(G)
+    else:
+        H = auxiliary
+
+    # Maximum possible edge disjoint paths
+    possible = min(H.out_degree(s), H.in_degree(t))
+    if not possible:
+        raise NetworkXNoPath
+
+    if cutoff is None:
+        cutoff = possible
+    else:
+        cutoff = min(cutoff, possible)
+
+    # Compute maximum flow between source and target. Flow functions in
+    # NetworkX return a residual network.
+    kwargs = {
+        "capacity": "capacity",
+        "residual": residual,
+        "cutoff": cutoff,
+        "value_only": True,
+    }
+    if flow_func is preflow_push:
+        del kwargs["cutoff"]
+    if flow_func is shortest_augmenting_path:
+        kwargs["two_phase"] = True
+    R = flow_func(H, s, t, **kwargs)
+
+    if R.graph["flow_value"] == 0:
+        raise NetworkXNoPath
+
+    # Saturated edges in the residual network form the edge disjoint paths
+    # between source and target
+    cutset = [
+        (u, v)
+        for u, v, d in R.edges(data=True)
+        if d["capacity"] == d["flow"] and d["flow"] > 0
+    ]
+    # This is equivalent to what flow.utils.build_flow_dict returns, but
+    # only for the nodes with saturated edges and without reporting 0 flows.
+    flow_dict = {n: {} for edge in cutset for n in edge}
+    for u, v in cutset:
+        flow_dict[u][v] = 1
+
+    # Rebuild the edge disjoint paths from the flow dictionary.
+    paths_found = 0
+    for v in list(flow_dict[s]):
+        if paths_found >= cutoff:
+            # preflow_push does not support cutoff: we have to
+            # keep track of the paths found and stop at cutoff.
+            break
+        path = [s]
+        if v == t:
+            path.append(v)
+            yield path
+            continue
+        u = v
+        while u != t:
+            path.append(u)
+            try:
+                u, _ = flow_dict[u].popitem()
+            except KeyError:
+                break
+        else:
+            path.append(t)
+            yield path
+            paths_found += 1
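
By Menger's theorem the number of paths equals the local edge connectivity, which a small bipartite example confirms (a sketch; the example graph is an assumption):

    import networkx as nx

    G = nx.complete_bipartite_graph(2, 3)  # parts {0, 1} and {2, 3, 4}
    paths = list(nx.edge_disjoint_paths(G, 0, 1))
    assert len(paths) == nx.edge_connectivity(G, 0, 1) == 3
    assert all(p[0] == 0 and p[-1] == 1 for p in paths)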
+
+
+@nx._dispatchable(
+    graphs={"G": 0, "auxiliary?": 5},
+    preserve_node_attrs={"auxiliary": {"id": None}},
+    preserve_graph_attrs={"auxiliary"},
+)
+def node_disjoint_paths(
+    G, s, t, flow_func=None, cutoff=None, auxiliary=None, residual=None
+):
+    r"""Computes node disjoint paths between source and target.

     Node disjoint paths are paths that only share their first and last
     nodes. The number of node independent paths between two nodes is
@@ -264,9 +357,51 @@ def node_disjoint_paths(G, s, t, flow_func=None, cutoff=None, auxiliary=
     :meth:`shortest_augmenting_path`

     """
-    pass
+    if s not in G:
+        raise nx.NetworkXError(f"node {s} not in graph")
+    if t not in G:
+        raise nx.NetworkXError(f"node {t} not in graph")
+
+    if auxiliary is None:
+        H = build_auxiliary_node_connectivity(G)
+    else:
+        H = auxiliary
+
+    mapping = H.graph.get("mapping", None)
+    if mapping is None:
+        raise nx.NetworkXError("Invalid auxiliary digraph.")
+
+    # Maximum possible edge disjoint paths
+    possible = min(H.out_degree(f"{mapping[s]}B"), H.in_degree(f"{mapping[t]}A"))
+    if not possible:
+        raise NetworkXNoPath
+
+    if cutoff is None:
+        cutoff = possible
+    else:
+        cutoff = min(cutoff, possible)
+
+    kwargs = {
+        "flow_func": flow_func,
+        "residual": residual,
+        "auxiliary": H,
+        "cutoff": cutoff,
+    }
+
+    # The edge disjoint paths in the auxiliary digraph correspond to the node
+    # disjoint paths in the original graph.
+    paths_edges = edge_disjoint_paths(H, f"{mapping[s]}B", f"{mapping[t]}A", **kwargs)
+    for path in paths_edges:
+        # Each node in the original graph maps to two nodes in auxiliary graph
+        yield list(_unique_everseen(H.nodes[node]["id"] for node in path))
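
Node-disjoint paths between opposite corners of a grid are limited by the corner degree (a sketch; the grid graph is an assumption):

    import networkx as nx

    G = nx.grid_2d_graph(3, 3)
    paths = list(nx.node_disjoint_paths(G, (0, 0), (2, 2)))
    assert len(paths) == 2  # the corner nodes only have degree 2
    for p in paths:
        assert p[0] == (0, 0) and p[-1] == (2, 2)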


 def _unique_everseen(iterable):
-    """List unique elements, preserving order. Remember all elements ever seen."""
-    pass
+    # Adapted from https://docs.python.org/3/library/itertools.html examples
+    "List unique elements, preserving order. Remember all elements ever seen."
+    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
+    seen = set()
+    seen_add = seen.add
+    for element in _filterfalse(seen.__contains__, iterable):
+        seen_add(element)
+        yield element
diff --git a/networkx/algorithms/connectivity/edge_augmentation.py b/networkx/algorithms/connectivity/edge_augmentation.py
index bffa31877..d095ed519 100644
--- a/networkx/algorithms/connectivity/edge_augmentation.py
+++ b/networkx/algorithms/connectivity/edge_augmentation.py
@@ -15,14 +15,15 @@ See Also
 import itertools as it
 import math
 from collections import defaultdict, namedtuple
+
 import networkx as nx
 from networkx.utils import not_implemented_for, py_random_state
-__all__ = ['k_edge_augmentation', 'is_k_edge_connected',
-    'is_locally_k_edge_connected']
+
+__all__ = ["k_edge_augmentation", "is_k_edge_connected", "is_locally_k_edge_connected"]


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def is_k_edge_connected(G, k):
     """Tests to see if a graph is k-edge-connected.
@@ -55,11 +56,25 @@ def is_k_edge_connected(G, k):
     >>> nx.is_k_edge_connected(G, k=2)
     False
     """
-    pass
-
-
-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+    if k < 1:
+        raise ValueError(f"k must be positive, not {k}")
+    # First try to quickly determine if G is not k-edge-connected
+    if G.number_of_nodes() < k + 1:
+        return False
+    elif any(d < k for n, d in G.degree()):
+        return False
+    else:
+        # Otherwise perform the full check
+        if k == 1:
+            return nx.is_connected(G)
+        elif k == 2:
+            return nx.is_connected(G) and not nx.has_bridges(G)
+        else:
+            return nx.edge_connectivity(G, cutoff=k) >= k
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def is_locally_k_edge_connected(G, s, t, k):
     """Tests to see if an edge in a graph is locally k-edge-connected.
@@ -101,11 +116,23 @@ def is_locally_k_edge_connected(G, s, t, k):
     >>> is_locally_k_edge_connected(G, 1, 5, k=2)
     True
     """
-    pass
-
-
-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+    if k < 1:
+        raise ValueError(f"k must be positive, not {k}")
+
+    # First try to quickly determine s, t is not k-locally-edge-connected in G
+    if G.degree(s) < k or G.degree(t) < k:
+        return False
+    else:
+        # Otherwise perform the full check
+        if k == 1:
+            return nx.has_path(G, s, t)
+        else:
+            localk = nx.connectivity.local_edge_connectivity(G, s, t, cutoff=k)
+            return localk >= k
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def k_edge_augmentation(G, k, avail=None, weight=None, partial=False):
     """Finds set of edges to k-edge-connect G.
@@ -217,7 +244,44 @@ def k_edge_augmentation(G, k, avail=None, weight=None, partial=False):
     >>> sorted(nx.k_edge_augmentation(G, k=2, avail=avail, partial=True))
     [(1, 5)]
     """
-    pass
+    try:
+        if k <= 0:
+            raise ValueError(f"k must be a positive integer, not {k}")
+        elif G.number_of_nodes() < k + 1:
+            msg = f"impossible to {k} connect in graph with less than {k + 1} nodes"
+            raise nx.NetworkXUnfeasible(msg)
+        elif avail is not None and len(avail) == 0:
+            if not nx.is_k_edge_connected(G, k):
+                raise nx.NetworkXUnfeasible("no available edges")
+            aug_edges = []
+        elif k == 1:
+            aug_edges = one_edge_augmentation(
+                G, avail=avail, weight=weight, partial=partial
+            )
+        elif k == 2:
+            aug_edges = bridge_augmentation(G, avail=avail, weight=weight)
+        else:
+            # raise NotImplementedError(f'not implemented for k>2. k={k}')
+            aug_edges = greedy_k_edge_augmentation(
+                G, k=k, avail=avail, weight=weight, seed=0
+            )
+        # Do eager evaluation so we can catch any exceptions
+        # Before executing partial code.
+        yield from list(aug_edges)
+    except nx.NetworkXUnfeasible:
+        if partial:
+            # Return all available edges
+            if avail is None:
+                aug_edges = complement_edges(G)
+            else:
+                # If we can't k-edge-connect the entire graph, try to
+                # k-edge-connect as much as possible
+                aug_edges = partial_k_edge_augmentation(
+                    G, k=k, avail=avail, weight=weight
+                )
+            yield from aug_edges
+        else:
+            raise
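
Augmenting a path graph to k = 2 and re-testing closes the loop between the two functions (a sketch; the path graph is an assumption):

    import networkx as nx

    G = nx.path_graph(4)
    assert nx.is_k_edge_connected(G, k=1) and not nx.is_k_edge_connected(G, k=2)
    aug = list(nx.k_edge_augmentation(G, k=2))
    G.add_edges_from(aug)
    assert nx.is_k_edge_connected(G, k=2)  # every bridge has been covered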


 @nx._dispatchable
@@ -273,11 +337,56 @@ def partial_k_edge_augmentation(G, k, avail, weight=None):
     >>> sorted(partial_k_edge_augmentation(G, k=2, avail=avail))
     [(1, 5), (1, 8)]
     """
-    pass
-

-@not_implemented_for('multigraph')
-@not_implemented_for('directed')
+    def _edges_between_disjoint(H, only1, only2):
+        """finds edges between disjoint nodes"""
+        only1_adj = {u: set(H.adj[u]) for u in only1}
+        for u, neighbs in only1_adj.items():
+            # Find the neighbors of u (a node in only1) that lie in only2
+            neighbs12 = neighbs.intersection(only2)
+            for v in neighbs12:
+                yield (u, v)
+
+    avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=G)
+
+    # Find which parts of the graph can be k-edge-connected
+    H = G.copy()
+    H.add_edges_from(
+        (
+            (u, v, {"weight": w, "generator": (u, v)})
+            for (u, v), w in zip(avail, avail_w)
+        )
+    )
+    k_edge_subgraphs = list(nx.k_edge_subgraphs(H, k=k))
+
+    # Generate edges to k-edge-connect internal subgraphs
+    for nodes in k_edge_subgraphs:
+        if len(nodes) > 1:
+            # Get the k-edge-connected subgraph
+            C = H.subgraph(nodes).copy()
+            # Find the internal edges that were available
+            sub_avail = {
+                d["generator"]: d["weight"]
+                for (u, v, d) in C.edges(data=True)
+                if "generator" in d
+            }
+            # Remove potential augmenting edges
+            C.remove_edges_from(sub_avail.keys())
+            # Find a subset of these edges that makes the component
+            # k-edge-connected and ignore the rest
+            yield from nx.k_edge_augmentation(C, k=k, avail=sub_avail)
+
+    # Generate all edges between CCs that could not be k-edge-connected
+    for cc1, cc2 in it.combinations(k_edge_subgraphs, 2):
+        for u, v in _edges_between_disjoint(H, cc1, cc2):
+            d = H.get_edge_data(u, v)
+            edge = d.get("generator", None)
+            if edge is not None:
+                yield edge
+
+
+@not_implemented_for("multigraph")
+@not_implemented_for("directed")
 @nx._dispatchable
 def one_edge_augmentation(G, avail=None, weight=None, partial=False):
     """Finds minimum weight set of edges to connect G.
@@ -323,11 +432,16 @@ def one_edge_augmentation(G, avail=None, weight=None, partial=False):
     --------
     :func:`k_edge_augmentation`
     """
-    pass
+    if avail is None:
+        return unconstrained_one_edge_augmentation(G)
+    else:
+        return weighted_one_edge_augmentation(
+            G, avail=avail, weight=weight, partial=partial
+        )


-@not_implemented_for('multigraph')
-@not_implemented_for('directed')
+@not_implemented_for("multigraph")
+@not_implemented_for("directed")
 @nx._dispatchable
 def bridge_augmentation(G, avail=None, weight=None):
     """Finds the a set of edges that bridge connects G.
@@ -370,20 +484,49 @@ def bridge_augmentation(G, avail=None, weight=None):
     --------
     :func:`k_edge_augmentation`
     """
-    pass
+    if G.number_of_nodes() < 3:
+        raise nx.NetworkXUnfeasible("impossible to bridge connect less than 3 nodes")
+    if avail is None:
+        return unconstrained_bridge_augmentation(G)
+    else:
+        return weighted_bridge_augmentation(G, avail, weight=weight)
+
+
+# --- Algorithms and Helpers ---


 def _ordered(u, v):
     """Returns the nodes in an undirected edge in lower-triangular order"""
-    pass
+    return (u, v) if u < v else (v, u)


 def _unpack_available_edges(avail, weight=None, G=None):
     """Helper to separate avail into edges and corresponding weights"""
-    pass
+    if weight is None:
+        weight = "weight"
+    if isinstance(avail, dict):
+        avail_uv = list(avail.keys())
+        avail_w = list(avail.values())
+    else:
+
+        def _try_getitem(d):
+            try:
+                return d[weight]
+            except TypeError:
+                return d

+        avail_uv = [tup[0:2] for tup in avail]
+        avail_w = [1 if len(tup) == 2 else _try_getitem(tup[-1]) for tup in avail]

-MetaEdge = namedtuple('MetaEdge', ('meta_uv', 'uv', 'w'))
+    if G is not None:
+        # Edges already in the graph are filtered
+        flags = [not G.has_edge(u, v) for u, v in avail_uv]
+        avail_uv = list(it.compress(avail_uv, flags))
+        avail_w = list(it.compress(avail_w, flags))
+    return avail_uv, avail_w
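
For reference, a sketch of the three `avail` formats this helper accepts (a
dict keyed by edge, (u, v, d) tuples, and bare (u, v) pairs); importing the
private helper directly is shown only for illustration.

    from networkx.algorithms.connectivity.edge_augmentation import (
        _unpack_available_edges,
    )

    print(_unpack_available_edges({(1, 5): 11, (2, 5): 10}))
    # ([(1, 5), (2, 5)], [11, 10])
    print(_unpack_available_edges([(1, 5, {"cost": 11})], weight="cost"))
    # ([(1, 5)], [11])
    print(_unpack_available_edges([(1, 5), (2, 5)]))
    # ([(1, 5), (2, 5)], [1, 1])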
+
+
+MetaEdge = namedtuple("MetaEdge", ("meta_uv", "uv", "w"))


 def _lightest_meta_edges(mapping, avail_uv, avail_w):
@@ -419,7 +562,20 @@ def _lightest_meta_edges(mapping, avail_uv, avail_w):
     >>> sorted(_lightest_meta_edges(mapping, avail_uv, avail_w))
     [MetaEdge(meta_uv=(0, 1), uv=(5, 2), w=15), MetaEdge(meta_uv=(0, 2), uv=(6, 1), w=50)]
     """
-    pass
+    grouped_wuv = defaultdict(list)
+    for w, (u, v) in zip(avail_w, avail_uv):
+        # Order the meta-edge so it can be used as a dict key
+        meta_uv = _ordered(mapping[u], mapping[v])
+        # Group each available edge using the meta-edge as a key
+        grouped_wuv[meta_uv].append((w, u, v))
+
+    # Now that all available edges are grouped, choose one per group
+    for (mu, mv), choices_wuv in grouped_wuv.items():
+        # Ignore available edges within the same meta-node
+        if mu != mv:
+            # Choose the lightest available edge belonging to each meta-edge
+            w, u, v = min(choices_wuv)
+            yield MetaEdge((mu, mv), (u, v), w)


 @nx._dispatchable
@@ -451,7 +607,18 @@ def unconstrained_one_edge_augmentation(G):
     >>> sorted(unconstrained_one_edge_augmentation(G))
     [(1, 4), (4, 6), (6, 7), (7, 8)]
     """
-    pass
+    ccs1 = list(nx.connected_components(G))
+    C = collapse(G, ccs1)
+    # When we are not constrained, we can just make a meta graph tree.
+    meta_nodes = list(C.nodes())
+    # build a path in the metagraph
+    meta_aug = list(zip(meta_nodes, meta_nodes[1:]))
+    # map that path to the original graph
+    inverse = defaultdict(list)
+    for k, v in C.graph["mapping"].items():
+        inverse[v].append(k)
+    for mu, mv in meta_aug:
+        yield (inverse[mu][0], inverse[mv][0])


 @nx._dispatchable
@@ -500,7 +667,27 @@ def weighted_one_edge_augmentation(G, avail, weight=None, partial=False):
     >>> sorted(weighted_one_edge_augmentation(G, avail))
     [(1, 5), (4, 7), (6, 1), (8, 2)]
     """
-    pass
+    avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=G)
+    # Collapse CCs in the original graph into nodes in a metagraph
+    # Then find an MST of the metagraph instead of the original graph
+    C = collapse(G, nx.connected_components(G))
+    mapping = C.graph["mapping"]
+    # Assign each available edge to an edge in the metagraph
+    candidate_mapping = _lightest_meta_edges(mapping, avail_uv, avail_w)
+    # nx.set_edge_attributes(C, name='weight', values=0)
+    C.add_edges_from(
+        (mu, mv, {"weight": w, "generator": uv})
+        for (mu, mv), uv, w in candidate_mapping
+    )
+    # Find MST of the meta graph
+    meta_mst = nx.minimum_spanning_tree(C)
+    if not partial and not nx.is_connected(meta_mst):
+        raise nx.NetworkXUnfeasible("Not possible to connect G with available edges")
+    # Yield the edge that generated the meta-edge
+    for mu, mv, d in meta_mst.edges(data=True):
+        if "generator" in d:
+            edge = d["generator"]
+            yield edge


 @nx._dispatchable
@@ -580,7 +767,82 @@ def unconstrained_bridge_augmentation(G):
     >>> sorted(unconstrained_bridge_augmentation(G))
     [(1, 4), (4, 0)]
     """
-    pass
+    # -----
+    # Mapping of terms from (Eswaran and Tarjan):
+    #     G = G_0 - the input graph
+    #     C = G_0' - the bridge condensation of G. (This is a forest of trees)
+    #     A1 = A_1 - the edges to connect the forest into a tree
+    #         leaf = pendant - a node with degree of 1
+
+    #     alpha(v) = maps the node v in G to its meta-node in C
+    #     beta(x) = maps the meta-node x in C to any node in the bridge
+    #         component of G corresponding to x.
+
+    # find the 2-edge-connected components of G
+    bridge_ccs = list(nx.connectivity.bridge_components(G))
+    # condense G into a forest C
+    C = collapse(G, bridge_ccs)
+
+    # Choose pairs of distinct leaf nodes in each tree. If this is not
+    # possible then make a pair using the single isolated node in the tree.
+    vset1 = [
+        tuple(cc) * 2  # case1: an isolated node
+        if len(cc) == 1
+        else sorted(cc, key=C.degree)[0:2]  # case2: pair of leaf nodes
+        for cc in nx.connected_components(C)
+    ]
+    if len(vset1) > 1:
+        # Use this set to construct edges that connect C into a tree.
+        nodes1 = [vs[0] for vs in vset1]
+        nodes2 = [vs[1] for vs in vset1]
+        A1 = list(zip(nodes1[1:], nodes2))
+    else:
+        A1 = []
+    # Connect each tree in the forest to construct an arborescence
+    T = C.copy()
+    T.add_edges_from(A1)
+
+    # If there are only two leaf nodes, we simply connect them.
+    leafs = [n for n, d in T.degree() if d == 1]
+    if len(leafs) == 1:
+        A2 = []
+    if len(leafs) == 2:
+        A2 = [tuple(leafs)]
+    else:
+        # Choose an arbitrary non-leaf root
+        try:
+            root = next(n for n, d in T.degree() if d > 1)
+        except StopIteration:  # no nodes found with degree > 1
+            return
+        # order the leaves of C by (induced directed) preorder
+        v2 = [n for n in nx.dfs_preorder_nodes(T, root) if T.degree(n) == 1]
+        # connecting first half of the leafs in pre-order to the second
+        # half will bridge connect the tree with the fewest edges.
+        half = math.ceil(len(v2) / 2)
+        A2 = list(zip(v2[:half], v2[-half:]))
+
+    # collect the edges used to augment the original forest
+    aug_tree_edges = A1 + A2
+
+    # Construct the mapping (beta) from meta-nodes to regular nodes
+    inverse = defaultdict(list)
+    for k, v in C.graph["mapping"].items():
+        inverse[v].append(k)
+    # sort so we choose minimum degree nodes first
+    inverse = {
+        mu: sorted(mapped, key=lambda u: (G.degree(u), u))
+        for mu, mapped in inverse.items()
+    }
+
+    # For each meta-edge, map back to an arbitrary pair in the original graph
+    G2 = G.copy()
+    for mu, mv in aug_tree_edges:
+        # Find the first available edge that doesn't exist and return it
+        for u, v in it.product(inverse[mu], inverse[mv]):
+            if not G2.has_edge(u, v):
+                G2.add_edge(u, v)
+                yield u, v
+                break
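
A quick check of the Eswaran-Tarjan construction above (editorial sketch): two
triangles joined by a single bridge need exactly one extra edge to become
bridge-connected.

    import networkx as nx
    from networkx.algorithms.connectivity.edge_augmentation import (
        unconstrained_bridge_augmentation,
    )

    G = nx.Graph([(0, 1), (1, 2), (2, 0), (2, 3), (3, 4), (4, 5), (5, 3)])
    aug = list(unconstrained_bridge_augmentation(G))
    G.add_edges_from(aug)
    assert len(aug) == 1 and not list(nx.bridges(G))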


 @nx._dispatchable
@@ -648,7 +910,110 @@ def weighted_bridge_augmentation(G, avail, weight=None):
     >>> sorted(weighted_bridge_augmentation(G, avail=avail))
     [(1, 5), (2, 5), (4, 5)]
     """
-    pass
+
+    if weight is None:
+        weight = "weight"
+
+    # If input G is not connected the approximation factor increases to 3
+    if not nx.is_connected(G):
+        H = G.copy()
+        connectors = list(one_edge_augmentation(H, avail=avail, weight=weight))
+        H.add_edges_from(connectors)
+
+        yield from connectors
+    else:
+        connectors = []
+        H = G
+
+    if len(avail) == 0:
+        if nx.has_bridges(H):
+            raise nx.NetworkXUnfeasible("no augmentation possible")
+
+    avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=H)
+
+    # Collapse input into a metagraph. Meta nodes are bridge-ccs
+    bridge_ccs = nx.connectivity.bridge_components(H)
+    C = collapse(H, bridge_ccs)
+
+    # Use the meta graph to shrink avail to a small feasible subset
+    mapping = C.graph["mapping"]
+    # Choose the minimum weight feasible edge in each group
+    meta_to_wuv = {
+        (mu, mv): (w, uv)
+        for (mu, mv), uv, w in _lightest_meta_edges(mapping, avail_uv, avail_w)
+    }
+
+    # Mapping of terms from (Khuller and Thurimella):
+    #     C         : G_0 = (V, E^0)
+    #        This is the metagraph where each node is a 2-edge-cc in G.
+    #        The edges in C represent bridges in the original graph.
+    #     (mu, mv)  : E - E^0  # they group both avail and given edges in E
+    #     T         : \Gamma
+    #     D         : G^D = (V, E_D)
+
+    #     The paper uses ancestor because children point to parents, which is
+    #     contrary to networkx standards.  So, we actually need to run
+    #     nx.least_common_ancestor on the reversed Tree.
+
+    # Pick an arbitrary leaf from C as the root
+    try:
+        root = next(n for n, d in C.degree() if d == 1)
+    except StopIteration:  # no nodes found with degree == 1
+        return
+    # Root C into a tree TR by directing all edges away from the root
+    # Note in their paper T directs edges towards the root
+    TR = nx.dfs_tree(C, root)
+
+    # Add to D the directed edges of T and set their weight to zero
+    # This indicates that it costs nothing to use edges that were given.
+    D = nx.reverse(TR).copy()
+
+    nx.set_edge_attributes(D, name="weight", values=0)
+
+    # The LCA of mu and mv in T is the shared ancestor of mu and mv that is
+    # located farthest from the root.
+    lca_gen = nx.tree_all_pairs_lowest_common_ancestor(
+        TR, root=root, pairs=meta_to_wuv.keys()
+    )
+
+    for (mu, mv), lca in lca_gen:
+        w, uv = meta_to_wuv[(mu, mv)]
+        if lca == mu:
+            # If u is an ancestor of v in TR, then add edge u->v to D
+            D.add_edge(lca, mv, weight=w, generator=uv)
+        elif lca == mv:
+            # If v is an ancestor of u in TR, then add edge v->u to D
+            D.add_edge(lca, mu, weight=w, generator=uv)
+        else:
+            # If neither u nor v is an ancestor of the other in TR
+            # let t = lca(TR, u, v) and add edges t->u and t->v
+            # Track the original edge that GENERATED these edges.
+            D.add_edge(lca, mu, weight=w, generator=uv)
+            D.add_edge(lca, mv, weight=w, generator=uv)
+
+    # Then compute a minimum rooted branching
+    try:
+        # Note the original edges must be directed towards the root for the
+        # branching to give us a bridge-augmentation.
+        A = _minimum_rooted_branching(D, root)
+    except nx.NetworkXException as err:
+        # If there is no branching then augmentation is not possible
+        raise nx.NetworkXUnfeasible("no 2-edge-augmentation possible") from err
+
+    # For each edge e, in the branching that did not belong to the directed
+    # tree T, add the corresponding edge that **GENERATED** it (this is not
+    # necessarily e itself!)
+
+    # ensure the third case does not generate edges twice
+    bridge_connectors = set()
+    for mu, mv in A.edges():
+        data = D.get_edge_data(mu, mv)
+        if "generator" in data:
+            # Add the avail edge that generated the branching edge.
+            edge = data["generator"]
+            bridge_connectors.add(edge)
+
+    yield from bridge_connectors
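
An illustrative run of the Khuller-Thurimella reduction (editorial sketch,
assuming numeric third elements of `avail` are read as weights, as in
_unpack_available_edges above): the single cheapest edge that closes a path
into a cycle wins over heavier alternatives.

    import networkx as nx
    from networkx.algorithms.connectivity.edge_augmentation import (
        weighted_bridge_augmentation,
    )

    G = nx.path_graph(4)                       # 0-1-2-3, three bridges
    avail = [(0, 3, 1), (0, 2, 5), (1, 3, 5)]  # (u, v, weight)
    print(sorted(weighted_bridge_augmentation(G, avail)))
    # [(0, 3)]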


 def _minimum_rooted_branching(D, root):
@@ -667,7 +1032,12 @@ def _minimum_rooted_branching(D, root):
     [1] Khuller, Samir (2002) Advanced Algorithms Lecture 24 Notes.
     https://web.archive.org/web/20121030033722/https://www.cs.umd.edu/class/spring2011/cmsc651/lec07.pdf
     """
-    pass
+    rooted = D.copy()
+    # root the graph by removing all predecessors to `root`.
+    rooted.remove_edges_from([(u, root) for u in D.predecessors(root)])
+    # Then compute the branching / arborescence.
+    A = nx.minimum_spanning_arborescence(rooted)
+    return A


 @nx._dispatchable(returns_graph=True)
@@ -712,7 +1082,34 @@ def collapse(G, grouped_nodes):
     >>> assert {5, 6, 7} in member_values
     >>> assert {"A"} in member_values
     """
-    pass
+    mapping = {}
+    members = {}
+    C = G.__class__()
+    i = 0  # required if G is empty
+    remaining = set(G.nodes())
+    for i, group in enumerate(grouped_nodes):
+        group = set(group)
+        assert remaining.issuperset(
+            group
+        ), "grouped nodes must exist in G and be disjoint"
+        remaining.difference_update(group)
+        members[i] = group
+        mapping.update((n, i) for n in group)
+    # remaining nodes are in their own group
+    for i, node in enumerate(remaining, start=i + 1):
+        group = {node}
+        members[i] = group
+        mapping.update((n, i) for n in group)
+    number_of_groups = i + 1
+    C.add_nodes_from(range(number_of_groups))
+    C.add_edges_from(
+        (mapping[u], mapping[v]) for u, v in G.edges() if mapping[u] != mapping[v]
+    )
+    # Add a list of members (ie original nodes) to each node (ie scc) in C.
+    nx.set_node_attributes(C, name="members", values=members)
+    # Add mapping dict as graph attribute
+    C.graph["mapping"] = mapping
+    return C


 @nx._dispatchable
@@ -740,16 +1137,26 @@ def complement_edges(G):
     >>> sorted(complement_edges(G))
     []
     """
-    pass
+    G_adj = G._adj  # Store as a variable to eliminate attribute lookup
+    if G.is_directed():
+        for u, v in it.combinations(G.nodes(), 2):
+            if v not in G_adj[u]:
+                yield (u, v)
+            if u not in G_adj[v]:
+                yield (v, u)
+    else:
+        for u, v in it.combinations(G.nodes(), 2):
+            if v not in G_adj[u]:
+                yield (u, v)


 def _compat_shuffle(rng, input):
     """wrapper around rng.shuffle for python 2 compatibility reasons"""
-    pass
+    rng.shuffle(input)


-@not_implemented_for('multigraph')
-@not_implemented_for('directed')
+@not_implemented_for("multigraph")
+@not_implemented_for("directed")
 @py_random_state(4)
 @nx._dispatchable
 def greedy_k_edge_augmentation(G, k, avail=None, weight=None, seed=None):
@@ -808,4 +1215,55 @@ def greedy_k_edge_augmentation(G, k, avail=None, weight=None, seed=None):
     >>> sorted(greedy_k_edge_augmentation(G, k=4, avail=avail, seed=3))
     [(1, 3), (1, 5), (1, 6), (2, 4), (2, 6), (3, 7), (4, 7), (5, 7)]
     """
-    pass
+    # Result set
+    aug_edges = []
+
+    done = is_k_edge_connected(G, k)
+    if done:
+        return
+    if avail is None:
+        # all edges are available
+        avail_uv = list(complement_edges(G))
+        avail_w = [1] * len(avail_uv)
+    else:
+        # Get the unique set of unweighted edges
+        avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=G)
+
+    # Greedy: order lightest edges. Use degree sum to tie-break
+    tiebreaker = [sum(map(G.degree, uv)) for uv in avail_uv]
+    avail_wduv = sorted(zip(avail_w, tiebreaker, avail_uv))
+    avail_uv = [uv for w, d, uv in avail_wduv]
+
+    # Incrementally add edges in until we are k-connected
+    H = G.copy()
+    for u, v in avail_uv:
+        done = False
+        if not is_locally_k_edge_connected(H, u, v, k=k):
+            # Only add edges in parts that are not yet locally k-edge-connected
+            aug_edges.append((u, v))
+            H.add_edge(u, v)
+            # Did adding this edge help?
+            if H.degree(u) >= k and H.degree(v) >= k:
+                done = is_k_edge_connected(H, k)
+        if done:
+            break
+
+    # Check for feasibility
+    if not done:
+        raise nx.NetworkXUnfeasible("not able to k-edge-connect with available edges")
+
+    # Randomized attempt to reduce the size of the solution
+    _compat_shuffle(seed, aug_edges)
+    for u, v in list(aug_edges):
+        # Don't remove if we know it would break connectivity
+        if H.degree(u) <= k or H.degree(v) <= k:
+            continue
+        H.remove_edge(u, v)
+        aug_edges.remove((u, v))
+        if not is_k_edge_connected(H, k=k):
+            # If removing this edge breaks feasibility, undo
+            H.add_edge(u, v)
+            aug_edges.append((u, v))
+
+    # Generate results
+    yield from aug_edges
diff --git a/networkx/algorithms/connectivity/edge_kcomponents.py b/networkx/algorithms/connectivity/edge_kcomponents.py
index 35329f643..e071f4d3d 100644
--- a/networkx/algorithms/connectivity/edge_kcomponents.py
+++ b/networkx/algorithms/connectivity/edge_kcomponents.py
@@ -10,13 +10,19 @@ least k.
 """
 import itertools as it
 from functools import partial
+
 import networkx as nx
 from networkx.utils import arbitrary_element, not_implemented_for
-__all__ = ['k_edge_components', 'k_edge_subgraphs', 'bridge_components',
-    'EdgeComponentAuxGraph']
+
+__all__ = [
+    "k_edge_components",
+    "k_edge_subgraphs",
+    "bridge_components",
+    "EdgeComponentAuxGraph",
+]


-@not_implemented_for('multigraph')
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def k_edge_components(G, k):
     """Generates nodes in each maximal k-edge-connected component in G.
@@ -80,10 +86,27 @@ def k_edge_components(G, k):
         k-edge-connected components.
         http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0136264
     """
-    pass
-
-
-@not_implemented_for('multigraph')
+    # Compute k-edge-ccs using the most efficient algorithms available.
+    if k < 1:
+        raise ValueError("k cannot be less than 1")
+    if G.is_directed():
+        if k == 1:
+            return nx.strongly_connected_components(G)
+        else:
+            # TODO: investigate https://arxiv.org/abs/1412.6466 for k=2
+            aux_graph = EdgeComponentAuxGraph.construct(G)
+            return aux_graph.k_edge_components(k)
+    else:
+        if k == 1:
+            return nx.connected_components(G)
+        elif k == 2:
+            return bridge_components(G)
+        else:
+            aux_graph = EdgeComponentAuxGraph.construct(G)
+            return aux_graph.k_edge_components(k)
+
+
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def k_edge_subgraphs(G, k):
     """Generates nodes in each maximal k-edge-connected subgraph in G.
@@ -144,7 +167,22 @@ def k_edge_subgraphs(G, k):
         Technology 2012 480–491.
         https://openproceedings.org/2012/conf/edbt/ZhouLYLCL12.pdf
     """
-    pass
+    if k < 1:
+        raise ValueError("k cannot be less than 1")
+    if G.is_directed():
+        if k <= 1:
+            # For directed graphs,
+            # when k == 1, k-edge-ccs and k-edge-subgraphs are the same
+            return k_edge_components(G, k)
+        else:
+            return _k_edge_subgraphs_nodes(G, k)
+    else:
+        if k <= 2:
+            # For undirected graphs,
+            # when k <= 2, k-edge-ccs and k-edge-subgraphs are the same
+            return k_edge_components(G, k)
+        else:
+            return _k_edge_subgraphs_nodes(G, k)


 def _k_edge_subgraphs_nodes(G, k):
@@ -152,11 +190,12 @@ def _k_edge_subgraphs_nodes(G, k):

     This allows k_edge_subgraphs to return a generator.
     """
-    pass
+    for C in general_k_edge_subgraphs(G, k):
+        yield set(C.nodes())


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def bridge_components(G):
     """Finds all bridge-connected components G.
@@ -194,11 +233,13 @@ def bridge_components(G):
     >>> sorted(map(sorted, bridge_components(G)))
     [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
     """
-    pass
+    H = G.copy()
+    H.remove_edges_from(nx.bridges(G))
+    yield from nx.connected_components(H)


 class EdgeComponentAuxGraph:
-    """A simple algorithm to find all k-edge-connected components in a graph.
+    r"""A simple algorithm to find all k-edge-connected components in a graph.

     Constructing the auxiliary graph (which may take some time) allows for the
     k-edge-ccs to be found in linear time for arbitrary k.
@@ -207,7 +248,7 @@ class EdgeComponentAuxGraph:
     -----
     This implementation is based on [1]_. The idea is to construct an auxiliary
     graph from which the k-edge-ccs can be extracted in linear time. The
-    auxiliary graph is constructed in $O(|V|\\cdot F)$ operations, where F is the
+    auxiliary graph is constructed in $O(|V|\cdot F)$ operations, where F is the
     complexity of max flow. Querying the components takes an additional $O(|V|)$
     operations. This algorithm can be slow for large graphs, but it handles an
     arbitrary k and works for both directed and undirected inputs.
@@ -269,6 +310,7 @@ class EdgeComponentAuxGraph:
     [[1, 4], [2], [3]]
     """

+    # @not_implemented_for('multigraph')  # TODO: fix decor for classmethods
     @classmethod
     def construct(EdgeComponentAuxGraph, G):
         """Builds an auxiliary graph encoding edge-connectivity between nodes.
@@ -290,7 +332,52 @@ class EdgeComponentAuxGraph:
         ----------
         G : NetworkX graph
         """
-        pass
+        # workaround for classmethod decorator
+        not_implemented_for("multigraph")(lambda G: G)(G)
+
+        def _recursive_build(H, A, source, avail):
+            # Terminate once the flow has been computed for every node.
+            if {source} == avail:
+                return
+            # pick an arbitrary node as the sink
+            sink = arbitrary_element(avail - {source})
+            # find the minimum cut and its weight
+            value, (S, T) = nx.minimum_cut(H, source, sink)
+            if H.is_directed():
+                # check if the reverse direction has a smaller cut
+                value_, (T_, S_) = nx.minimum_cut(H, sink, source)
+                if value_ < value:
+                    value, S, T = value_, S_, T_
+            # add edge with weight of cut to the aux graph
+            A.add_edge(source, sink, weight=value)
+            # recursively call until all but one node is used
+            _recursive_build(H, A, source, avail.intersection(S))
+            _recursive_build(H, A, sink, avail.intersection(T))
+
+        # Copy input to ensure all edges have unit capacity
+        H = G.__class__()
+        H.add_nodes_from(G.nodes())
+        H.add_edges_from(G.edges(), capacity=1)
+
+        # A is the auxiliary graph to be constructed
+        # It is a weighted undirected tree
+        A = nx.Graph()
+
+        # Pick an arbitrary node as the source
+        if H.number_of_nodes() > 0:
+            source = arbitrary_element(H.nodes())
+            # Initialize a set of elements that can be chosen as the sink
+            avail = set(H.nodes())
+
+            # This constructs A
+            _recursive_build(H, A, source, avail)
+
+        # This class is a container that holds the auxiliary graph A and
+        # provides access to the k_edge_components function.
+        self = EdgeComponentAuxGraph()
+        self.A = A
+        self.H = H
+        return self

     def k_edge_components(self, k):
         """Queries the auxiliary graph for k-edge-connected components.
@@ -311,7 +398,19 @@ class EdgeComponentAuxGraph:
         k from the auxiliary graph.  The resulting connected components are the
         k-edge-ccs in the original graph.
         """
-        pass
+        if k < 1:
+            raise ValueError("k cannot be less than 1")
+        A = self.A
+        # "traverse the auxiliary graph A and delete all edges with weights less
+        # than k"
+        aux_weights = nx.get_edge_attributes(A, "weight")
+        # Create a relevant graph with the auxiliary edges with weights >= k
+        R = nx.Graph()
+        R.add_nodes_from(A.nodes())
+        R.add_edges_from(e for e, w in aux_weights.items() if w >= k)
+
+        # Return the nodes that are k-edge-connected in the original graph
+        yield from nx.connected_components(R)

     def k_edge_subgraphs(self, k):
         """Queries the auxiliary graph for k-edge-connected subgraphs.
@@ -334,12 +433,49 @@ class EdgeComponentAuxGraph:
         But for multiple values of k, it can be faster to build AuxGraph and
         then use this method.
         """
-        pass
+        if k < 1:
+            raise ValueError("k cannot be less than 1")
+        H = self.H
+        A = self.A
+        # "traverse the auxiliary graph A and delete all edges with weights less
+        # than k"
+        aux_weights = nx.get_edge_attributes(A, "weight")
+        # Create a relevant graph with the auxiliary edges with weights >= k
+        R = nx.Graph()
+        R.add_nodes_from(A.nodes())
+        R.add_edges_from(e for e, w in aux_weights.items() if w >= k)
+
+        # Return the components whose subgraphs are k-edge-connected
+        for cc in nx.connected_components(R):
+            if len(cc) < k:
+                # Early return optimization
+                for node in cc:
+                    yield {node}
+            else:
+                # Call subgraph solution to refine the results
+                C = H.subgraph(cc)
+                yield from k_edge_subgraphs(C, k)
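
Usage sketch for the class as a whole (an editorial illustration): pay the
flow-based construction cost once, then query several values of k cheaply.

    import networkx as nx
    from networkx.algorithms.connectivity import EdgeComponentAuxGraph

    G = nx.barbell_graph(4, 0)               # two K4 cliques and one bridge
    aux = EdgeComponentAuxGraph.construct(G)
    print(sorted(map(sorted, aux.k_edge_components(k=2))))
    # [[0, 1, 2, 3], [4, 5, 6, 7]]
    print(sorted(map(sorted, aux.k_edge_components(k=4))))
    # [[0], [1], [2], [3], [4], [5], [6], [7]]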


 def _low_degree_nodes(G, k, nbunch=None):
     """Helper for finding nodes with degree less than k."""
-    pass
+    # Nodes with degree less than k cannot be k-edge-connected.
+    if G.is_directed():
+        # Consider both in and out degree in the directed case
+        seen = set()
+        for node, degree in G.out_degree(nbunch):
+            if degree < k:
+                seen.add(node)
+                yield node
+        for node, degree in G.in_degree(nbunch):
+            if node not in seen and degree < k:
+                seen.add(node)
+                yield node
+    else:
+        # Only the degree matters in the undirected case
+        for node, degree in G.degree(nbunch):
+            if degree < k:
+                yield node


 def _high_degree_components(G, k):
@@ -348,7 +484,23 @@ def _high_degree_components(G, k):
     Removes and generates each node with degree less than k.  Then generates
     remaining components where all nodes have degree at least k.
     """
-    pass
+    # Iteratively remove parts of the graph that are not k-edge-connected
+    H = G.copy()
+    singletons = set(_low_degree_nodes(H, k))
+    while singletons:
+        # Only search neighbors of removed nodes
+        nbunch = set(it.chain.from_iterable(map(H.neighbors, singletons)))
+        nbunch.difference_update(singletons)
+        H.remove_nodes_from(singletons)
+        for node in singletons:
+            yield {node}
+        singletons = set(_low_degree_nodes(H, k, nbunch))
+
+    # Note: remaining connected components may not be k-edge-connected
+    if G.is_directed():
+        yield from nx.strongly_connected_components(H)
+    else:
+        yield from nx.connected_components(H)


 @nx._dispatchable(returns_graph=True)
@@ -405,4 +557,35 @@ def general_k_edge_subgraphs(G, k):
     >>> sorted(len(k_sg) for k_sg in k_edge_subgraphs(G, k=3))
     [1, 1, 1, 4, 4]
     """
-    pass
+    if k < 1:
+        raise ValueError("k cannot be less than 1")
+
+    # Node pruning optimization (incorporates early return)
+    # find_ccs is either connected_components/strongly_connected_components
+    find_ccs = partial(_high_degree_components, k=k)
+
+    # Quick return optimization
+    if G.number_of_nodes() < k:
+        for node in G.nodes():
+            yield G.subgraph([node]).copy()
+        return
+
+    # Intermediate results
+    R0 = {G.subgraph(cc).copy() for cc in find_ccs(G)}
+    # Subdivide CCs in the intermediate results until they are k-conn
+    while R0:
+        G1 = R0.pop()
+        if G1.number_of_nodes() == 1:
+            yield G1
+        else:
+            # Find a global minimum cut
+            cut_edges = nx.minimum_edge_cut(G1)
+            cut_value = len(cut_edges)
+            if cut_value < k:
+                # G1 is not k-edge-connected, so subdivide it
+                G1.remove_edges_from(cut_edges)
+                for cc in find_ccs(G1):
+                    R0.add(G1.subgraph(cc).copy())
+            else:
+                # Otherwise we found a k-edge-connected subgraph
+                yield G1
diff --git a/networkx/algorithms/connectivity/kcomponents.py b/networkx/algorithms/connectivity/kcomponents.py
index b08258abf..50d5c8f41 100644
--- a/networkx/algorithms/connectivity/kcomponents.py
+++ b/networkx/algorithms/connectivity/kcomponents.py
@@ -4,17 +4,22 @@ Moody and White algorithm for k-components
 from collections import defaultdict
 from itertools import combinations
 from operator import itemgetter
+
 import networkx as nx
+
+# Define the default maximum flow function.
 from networkx.algorithms.flow import edmonds_karp
 from networkx.utils import not_implemented_for
+
 default_flow_func = edmonds_karp
-__all__ = ['k_components']
+
+__all__ = ["k_components"]


-@not_implemented_for('directed')
+@not_implemented_for("directed")
 @nx._dispatchable
 def k_components(G, flow_func=None):
-    """Returns the k-component structure of a graph G.
+    r"""Returns the k-component structure of a graph G.

     A `k`-component is a maximal subgraph of a graph G that has, at least,
     node connectivity `k`: we need to remove at least `k` nodes to break it
@@ -98,7 +103,56 @@ def k_components(G, flow_func=None):
             https://arxiv.org/pdf/1503.04476v1

     """
-    pass
+    # Dictionary with connectivity level (k) as keys and a list of
+    # sets of nodes that form a k-component as values. Note that
+    # k-components can overlap (but only in k - 1 nodes).
+    k_components = defaultdict(list)
+    # Define default flow function
+    if flow_func is None:
+        flow_func = default_flow_func
+    # Bicomponents as a base to check for higher order k-components
+    for component in nx.connected_components(G):
+        # isolated nodes have connectivity 0
+        comp = set(component)
+        if len(comp) > 1:
+            k_components[1].append(comp)
+    bicomponents = [G.subgraph(c) for c in nx.biconnected_components(G)]
+    for bicomponent in bicomponents:
+        bicomp = set(bicomponent)
+        # avoid considering dyads as bicomponents
+        if len(bicomp) > 2:
+            k_components[2].append(bicomp)
+    for B in bicomponents:
+        if len(B) <= 2:
+            continue
+        k = nx.node_connectivity(B, flow_func=flow_func)
+        if k > 2:
+            k_components[k].append(set(B))
+        # Perform cuts in a DFS like order.
+        cuts = list(nx.all_node_cuts(B, k=k, flow_func=flow_func))
+        stack = [(k, _generate_partition(B, cuts, k))]
+        while stack:
+            (parent_k, partition) = stack[-1]
+            try:
+                nodes = next(partition)
+                C = B.subgraph(nodes)
+                this_k = nx.node_connectivity(C, flow_func=flow_func)
+                if this_k > parent_k and this_k > 2:
+                    k_components[this_k].append(set(C))
+                cuts = list(nx.all_node_cuts(C, k=this_k, flow_func=flow_func))
+                if cuts:
+                    stack.append((this_k, _generate_partition(C, cuts, this_k)))
+            except StopIteration:
+                stack.pop()
+
+    # This is necessary because k-components may only be reported at their
+    # maximum k level. But we want to return a dictionary in which keys are
+    # connectivity levels and values list of sets of components, without
+    # skipping any connectivity level. Also, it's possible that subsets of
+    # an already detected k-component appear at a level k. Checking for this
+    # in the while loop above penalizes the common case. Thus we also have to
+    # _consolidate all connectivity levels in _reconstruct_k_components.
+    return _reconstruct_k_components(k_components)
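
A small end-to-end check (editorial sketch): the Petersen graph is 3-connected,
so every level from 1 to 3 holds the same single 10-node component.

    import networkx as nx

    G = nx.petersen_graph()
    result = nx.k_components(G)
    print({k: [len(c) for c in comps] for k, comps in sorted(result.items())})
    # {1: [10], 2: [10], 3: [10]}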


 def _consolidate(sets, k):
@@ -113,4 +167,56 @@ def _consolidate(sets, k):
     is no licence for the code.

     """
-    pass
+    G = nx.Graph()
+    nodes = dict(enumerate(sets))
+    G.add_nodes_from(nodes)
+    G.add_edges_from(
+        (u, v) for u, v in combinations(nodes, 2) if len(nodes[u] & nodes[v]) >= k
+    )
+    for component in nx.connected_components(G):
+        yield set.union(*[nodes[n] for n in component])
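
A sketch of what _consolidate does (editorial; the helper is private, so the
direct import is for illustration only): sets sharing at least k nodes are
merged transitively.

    from networkx.algorithms.connectivity.kcomponents import _consolidate

    sets = [{1, 2, 3}, {3, 4, 5}, {7, 8}]
    print(list(_consolidate(sets, k=1)))  # [{1, 2, 3, 4, 5}, {7, 8}]
    print(list(_consolidate(sets, k=2)))  # [{1, 2, 3}, {3, 4, 5}, {7, 8}]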
+
+
+def _generate_partition(G, cuts, k):
+    def has_nbrs_in_partition(G, node, partition):
+        return any(n in partition for n in G[node])
+
+    components = []
+    nodes = {n for n, d in G.degree() if d > k} - {n for cut in cuts for n in cut}
+    H = G.subgraph(nodes)
+    for cc in nx.connected_components(H):
+        component = set(cc)
+        for cut in cuts:
+            for node in cut:
+                if has_nbrs_in_partition(G, node, cc):
+                    component.add(node)
+        if len(component) < G.order():
+            components.append(component)
+    yield from _consolidate(components, k + 1)
+
+
+def _reconstruct_k_components(k_comps):
+    result = {}
+    max_k = max(k_comps)
+    for k in reversed(range(1, max_k + 1)):
+        if k == max_k:
+            result[k] = list(_consolidate(k_comps[k], k))
+        elif k not in k_comps:
+            result[k] = list(_consolidate(result[k + 1], k))
+        else:
+            nodes_at_k = set.union(*k_comps[k])
+            to_add = [c for c in result[k + 1] if any(n not in nodes_at_k for n in c)]
+            if to_add:
+                result[k] = list(_consolidate(k_comps[k] + to_add, k))
+            else:
+                result[k] = list(_consolidate(k_comps[k], k))
+    return result
+
+
+def build_k_number_dict(kcomps):
+    result = {}
+    for k, comps in sorted(kcomps.items(), key=itemgetter(0)):
+        for comp in comps:
+            for node in comp:
+                result[node] = k
+    return result
diff --git a/networkx/algorithms/connectivity/kcutsets.py b/networkx/algorithms/connectivity/kcutsets.py
index 3163b3093..53f8d3b8f 100644
--- a/networkx/algorithms/connectivity/kcutsets.py
+++ b/networkx/algorithms/connectivity/kcutsets.py
@@ -5,16 +5,25 @@ import copy
 from collections import defaultdict
 from itertools import combinations
 from operator import itemgetter
+
 import networkx as nx
-from networkx.algorithms.flow import build_residual_network, edmonds_karp, shortest_augmenting_path
+from networkx.algorithms.flow import (
+    build_residual_network,
+    edmonds_karp,
+    shortest_augmenting_path,
+)
+
 from .utils import build_auxiliary_node_connectivity
+
 default_flow_func = edmonds_karp
-__all__ = ['all_node_cuts']
+
+
+__all__ = ["all_node_cuts"]


 @nx._dispatchable
 def all_node_cuts(G, k=None, flow_func=None):
-    """Returns all minimum k cutsets of an undirected graph G.
+    r"""Returns all minimum k cutsets of an undirected graph G.

     This implementation is based on Kanevsky's algorithm [1]_ for finding all
     minimum-size node cut-sets of an undirected graph G; ie the set (or sets)
@@ -79,9 +88,147 @@ def all_node_cuts(G, k=None, flow_func=None):
             http://onlinelibrary.wiley.com/doi/10.1002/net.3230230604/abstract

     """
-    pass
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Input graph is disconnected.")
+
+    # Address some corner cases first.
+    # Complete graphs have no node cutsets at all.
+
+    if nx.density(G) == 1:
+        yield from ()
+        return
+
+    # Initialize data structures.
+    # Keep track of the cuts already computed so we do not repeat them.
+    seen = []
+    # Even-Tarjan reduction is what we call auxiliary digraph
+    # for node connectivity.
+    H = build_auxiliary_node_connectivity(G)
+    H_nodes = H.nodes  # for speed
+    mapping = H.graph["mapping"]
+    # Keep a copy of original predecessors, H will be modified later.
+    # Shallow copy is enough.
+    original_H_pred = copy.copy(H._pred)
+    R = build_residual_network(H, "capacity")
+    kwargs = {"capacity": "capacity", "residual": R}
+    # Define default flow function
+    if flow_func is None:
+        flow_func = default_flow_func
+    if flow_func is shortest_augmenting_path:
+        kwargs["two_phase"] = True
+    # Begin the actual algorithm
+    # step 1: Find node connectivity k of G
+    if k is None:
+        k = nx.node_connectivity(G, flow_func=flow_func)
+    # step 2:
+    # Find k nodes with top degree, call it X:
+    X = {n for n, d in sorted(G.degree(), key=itemgetter(1), reverse=True)[:k]}
+    # Check if X is a k-node-cutset
+    if _is_separating_set(G, X):
+        seen.append(X)
+        yield X
+
+    for x in X:
+        # step 3: Compute local connectivity flow of x with all other
+        # non adjacent nodes in G
+        non_adjacent = set(G) - {x} - set(G[x])
+        for v in non_adjacent:
+            # step 4: compute maximum flow in an Even-Tarjan reduction H of G
+            # and step 5: build the associated residual network R
+            R = flow_func(H, f"{mapping[x]}B", f"{mapping[v]}A", **kwargs)
+            flow_value = R.graph["flow_value"]
+
+            if flow_value == k:
+                # Find the nodes incident to the flow.
+                E1 = flowed_edges = [
+                    (u, w) for (u, w, d) in R.edges(data=True) if d["flow"] != 0
+                ]
+                VE1 = incident_nodes = {n for edge in E1 for n in edge}
+                # Remove saturated edges from the residual network.
+                # Note that reversed edges are introduced with capacity 0
+                # in the residual graph and they need to be removed too.
+                saturated_edges = [
+                    (u, w, d)
+                    for (u, w, d) in R.edges(data=True)
+                    if d["capacity"] == d["flow"] or d["capacity"] == 0
+                ]
+                R.remove_edges_from(saturated_edges)
+                R_closure = nx.transitive_closure(R)
+                # step 6: shrink the strongly connected components of
+                # residual flow network R and call it L.
+                L = nx.condensation(R)
+                cmap = L.graph["mapping"]
+                inv_cmap = defaultdict(list)
+                for n, scc in cmap.items():
+                    inv_cmap[scc].append(n)
+                # Find the incident nodes in the condensed graph.
+                VE1 = {cmap[n] for n in VE1}
+                # step 7: Compute all antichains of L;
+                # they map to closed sets in H.
+                # Any edge in H that links a closed set is part of a cutset.
+                for antichain in nx.antichains(L):
+                    # Only antichains that are subsets of incident nodes count.
+                    # Lemma 8 in reference.
+                    if not set(antichain).issubset(VE1):
+                        continue
+                    # Nodes in an antichain of the condensation graph of
+                    # the residual network map to a closed set of nodes that
+                    # define a node partition of the auxiliary digraph H
+                    # through taking all of antichain's predecessors in the
+                    # transitive closure.
+                    S = set()
+                    for scc in antichain:
+                        S.update(inv_cmap[scc])
+                    S_ancestors = set()
+                    for n in S:
+                        S_ancestors.update(R_closure._pred[n])
+                    S.update(S_ancestors)
+                    if f"{mapping[x]}B" not in S or f"{mapping[v]}A" in S:
+                        continue
+                    # Find the cutset that links the node partition (S,~S) in H
+                    cutset = set()
+                    for u in S:
+                        cutset.update((u, w) for w in original_H_pred[u] if w not in S)
+                    # The edges in H that form the cutset are internal edges
+                    # (ie edges that represent a node of the original graph G)
+                    if any(H_nodes[u]["id"] != H_nodes[w]["id"] for u, w in cutset):
+                        continue
+                    node_cut = {H_nodes[u]["id"] for u, _ in cutset}
+
+                    if len(node_cut) == k:
+                        # The cut is invalid if it includes internal edges of
+                        # end nodes. The other half of Lemma 8 in ref.
+                        if x in node_cut or v in node_cut:
+                            continue
+                        if node_cut not in seen:
+                            yield node_cut
+                            seen.append(node_cut)
+
+                # Add an edge (x, v) to make sure that we do not
+                # find this cutset again. This is equivalent
+                # of adding the edge in the input graph
+                # G.add_edge(x, v) and then regenerate H and R:
+                # Add edges to the auxiliary digraph.
+                # See build_residual_network for convention we used
+                # in residual graphs.
+                H.add_edge(f"{mapping[x]}B", f"{mapping[v]}A", capacity=1)
+                H.add_edge(f"{mapping[v]}B", f"{mapping[x]}A", capacity=1)
+                # Add edges to the residual network.
+                R.add_edge(f"{mapping[x]}B", f"{mapping[v]}A", capacity=1)
+                R.add_edge(f"{mapping[v]}A", f"{mapping[x]}B", capacity=0)
+                R.add_edge(f"{mapping[v]}B", f"{mapping[x]}A", capacity=1)
+                R.add_edge(f"{mapping[x]}A", f"{mapping[v]}B", capacity=0)
+
+                # Add again the saturated edges to reuse the residual network
+                R.add_edges_from(saturated_edges)
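
A quick check of the generator (editorial sketch): in a 5-cycle every pair of
non-adjacent nodes is a minimum node cut, giving five cuts of size two.

    import networkx as nx

    G = nx.cycle_graph(5)
    cuts = list(nx.all_node_cuts(G))
    assert len(cuts) == 5 and all(len(cut) == 2 for cut in cuts)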


 def _is_separating_set(G, cut):
     """Assumes that the input graph is connected"""
-    pass
+    if len(cut) == len(G) - 1:
+        return True
+
+    H = nx.restricted_view(G, cut, [])
+    if nx.is_connected(H):
+        return False
+    return True
diff --git a/networkx/algorithms/connectivity/stoerwagner.py b/networkx/algorithms/connectivity/stoerwagner.py
index 2e4999cd0..f6814b003 100644
--- a/networkx/algorithms/connectivity/stoerwagner.py
+++ b/networkx/algorithms/connectivity/stoerwagner.py
@@ -2,16 +2,19 @@
 Stoer-Wagner minimum cut algorithm.
 """
 from itertools import islice
+
 import networkx as nx
+
 from ...utils import BinaryHeap, arbitrary_element, not_implemented_for
-__all__ = ['stoer_wagner']
+
+__all__ = ["stoer_wagner"]


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
-@nx._dispatchable(edge_attrs='weight')
-def stoer_wagner(G, weight='weight', heap=BinaryHeap):
-    """Returns the weighted minimum edge cut using the Stoer-Wagner algorithm.
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
+def stoer_wagner(G, weight="weight", heap=BinaryHeap):
+    r"""Returns the weighted minimum edge cut using the Stoer-Wagner algorithm.

     Determine the minimum edge cut of a connected graph using the
     Stoer-Wagner algorithm. In weighted cases, all weights must be
@@ -22,9 +25,9 @@ def stoer_wagner(G, weight='weight', heap=BinaryHeap):
     ============== =============================================
     Type of heap   Running time
     ============== =============================================
-    Binary heap    $O(n (m + n) \\log n)$
-    Fibonacci heap $O(nm + n^2 \\log n)$
-    Pairing heap   $O(2^{2 \\sqrt{\\log \\log n}} nm + n^2 \\log n)$
+    Binary heap    $O(n (m + n) \log n)$
+    Fibonacci heap $O(nm + n^2 \log n)$
+    Pairing heap   $O(2^{2 \sqrt{\log \log n}} nm + n^2 \log n)$
     ============== =============================================

     Parameters
@@ -81,4 +84,68 @@ def stoer_wagner(G, weight='weight', heap=BinaryHeap):
     >>> cut_value
     4
     """
-    pass
+    n = len(G)
+    if n < 2:
+        raise nx.NetworkXError("graph has less than two nodes.")
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("graph is not connected.")
+
+    # Make a copy of the graph for internal use.
+    G = nx.Graph(
+        (u, v, {"weight": e.get(weight, 1)}) for u, v, e in G.edges(data=True) if u != v
+    )
+    G.__networkx_cache__ = None  # Disable caching
+
+    for u, v, e in G.edges(data=True):
+        if e["weight"] < 0:
+            raise nx.NetworkXError("graph has a negative-weighted edge.")
+
+    cut_value = float("inf")
+    nodes = set(G)
+    contractions = []  # contracted node pairs
+
+    # Repeatedly pick a pair of nodes to contract until only one node is left.
+    for i in range(n - 1):
+        # Pick an arbitrary node u and create a set A = {u}.
+        u = arbitrary_element(G)
+        A = {u}
+        # Repeatedly pick the node "most tightly connected" to A and add it to
+        # A. The tightness of connectivity of a node not in A is defined by
+        # the sum of the weights of the edges connecting it to nodes in A.
+        h = heap()  # min-heap emulating a max-heap
+        for v, e in G[u].items():
+            h.insert(v, -e["weight"])
+        # Repeat until all but one node has been added to A.
+        for j in range(n - i - 2):
+            u = h.pop()[0]
+            A.add(u)
+            for v, e in G[u].items():
+                if v not in A:
+                    h.insert(v, h.get(v, 0) - e["weight"])
+        # A and the remaining node v define a "cut of the phase". There is a
+        # minimum cut of the original graph that is also a cut of the phase.
+        # Due to contractions in earlier phases, v may in fact represent
+        # multiple nodes in the original graph.
+        v, w = h.min()
+        w = -w
+        if w < cut_value:
+            cut_value = w
+            best_phase = i
+        # Contract v and the last node added to A.
+        contractions.append((u, v))
+        for w, e in G[v].items():
+            if w != u:
+                if w not in G[u]:
+                    G.add_edge(u, w, weight=e["weight"])
+                else:
+                    G[u][w]["weight"] += e["weight"]
+        G.remove_node(v)
+
+    # Recover the optimal partitioning from the contractions.
+    G = nx.Graph(islice(contractions, best_phase))
+    v = contractions[best_phase][1]
+    G.add_node(v)
+    reachable = set(nx.single_source_shortest_path_length(G, v))
+    partition = (list(reachable), list(nodes - reachable))
+
+    return cut_value, partition
diff --git a/networkx/algorithms/connectivity/utils.py b/networkx/algorithms/connectivity/utils.py
index 1c3f15d8f..a4d822ae5 100644
--- a/networkx/algorithms/connectivity/utils.py
+++ b/networkx/algorithms/connectivity/utils.py
@@ -2,13 +2,13 @@
 Utilities for connectivity package
 """
 import networkx as nx
-__all__ = ['build_auxiliary_node_connectivity',
-    'build_auxiliary_edge_connectivity']
+
+__all__ = ["build_auxiliary_node_connectivity", "build_auxiliary_edge_connectivity"]


 @nx._dispatchable(returns_graph=True)
 def build_auxiliary_node_connectivity(G):
-    """Creates a directed graph D from an undirected graph G to compute flow
+    r"""Creates a directed graph D from an undirected graph G to compute flow
     based node connectivity.

     For an undirected graph G having `n` nodes and `m` edges we derive a
@@ -36,7 +36,27 @@ def build_auxiliary_node_connectivity(G):
         https://doi.org/10.1007/978-3-540-31955-9_7

     """
-    pass
+    directed = G.is_directed()
+
+    mapping = {}
+    H = nx.DiGraph()
+
+    for i, node in enumerate(G):
+        mapping[node] = i
+        H.add_node(f"{i}A", id=node)
+        H.add_node(f"{i}B", id=node)
+        H.add_edge(f"{i}A", f"{i}B", capacity=1)
+
+    edges = []
+    for source, target in G.edges():
+        edges.append((f"{mapping[source]}B", f"{mapping[target]}A"))
+        if not directed:
+            edges.append((f"{mapping[target]}B", f"{mapping[source]}A"))
+    H.add_edges_from(edges, capacity=1)
+
+    # Store mapping as graph attribute
+    H.graph["mapping"] = mapping
+    return H
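
Size check for the reduction (editorial sketch): a graph with n nodes and m
undirected edges yields 2n auxiliary nodes and n + 2m unit-capacity arcs.

    import networkx as nx
    from networkx.algorithms.connectivity import build_auxiliary_node_connectivity

    G = nx.path_graph(3)                 # n = 3 nodes, m = 2 edges
    H = build_auxiliary_node_connectivity(G)
    print(H.number_of_nodes(), H.number_of_edges())  # 6 7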


 @nx._dispatchable(returns_graph=True)
@@ -54,4 +74,14 @@ def build_auxiliary_edge_connectivity(G):
         chapter, look for the reference of the book).
         http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
     """
-    pass
+    if G.is_directed():
+        H = nx.DiGraph()
+        H.add_nodes_from(G.nodes())
+        H.add_edges_from(G.edges(), capacity=1)
+        return H
+    else:
+        H = nx.DiGraph()
+        H.add_nodes_from(G.nodes())
+        for source, target in G.edges():
+            H.add_edges_from([(source, target), (target, source)], capacity=1)
+        return H
diff --git a/networkx/algorithms/core.py b/networkx/algorithms/core.py
index 2c68fec5b..511e6d9d0 100644
--- a/networkx/algorithms/core.py
+++ b/networkx/algorithms/core.py
@@ -21,18 +21,27 @@ D-cores: Measuring Collaboration of Directed Graphs Based on Degeneracy
 Christos Giatsidis, Dimitrios M. Thilikos, Michalis Vazirgiannis, ICDM 2011.
 http://www.graphdegeneracy.org/dcores_ICDM_2011.pdf

-Multi-scale structure and topological anomaly detection via a new network statistic: The onion decomposition
+Multi-scale structure and topological anomaly detection via a new network \
+statistic: The onion decomposition
 L. Hébert-Dufresne, J. A. Grochow, and A. Allard
 Scientific Reports 6, 31708 (2016)
 http://doi.org/10.1038/srep31708

 """
 import networkx as nx
-__all__ = ['core_number', 'k_core', 'k_shell', 'k_crust', 'k_corona',
-    'k_truss', 'onion_layers']

+__all__ = [
+    "core_number",
+    "k_core",
+    "k_shell",
+    "k_crust",
+    "k_corona",
+    "k_truss",
+    "onion_layers",
+]

-@nx.utils.not_implemented_for('multigraph')
+
+@nx.utils.not_implemented_for("multigraph")
 @nx._dispatchable
 def core_number(G):
     """Returns the core number for each node.
@@ -79,7 +88,37 @@ def core_number(G):
        Vladimir Batagelj and Matjaz Zaversnik, 2003.
        https://arxiv.org/abs/cs.DS/0310049
     """
-    pass
+    if nx.number_of_selfloops(G) > 0:
+        msg = (
+            "Input graph has self loops which is not permitted; "
+            "Consider using G.remove_edges_from(nx.selfloop_edges(G))."
+        )
+        raise nx.NetworkXNotImplemented(msg)
+    degrees = dict(G.degree())
+    # Sort nodes by degree.
+    nodes = sorted(degrees, key=degrees.get)
+    bin_boundaries = [0]
+    curr_degree = 0
+    for i, v in enumerate(nodes):
+        if degrees[v] > curr_degree:
+            bin_boundaries.extend([i] * (degrees[v] - curr_degree))
+            curr_degree = degrees[v]
+    node_pos = {v: pos for pos, v in enumerate(nodes)}
+    # The initial guess for the core number of a node is its degree.
+    core = degrees
+    nbrs = {v: list(nx.all_neighbors(G, v)) for v in G}
+    for v in nodes:
+        for u in nbrs[v]:
+            if core[u] > core[v]:
+                nbrs[u].remove(v)
+                pos = node_pos[u]
+                bin_start = bin_boundaries[core[u]]
+                node_pos[u] = bin_start
+                node_pos[nodes[bin_start]] = pos
+                nodes[bin_start], nodes[pos] = nodes[pos], nodes[bin_start]
+                bin_boundaries[core[u]] += 1
+                core[u] -= 1
+    return core
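
A worked example of the bucket-based core computation above (editorial sketch):
nodes of the triangle end in the 2-core, the pendant node in the 1-core.

    import networkx as nx

    G = nx.Graph([(0, 1), (0, 2), (1, 2), (2, 3)])  # triangle plus a pendant
    print(nx.core_number(G))  # {0: 2, 1: 2, 2: 2, 3: 1}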


 def _core_subgraph(G, k_filter, k=None, core=None):
@@ -101,7 +140,12 @@ def _core_subgraph(G, k_filter, k=None, core=None):
       If not specified, the core numbers will be computed from `G`.

     """
-    pass
+    if core is None:
+        core = core_number(G)
+    if k is None:
+        k = max(core.values())
+    nodes = (v for v in core if k_filter(v, k, core))
+    return G.subgraph(nodes).copy()


 @nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
@@ -160,7 +204,24 @@ def k_core(G, k=None, core_number=None):
        Vladimir Batagelj and Matjaz Zaversnik,  2003.
        https://arxiv.org/abs/cs.DS/0310049
     """
-    pass
+
+    import warnings
+
+    if G.is_multigraph():
+        warnings.warn(
+            (
+                "\n\n`k_core` will not accept `MultiGraph` objects in version 3.5.\n"
+                "Convert it to an undirected graph instead, using::\n\n"
+                "\tG = nx.Graph(G)\n"
+            ),
+            category=DeprecationWarning,
+            stacklevel=5,
+        )
+
+    def k_filter(v, k, c):
+        return c[v] >= k
+
+    return _core_subgraph(G, k_filter, k, core_number)


 @nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
@@ -225,7 +286,24 @@ def k_shell(G, k=None, core_number=None):
        and Eran Shir, PNAS  July 3, 2007   vol. 104  no. 27  11150-11154
        http://www.pnas.org/content/104/27/11150.full
     """
-    pass
+
+    import warnings
+
+    if G.is_multigraph():
+        warnings.warn(
+            (
+                "\n\n`k_shell` will not accept `MultiGraph` objects in version 3.5.\n"
+                "Convert it to an undirected graph instead, using::\n\n"
+                "\tG = nx.Graph(G)\n"
+            ),
+            category=DeprecationWarning,
+            stacklevel=5,
+        )
+
+    def k_filter(v, k, c):
+        return c[v] == k
+
+    return _core_subgraph(G, k_filter, k, core_number)


 @nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
@@ -287,7 +365,28 @@ def k_crust(G, k=None, core_number=None):
        and Eran Shir, PNAS  July 3, 2007   vol. 104  no. 27  11150-11154
        http://www.pnas.org/content/104/27/11150.full
     """
-    pass
+
+    import warnings
+
+    if G.is_multigraph():
+        warnings.warn(
+            (
+                "\n\n`k_crust` will not accept `MultiGraph` objects in version 3.5.\n"
+                "Convert it to an undirected graph instead, using::\n\n"
+                "\tG = nx.Graph(G)\n"
+            ),
+            category=DeprecationWarning,
+            stacklevel=5,
+        )
+
+    # Default for k is one less than in _core_subgraph, so just inline.
+    #    Filter is c[v] <= k
+    if core_number is None:
+        core_number = nx.core_number(G)
+    if k is None:
+        k = max(core_number.values()) - 1
+    nodes = (v for v in core_number if core_number[v] <= k)
+    return G.subgraph(nodes).copy()
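
With the defaults shown above (`k_core` keeps the nodes at the maximum core number, `k_crust` keeps the rest), the two subgraphs partition the node set. An illustrative sketch:

    import networkx as nx

    G = nx.Graph([(0, 1), (1, 2), (2, 0), (2, 3), (3, 4)])
    main_core = nx.k_core(G)  # default k is the maximum core number (2 here)
    crust = nx.k_crust(G)     # default k is one less than the maximum
    assert set(main_core) | set(crust) == set(G)
    assert not set(main_core) & set(crust)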


 @nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
@@ -347,11 +446,28 @@ def k_corona(G, k, core_number=None):
        Phys. Rev. E 73, 056101 (2006)
        http://link.aps.org/doi/10.1103/PhysRevE.73.056101
     """
-    pass

+    import warnings
+
+    if G.is_multigraph():
+        warnings.warn(
+            (
+                "\n\n`k_corona` will not accept `MultiGraph` objects in version 3.5.\n"
+                "Convert it to an undirected graph instead, using::\n\n"
+                "\tG = nx.Graph(G)\n"
+            ),
+            category=DeprecationWarning,
+            stacklevel=5,
+        )
+
+    def func(v, k, c):
+        return c[v] == k and k == sum(1 for w in G[v] if c[w] >= k)

-@nx.utils.not_implemented_for('directed')
-@nx.utils.not_implemented_for('multigraph')
+    return _core_subgraph(G, func, k, core_number)
+
+
+@nx.utils.not_implemented_for("directed")
+@nx.utils.not_implemented_for("multigraph")
 @nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
 def k_truss(G, k):
     """Returns the k-truss of `G`.
@@ -404,11 +520,36 @@ def k_truss(G, k):
     .. [2] Trusses: Cohesive Subgraphs for Social Network Analysis. Jonathan
        Cohen, 2005.
     """
-    pass
-
-
-@nx.utils.not_implemented_for('multigraph')
-@nx.utils.not_implemented_for('directed')
+    if nx.number_of_selfloops(G) > 0:
+        msg = (
+            "Input graph has self loops which is not permitted; "
+            "Consider using G.remove_edges_from(nx.selfloop_edges(G))."
+        )
+        raise nx.NetworkXNotImplemented(msg)
+
+    H = G.copy()
+
+    n_dropped = 1
+    while n_dropped > 0:
+        n_dropped = 0
+        to_drop = []
+        seen = set()
+        for u in H:
+            nbrs_u = set(H[u])
+            seen.add(u)
+            new_nbrs = [v for v in nbrs_u if v not in seen]
+            for v in new_nbrs:
+                if len(nbrs_u & set(H[v])) < (k - 2):
+                    to_drop.append((u, v))
+        H.remove_edges_from(to_drop)
+        n_dropped = len(to_drop)
+        H.remove_nodes_from(list(nx.isolates(H)))
+
+    return H
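
The loop above drops an edge unless its endpoints share at least k - 2 neighbors, i.e. unless the edge lies in at least k - 2 triangles. A small sanity check (illustrative):

    import networkx as nx

    # In K5 every edge lies in exactly 3 triangles, so K5 survives as its
    # own 5-truss, while asking for a 6-truss peels everything away.
    G = nx.complete_graph(5)
    print(nx.k_truss(G, 5).number_of_edges())  # 10
    print(nx.k_truss(G, 6).number_of_nodes())  # 0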
+
+
+@nx.utils.not_implemented_for("multigraph")
+@nx.utils.not_implemented_for("directed")
 @nx._dispatchable
 def onion_layers(G):
     """Returns the layer of each vertex in an onion decomposition of the graph.
@@ -458,4 +599,50 @@ def onion_layers(G):
        Physical Review X 9, 011023 (2019)
        http://doi.org/10.1103/PhysRevX.9.011023
     """
-    pass
+    if nx.number_of_selfloops(G) > 0:
+        msg = (
+            "Input graph contains self loops which is not permitted; "
+            "Consider using G.remove_edges_from(nx.selfloop_edges(G))."
+        )
+        raise nx.NetworkXNotImplemented(msg)
+    # Dictionary to register the onion decomposition.
+    od_layers = {}
+    # Adjacency list
+    neighbors = {v: list(nx.all_neighbors(G, v)) for v in G}
+    # Effective degree of nodes.
+    degrees = dict(G.degree())
+    # Performs the onion decomposition.
+    current_core = 1
+    current_layer = 1
+    # Sets vertices of degree 0 to layer 1, if any.
+    isolated_nodes = list(nx.isolates(G))
+    if len(isolated_nodes) > 0:
+        for v in isolated_nodes:
+            od_layers[v] = current_layer
+            degrees.pop(v)
+        current_layer = 2
+    # Finds the layer for the remaining nodes.
+    while len(degrees) > 0:
+        # Sets the order for looking at nodes.
+        nodes = sorted(degrees, key=degrees.get)
+        # Sets properly the current core.
+        min_degree = degrees[nodes[0]]
+        if min_degree > current_core:
+            current_core = min_degree
+        # Identifies vertices in the current layer.
+        this_layer = []
+        for n in nodes:
+            if degrees[n] > current_core:
+                break
+            this_layer.append(n)
+        # Identifies the core/layer of the vertices in the current layer.
+        for v in this_layer:
+            od_layers[v] = current_layer
+            for n in neighbors[v]:
+                neighbors[n].remove(v)
+                degrees[n] = degrees[n] - 1
+            degrees.pop(v)
+        # Updates the layer count.
+        current_layer = current_layer + 1
+    # Return the dictionary containing the onion layer of each vertex.
+    return od_layers
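
The peeling order is easiest to see on a star: all leaves come off in the first layer, leaving the hub for the second. An illustrative sketch using the standard star generator:

    import networkx as nx

    G = nx.star_graph(4)  # hub 0 plus leaves 1..4
    layers = nx.onion_layers(G)
    print(sorted(layers.items()))  # [(0, 2), (1, 1), (2, 1), (3, 1), (4, 1)]
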
diff --git a/networkx/algorithms/covering.py b/networkx/algorithms/covering.py
index 5ff7e7842..bed482bc4 100644
--- a/networkx/algorithms/covering.py
+++ b/networkx/algorithms/covering.py
@@ -1,13 +1,16 @@
 """ Functions related to graph covers."""
+
 from functools import partial
 from itertools import chain
+
 import networkx as nx
 from networkx.utils import arbitrary_element, not_implemented_for
-__all__ = ['min_edge_cover', 'is_edge_cover']
+
+__all__ = ["min_edge_cover", "is_edge_cover"]


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def min_edge_cover(G, matching_algorithm=None):
     """Returns the min cardinality edge cover of the graph as a set of edges.
@@ -68,10 +71,41 @@ def min_edge_cover(G, matching_algorithm=None):
     simply this function with a default matching algorithm of
     :func:`~networkx.algorithms.bipartite.matching.hopcroft_karp_matching`
     """
-    pass
-
-
-@not_implemented_for('directed')
+    if len(G) == 0:
+        return set()
+    if nx.number_of_isolates(G) > 0:
+        # ``min_cover`` does not exist as there is an isolated node
+        raise nx.NetworkXException(
+            "Graph has a node with no edge incident on it, so no edge cover exists."
+        )
+    if matching_algorithm is None:
+        matching_algorithm = partial(nx.max_weight_matching, maxcardinality=True)
+    maximum_matching = matching_algorithm(G)
+    # ``min_cover`` is superset of ``maximum_matching``
+    try:
+        # bipartite matching algs return dict so convert if needed
+        min_cover = set(maximum_matching.items())
+        bipartite_cover = True
+    except AttributeError:
+        min_cover = maximum_matching
+        bipartite_cover = False
+    # iterate for uncovered nodes
+    uncovered_nodes = set(G) - {v for u, v in min_cover} - {u for u, v in min_cover}
+    for v in uncovered_nodes:
+        # Since `v` is uncovered, each edge incident to `v` will join it
+        # with a covered node (otherwise, if there were an edge joining
+        # uncovered nodes `u` and `v`, the maximum matching algorithm
+        # would have found it), so we can choose an arbitrary edge
+        # incident to `v`. (This applies only in a simple graph, not a
+        # multigraph.)
+        u = arbitrary_element(G[v])
+        min_cover.add((u, v))
+        if bipartite_cover:
+            min_cover.add((v, u))
+    return min_cover
+
+
+@not_implemented_for("directed")
 @nx._dispatchable
 def is_edge_cover(G, cover):
     """Decides whether a set of edges is a valid edge cover of the graph.
@@ -105,4 +139,4 @@ def is_edge_cover(G, cover):
     An edge cover of a graph is a set of edges such that every node of
     the graph is incident to at least one edge of the set.
     """
-    pass
+    return set(G) <= set(chain.from_iterable(cover))
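
A short round trip through both functions (illustrative):

    import networkx as nx

    G = nx.path_graph(4)  # edges (0, 1), (1, 2), (2, 3)
    cover = nx.min_edge_cover(G)
    assert nx.is_edge_cover(G, cover)
    # A maximum matching of P4 already covers every node, so the minimum
    # edge cover has size 2 (orientation of pairs within the set may vary).
    print(len(cover))  # 2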
diff --git a/networkx/algorithms/cuts.py b/networkx/algorithms/cuts.py
index 4b07b7798..d7d54e7bb 100644
--- a/networkx/algorithms/cuts.py
+++ b/networkx/algorithms/cuts.py
@@ -1,14 +1,27 @@
 """Functions for finding and evaluating cuts in a graph.

 """
+
 from itertools import chain
+
 import networkx as nx
-__all__ = ['boundary_expansion', 'conductance', 'cut_size',
-    'edge_expansion', 'mixing_expansion', 'node_expansion',
-    'normalized_cut_size', 'volume']
+
+__all__ = [
+    "boundary_expansion",
+    "conductance",
+    "cut_size",
+    "edge_expansion",
+    "mixing_expansion",
+    "node_expansion",
+    "normalized_cut_size",
+    "volume",
+]
+
+
+# TODO STILL NEED TO UPDATE ALL THE DOCUMENTATION!


-@nx._dispatchable(edge_attrs='weight')
+@nx._dispatchable(edge_attrs="weight")
 def cut_size(G, S, T=None, weight=None):
     """Returns the size of the cut between two sets of nodes.

@@ -65,10 +78,13 @@ def cut_size(G, S, T=None, weight=None):
     multiplicity.

     """
-    pass
+    edges = nx.edge_boundary(G, S, T, data=weight, default=1)
+    if G.is_directed():
+        edges = chain(edges, nx.edge_boundary(G, T, S, data=weight, default=1))
+    return sum(weight for u, v, weight in edges)


-@nx._dispatchable(edge_attrs='weight')
+@nx._dispatchable(edge_attrs="weight")
 def volume(G, S, weight=None):
     """Returns the volume of a set of nodes.

@@ -107,10 +123,11 @@ def volume(G, S, weight=None):
            <https://www.cs.purdue.edu/homes/dgleich/publications/Gleich%202005%20-%20hierarchical%20directed%20spectral.pdf>

     """
-    pass
+    degree = G.out_degree if G.is_directed() else G.degree
+    return sum(d for v, d in degree(S, weight=weight))


-@nx._dispatchable(edge_attrs='weight')
+@nx._dispatchable(edge_attrs="weight")
 def normalized_cut_size(G, S, T=None, weight=None):
     """Returns the normalized size of the cut between two sets of nodes.

@@ -155,10 +172,15 @@ def normalized_cut_size(G, S, T=None, weight=None):
            <https://www.cs.purdue.edu/homes/dgleich/publications/Gleich%202005%20-%20hierarchical%20directed%20spectral.pdf>

     """
-    pass
+    if T is None:
+        T = set(G) - set(S)
+    num_cut_edges = cut_size(G, S, T=T, weight=weight)
+    volume_S = volume(G, S, weight=weight)
+    volume_T = volume(G, T, weight=weight)
+    return num_cut_edges * ((1 / volume_S) + (1 / volume_T))


-@nx._dispatchable(edge_attrs='weight')
+@nx._dispatchable(edge_attrs="weight")
 def conductance(G, S, T=None, weight=None):
     """Returns the conductance of two sets of nodes.

@@ -198,10 +220,15 @@ def conductance(G, S, T=None, weight=None):
            <https://www.cs.purdue.edu/homes/dgleich/publications/Gleich%202005%20-%20hierarchical%20directed%20spectral.pdf>

     """
-    pass
+    if T is None:
+        T = set(G) - set(S)
+    num_cut_edges = cut_size(G, S, T, weight=weight)
+    volume_S = volume(G, S, weight=weight)
+    volume_T = volume(G, T, weight=weight)
+    return num_cut_edges / min(volume_S, volume_T)


-@nx._dispatchable(edge_attrs='weight')
+@nx._dispatchable(edge_attrs="weight")
 def edge_expansion(G, S, T=None, weight=None):
     """Returns the edge expansion between two node sets.

@@ -242,10 +269,13 @@ def edge_expansion(G, S, T=None, weight=None):
            <http://www.math.ucsd.edu/~fan/research/revised.html>

     """
-    pass
+    if T is None:
+        T = set(G) - set(S)
+    num_cut_edges = cut_size(G, S, T=T, weight=weight)
+    return num_cut_edges / min(len(S), len(T))


-@nx._dispatchable(edge_attrs='weight')
+@nx._dispatchable(edge_attrs="weight")
 def mixing_expansion(G, S, T=None, weight=None):
     """Returns the mixing expansion between two node sets.

@@ -286,9 +316,13 @@ def mixing_expansion(G, S, T=None, weight=None):
            <https://doi.org/10.1561/0400000010>

     """
-    pass
+    num_cut_edges = cut_size(G, S, T=T, weight=weight)
+    num_total_edges = G.number_of_edges()
+    return num_cut_edges / (2 * num_total_edges)


+# TODO What is the generalization to two arguments, S and T? Does the
+# denominator become `min(len(S), len(T))`?
 @nx._dispatchable
 def node_expansion(G, S):
     """Returns the node expansion of the set `S`.
@@ -323,9 +357,12 @@ def node_expansion(G, S):
            <https://doi.org/10.1561/0400000010>

     """
-    pass
+    neighborhood = set(chain.from_iterable(G.neighbors(v) for v in S))
+    return len(neighborhood) / len(S)


+# TODO What is the generalization to two arguments, S and T? Does the
+# denominator become `min(len(S), len(T))`?
 @nx._dispatchable
 def boundary_expansion(G, S):
     """Returns the boundary expansion of the set `S`.
@@ -360,4 +397,4 @@ def boundary_expansion(G, S):
            <https://doi.org/10.1561/0400000010>

     """
-    pass
+    return len(nx.node_boundary(G, S)) / len(S)
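
The quantities above compose as expected on a graph with an obvious bottleneck. A sketch on a barbell graph:

    import networkx as nx

    # Two triangles joined by a single bridge; the natural bisection
    # cuts exactly one edge.
    G = nx.barbell_graph(3, 0)  # nodes 0-2 and 3-5, bridge (2, 3)
    S = {0, 1, 2}
    print(nx.cut_size(G, S))     # 1
    print(nx.volume(G, S))       # 7 (degree sum over S: 2 + 2 + 3)
    print(nx.conductance(G, S))  # 1/7 = 0.142857...
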
diff --git a/networkx/algorithms/cycles.py b/networkx/algorithms/cycles.py
index a61f1b90a..14660ed52 100644
--- a/networkx/algorithms/cycles.py
+++ b/networkx/algorithms/cycles.py
@@ -3,17 +3,27 @@
 Cycle finding algorithms
 ========================
 """
+
 from collections import Counter, defaultdict
 from itertools import combinations, product
 from math import inf
+
 import networkx as nx
 from networkx.utils import not_implemented_for, pairwise
-__all__ = ['cycle_basis', 'simple_cycles', 'recursive_simple_cycles',
-    'find_cycle', 'minimum_cycle_basis', 'chordless_cycles', 'girth']

+__all__ = [
+    "cycle_basis",
+    "simple_cycles",
+    "recursive_simple_cycles",
+    "find_cycle",
+    "minimum_cycle_basis",
+    "chordless_cycles",
+    "girth",
+]

-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def cycle_basis(G, root=None):
     """Returns a list of cycles which form a basis for cycles of G.
@@ -58,7 +68,38 @@ def cycle_basis(G, root=None):
     simple_cycles
     minimum_cycle_basis
     """
-    pass
+    gnodes = dict.fromkeys(G)  # set-like object that maintains node order
+    cycles = []
+    while gnodes:  # loop over connected components
+        if root is None:
+            root = gnodes.popitem()[0]
+        stack = [root]
+        pred = {root: root}
+        used = {root: set()}
+        while stack:  # walk the spanning tree finding cycles
+            z = stack.pop()  # use last-in so cycles easier to find
+            zused = used[z]
+            for nbr in G[z]:
+                if nbr not in used:  # new node
+                    pred[nbr] = z
+                    stack.append(nbr)
+                    used[nbr] = {z}
+                elif nbr == z:  # self loops
+                    cycles.append([z])
+                elif nbr not in zused:  # found a cycle
+                    pn = used[nbr]
+                    cycle = [nbr, z]
+                    p = pred[z]
+                    while p not in pn:
+                        cycle.append(p)
+                        p = pred[p]
+                    cycle.append(p)
+                    cycles.append(cycle)
+                    used[nbr].add(z)
+        for node in pred:
+            gnodes.pop(node, None)
+        root = None
+    return cycles
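
The number of basis cycles returned matches the dimension of the cycle space, m - n + c. Illustrative check:

    import networkx as nx

    # A 4-cycle with one chord: m - n + c = 5 - 4 + 1 = 2 basis cycles.
    G = nx.Graph([(0, 1), (1, 2), (2, 3), (3, 0), (0, 2)])
    print(len(nx.cycle_basis(G)))  # 2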


 @nx._dispatchable
@@ -154,7 +195,47 @@ def simple_cycles(G, length_bound=None):
     cycle_basis
     chordless_cycles
     """
-    pass
+
+    if length_bound is not None:
+        if length_bound == 0:
+            return
+        elif length_bound < 0:
+            raise ValueError("length bound must be non-negative")
+
+    directed = G.is_directed()
+    yield from ([v] for v, Gv in G.adj.items() if v in Gv)
+
+    if length_bound is not None and length_bound == 1:
+        return
+
+    if G.is_multigraph() and not directed:
+        visited = set()
+        for u, Gu in G.adj.items():
+            multiplicity = ((v, len(Guv)) for v, Guv in Gu.items() if v in visited)
+            yield from ([u, v] for v, m in multiplicity if m > 1)
+            visited.add(u)
+
+    # explicitly filter out loops; implicitly filter out parallel edges
+    if directed:
+        G = nx.DiGraph((u, v) for u, Gu in G.adj.items() for v in Gu if v != u)
+    else:
+        G = nx.Graph((u, v) for u, Gu in G.adj.items() for v in Gu if v != u)
+
+    # this case is not strictly necessary but improves performance
+    if length_bound is not None and length_bound == 2:
+        if directed:
+            visited = set()
+            for u, Gu in G.adj.items():
+                yield from (
+                    [v, u] for v in visited.intersection(Gu) if G.has_edge(v, u)
+                )
+                visited.add(u)
+        return
+
+    if directed:
+        yield from _directed_cycle_search(G, length_bound)
+    else:
+        yield from _undirected_cycle_search(G, length_bound)
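
A sketch of the generator and of the length_bound pruning (illustrative):

    import networkx as nx

    # A directed triangle plus a digon through node 0.
    G = nx.DiGraph([(0, 1), (1, 2), (2, 0), (0, 2)])
    print(sorted(len(c) for c in nx.simple_cycles(G)))                  # [2, 3]
    print(sorted(len(c) for c in nx.simple_cycles(G, length_bound=2)))  # [2]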


 def _directed_cycle_search(G, length_bound):
@@ -191,7 +272,20 @@ def _directed_cycle_search(G, length_bound):
     list of nodes
        Each cycle is represented by a list of nodes along the cycle.
     """
-    pass
+
+    scc = nx.strongly_connected_components
+    components = [c for c in scc(G) if len(c) >= 2]
+    while components:
+        c = components.pop()
+        Gc = G.subgraph(c)
+        v = next(iter(c))
+        if length_bound is None:
+            yield from _johnson_cycle_search(Gc, [v])
+        else:
+            yield from _bounded_cycle_search(Gc, [v], length_bound)
+        # delete v after searching G, to make sure we can find v
+        G.remove_node(v)
+        components.extend(c for c in scc(Gc) if len(c) >= 2)


 def _undirected_cycle_search(G, length_bound):
@@ -228,7 +322,20 @@ def _undirected_cycle_search(G, length_bound):
     list of nodes
        Each cycle is represented by a list of nodes along the cycle.
     """
-    pass
+
+    bcc = nx.biconnected_components
+    components = [c for c in bcc(G) if len(c) >= 3]
+    while components:
+        c = components.pop()
+        Gc = G.subgraph(c)
+        uv = list(next(iter(Gc.edges)))
+        G.remove_edge(*uv)
+        # delete (u, v) before searching G, to avoid fake 3-cycles [u, v, u]
+        if length_bound is None:
+            yield from _johnson_cycle_search(Gc, uv)
+        else:
+            yield from _bounded_cycle_search(Gc, uv, length_bound)
+        components.extend(c for c in bcc(Gc) if len(c) >= 3)


 class _NeighborhoodCache(dict):
@@ -270,7 +377,41 @@ def _johnson_cycle_search(G, path):
        https://doi.org/10.1137/0204007

     """
-    pass
+
+    G = _NeighborhoodCache(G)
+    blocked = set(path)
+    B = defaultdict(set)  # graph portions that yield no elementary circuit
+    start = path[0]
+    stack = [iter(G[path[-1]])]
+    closed = [False]
+    while stack:
+        nbrs = stack[-1]
+        for w in nbrs:
+            if w == start:
+                yield path[:]
+                closed[-1] = True
+            elif w not in blocked:
+                path.append(w)
+                closed.append(False)
+                stack.append(iter(G[w]))
+                blocked.add(w)
+                break
+        else:  # no more nbrs
+            stack.pop()
+            v = path.pop()
+            if closed.pop():
+                if closed:
+                    closed[-1] = True
+                unblock_stack = {v}
+                while unblock_stack:
+                    u = unblock_stack.pop()
+                    if u in blocked:
+                        blocked.remove(u)
+                        unblock_stack.update(B[u])
+                        B[u].clear()
+            else:
+                for w in G[v]:
+                    B[w].add(v)


 def _bounded_cycle_search(G, path, length_bound):
@@ -298,7 +439,40 @@ def _bounded_cycle_search(G, path, length_bound):
        A. Gupta and T. Suzumura https://arxiv.org/abs/2105.10094

     """
-    pass
+    G = _NeighborhoodCache(G)
+    lock = {v: 0 for v in path}
+    B = defaultdict(set)
+    start = path[0]
+    stack = [iter(G[path[-1]])]
+    blen = [length_bound]
+    while stack:
+        nbrs = stack[-1]
+        for w in nbrs:
+            if w == start:
+                yield path[:]
+                blen[-1] = 1
+            elif len(path) < lock.get(w, length_bound):
+                path.append(w)
+                blen.append(length_bound)
+                lock[w] = len(path)
+                stack.append(iter(G[w]))
+                break
+        else:
+            stack.pop()
+            v = path.pop()
+            bl = blen.pop()
+            if blen:
+                blen[-1] = min(blen[-1], bl)
+            if bl < length_bound:
+                relax_stack = [(bl, v)]
+                while relax_stack:
+                    bl, u = relax_stack.pop()
+                    if lock.get(u, length_bound) < length_bound - bl + 1:
+                        lock[u] = length_bound - bl + 1
+                        relax_stack.extend((bl + 1, w) for w in B[u].difference(path))
+            else:
+                for w in G[v]:
+                    B[w].add(v)


 @nx._dispatchable
@@ -391,7 +565,125 @@ def chordless_cycles(G, length_bound=None):
     --------
     simple_cycles
     """
-    pass
+
+    if length_bound is not None:
+        if length_bound == 0:
+            return
+        elif length_bound < 0:
+            raise ValueError("length bound must be non-negative")
+
+    directed = G.is_directed()
+    multigraph = G.is_multigraph()
+
+    if multigraph:
+        yield from ([v] for v, Gv in G.adj.items() if len(Gv.get(v, ())) == 1)
+    else:
+        yield from ([v] for v, Gv in G.adj.items() if v in Gv)
+
+    if length_bound is not None and length_bound == 1:
+        return
+
+    # Nodes with loops cannot belong to longer cycles.  Let's delete them here.
+    # Also, we implicitly reduce the multiplicity of edges down to 1 in the case
+    # of multiedges.
+    if directed:
+        F = nx.DiGraph((u, v) for u, Gu in G.adj.items() if u not in Gu for v in Gu)
+        B = F.to_undirected(as_view=False)
+    else:
+        F = nx.Graph((u, v) for u, Gu in G.adj.items() if u not in Gu for v in Gu)
+        B = None
+
+    # If we're given a multigraph, we have a few cases to consider with parallel
+    # edges.
+    #
+    # 1. If we have 2 or more edges in parallel between the nodes (u, v), we
+    #    must not construct longer cycles along (u, v).
+    # 2. If G is not directed, then a pair of parallel edges between (u, v) is a
+    #    chordless cycle unless there exists a third (or more) parallel edge.
+    # 3. If G is directed, then parallel edges do not form cycles, but do
+    #    preclude back-edges from forming cycles (handled in the next section),
+    #    Thus, if an edge (u, v) is duplicated and the reverse (v, u) is also
+    #    present, then we remove both from F.
+    #
+    # In directed graphs, we need to consider both directions that edges can
+    # take, so iterate over all edges (u, v) and possibly (v, u).  In undirected
+    # graphs, we need to be a little careful to only consider every edge once,
+    # so we use a "visited" set to emulate node-order comparisons.
+
+    if multigraph:
+        if not directed:
+            B = F.copy()
+            visited = set()
+        for u, Gu in G.adj.items():
+            if directed:
+                multiplicity = ((v, len(Guv)) for v, Guv in Gu.items())
+                for v, m in multiplicity:
+                    if m > 1:
+                        F.remove_edges_from(((u, v), (v, u)))
+            else:
+                multiplicity = ((v, len(Guv)) for v, Guv in Gu.items() if v in visited)
+                for v, m in multiplicity:
+                    if m == 2:
+                        yield [u, v]
+                    if m > 1:
+                        F.remove_edge(u, v)
+                visited.add(u)
+
+    # If we're given a directed graph, we need to think about digons.  If we
+    # have two edges (u, v) and (v, u), then that's a two-cycle.  If either edge
+    # was duplicated above, then we removed both from F.  So, any digons we find
+    # here are chordless.  After finding digons, we remove their edges from F
+    # to avoid traversing them in the search for chordless cycles.
+    if directed:
+        for u, Fu in F.adj.items():
+            digons = [[u, v] for v in Fu if F.has_edge(v, u)]
+            yield from digons
+            F.remove_edges_from(digons)
+            F.remove_edges_from(e[::-1] for e in digons)
+
+    if length_bound is not None and length_bound == 2:
+        return
+
+    # Now, we prepare to search for cycles.  We have removed all cycles of
+    # lengths 1 and 2, so F is a simple graph or simple digraph.  We repeatedly
+    # separate digraphs into their strongly connected components, and undirected
+    # graphs into their biconnected components.  For each component, we pick a
+    # node v, search for chordless cycles based at each "stem" (u, v, w), and
+    # then remove v from that component before separating the graph again.
+    if directed:
+        separate = nx.strongly_connected_components
+
+        # Directed stems look like (u -> v -> w), so we use the product of
+        # predecessors of v with successors of v.
+        def stems(C, v):
+            for u, w in product(C.pred[v], C.succ[v]):
+                if not G.has_edge(u, w):  # omit stems with acyclic chords
+                    yield [u, v, w], F.has_edge(w, u)
+
+    else:
+        separate = nx.biconnected_components
+
+        # Undirected stems look like (u ~ v ~ w), but we must not also search
+        # (w ~ v ~ u), so we use combinations of v's neighbors of length 2.
+        def stems(C, v):
+            yield from (([u, v, w], F.has_edge(w, u)) for u, w in combinations(C[v], 2))
+
+    components = [c for c in separate(F) if len(c) > 2]
+    while components:
+        c = components.pop()
+        v = next(iter(c))
+        Fc = F.subgraph(c)
+        Fcc = Bcc = None
+        for S, is_triangle in stems(Fc, v):
+            if is_triangle:
+                yield S
+            else:
+                if Fcc is None:
+                    Fcc = _NeighborhoodCache(Fc)
+                    Bcc = Fcc if B is None else _NeighborhoodCache(B.subgraph(c))
+                yield from _chordless_cycle_search(Fcc, Bcc, S, length_bound)
+
+        components.extend(c for c in separate(F.subgraph(c - {v})) if len(c) > 2)


 def _chordless_cycle_search(F, B, path, length_bound):
@@ -441,10 +733,37 @@ def _chordless_cycle_search(F, B, path, length_bound):
        https://arxiv.org/abs/1309.1051

     """
-    pass
-
-
-@not_implemented_for('undirected')
+    blocked = defaultdict(int)
+    target = path[0]
+    blocked[path[1]] = 1
+    for w in path[1:]:
+        for v in B[w]:
+            blocked[v] += 1
+
+    stack = [iter(F[path[2]])]
+    while stack:
+        nbrs = stack[-1]
+        for w in nbrs:
+            if blocked[w] == 1 and (length_bound is None or len(path) < length_bound):
+                Fw = F[w]
+                if target in Fw:
+                    yield path + [w]
+                else:
+                    Bw = B[w]
+                    if target in Bw:
+                        continue
+                    for v in Bw:
+                        blocked[v] += 1
+                    path.append(w)
+                    stack.append(iter(Fw))
+                    break
+        else:
+            stack.pop()
+            for v in B[path.pop()]:
+                blocked[v] -= 1
+
+
+@not_implemented_for("undirected")
 @nx._dispatchable(mutates_input=True)
 def recursive_simple_cycles(G):
     """Find simple cycles (elementary circuits) of a directed graph.
@@ -492,7 +811,67 @@ def recursive_simple_cycles(G):
     --------
     simple_cycles, cycle_basis
     """
-    pass
+
+    # Jon Olav Vik, 2010-08-09
+    def _unblock(thisnode):
+        """Recursively unblock and remove nodes from B[thisnode]."""
+        if blocked[thisnode]:
+            blocked[thisnode] = False
+            while B[thisnode]:
+                _unblock(B[thisnode].pop())
+
+    def circuit(thisnode, startnode, component):
+        closed = False  # set to True if elementary path is closed
+        path.append(thisnode)
+        blocked[thisnode] = True
+        for nextnode in component[thisnode]:  # direct successors of thisnode
+            if nextnode == startnode:
+                result.append(path[:])
+                closed = True
+            elif not blocked[nextnode]:
+                if circuit(nextnode, startnode, component):
+                    closed = True
+        if closed:
+            _unblock(thisnode)
+        else:
+            for nextnode in component[thisnode]:
+                if thisnode not in B[nextnode]:  # TODO: use set for speedup?
+                    B[nextnode].append(thisnode)
+        path.pop()  # remove thisnode from path
+        return closed
+
+    path = []  # stack of nodes in current path
+    blocked = defaultdict(bool)  # vertex: blocked from search?
+    B = defaultdict(list)  # graph portions that yield no elementary circuit
+    result = []  # list to accumulate the circuits found
+
+    # Johnson's algorithm excludes self-loop edges like (v, v).
+    # To be backward compatible, we record those cycles in advance
+    # and then remove them from G.
+    for v in G:
+        if G.has_edge(v, v):
+            result.append([v])
+            G.remove_edge(v, v)
+
+    # Johnson's algorithm requires some ordering of the nodes.
+    # They might not be sortable so we assign an arbitrary ordering.
+    ordering = dict(zip(G, range(len(G))))
+    for s in ordering:
+        # Build the subgraph induced by s and following nodes in the ordering
+        subgraph = G.subgraph(node for node in G if ordering[node] >= ordering[s])
+        # Find the strongly connected component in the subgraph
+        # that contains the least node according to the ordering
+        strongcomp = nx.strongly_connected_components(subgraph)
+        mincomp = min(strongcomp, key=lambda ns: min(ordering[n] for n in ns))
+        component = G.subgraph(mincomp)
+        if len(component) > 1:
+            # smallest node in the component according to the ordering
+            startnode = min(component, key=ordering.__getitem__)
+            for node in component:
+                blocked[node] = False
+                B[node][:] = []
+            dummy = circuit(startnode, startnode, component)
+    return result


 @nx._dispatchable
@@ -563,12 +942,101 @@ def find_cycle(G, source=None, orientation=None):
     --------
     simple_cycles
     """
-    pass
-
-
-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
-@nx._dispatchable(edge_attrs='weight')
+    if not G.is_directed() or orientation in (None, "original"):
+
+        def tailhead(edge):
+            return edge[:2]
+
+    elif orientation == "reverse":
+
+        def tailhead(edge):
+            return edge[1], edge[0]
+
+    elif orientation == "ignore":
+
+        def tailhead(edge):
+            if edge[-1] == "reverse":
+                return edge[1], edge[0]
+            return edge[:2]
+
+    explored = set()
+    cycle = []
+    final_node = None
+    for start_node in G.nbunch_iter(source):
+        if start_node in explored:
+            # No loop is possible.
+            continue
+
+        edges = []
+        # All nodes seen in this iteration of edge_dfs
+        seen = {start_node}
+        # Nodes in active path.
+        active_nodes = {start_node}
+        previous_head = None
+
+        for edge in nx.edge_dfs(G, start_node, orientation):
+            # Determine if this edge is a continuation of the active path.
+            tail, head = tailhead(edge)
+            if head in explored:
+                # Then we've already explored it. No loop is possible.
+                continue
+            if previous_head is not None and tail != previous_head:
+                # This edge results from backtracking.
+                # Pop until we get a node whose head equals the current tail.
+                # So for example, we might have:
+                #  (0, 1), (1, 2), (2, 3), (1, 4)
+                # which must become:
+                #  (0, 1), (1, 4)
+                while True:
+                    try:
+                        popped_edge = edges.pop()
+                    except IndexError:
+                        edges = []
+                        active_nodes = {tail}
+                        break
+                    else:
+                        popped_head = tailhead(popped_edge)[1]
+                        active_nodes.remove(popped_head)
+
+                    if edges:
+                        last_head = tailhead(edges[-1])[1]
+                        if tail == last_head:
+                            break
+            edges.append(edge)
+
+            if head in active_nodes:
+                # We have a loop!
+                cycle.extend(edges)
+                final_node = head
+                break
+            else:
+                seen.add(head)
+                active_nodes.add(head)
+                previous_head = head
+
+        if cycle:
+            break
+        else:
+            explored.update(seen)
+
+    else:
+        assert len(cycle) == 0
+        raise nx.exception.NetworkXNoCycle("No cycle found.")
+
+    # We now have a list of edges which ends on a cycle.
+    # So we need to trim the leading edges that are not part of the cycle.
+
+    for i, edge in enumerate(cycle):
+        tail, head = tailhead(edge)
+        if tail == final_node:
+            break
+
+    return cycle[i:]
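
Illustrative usage, including the no-cycle case:

    import networkx as nx

    G = nx.DiGraph([(0, 1), (1, 2), (2, 0), (0, 3)])
    print(nx.find_cycle(G, source=0))  # [(0, 1), (1, 2), (2, 0)]

    try:
        nx.find_cycle(nx.DiGraph([(0, 1), (1, 2)]))
    except nx.NetworkXNoCycle:
        print("no cycle")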
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
 def minimum_cycle_basis(G, weight=None):
     """Returns a minimum weight cycle basis for G

@@ -606,7 +1074,41 @@ def minimum_cycle_basis(G, weight=None):
     --------
     simple_cycles, cycle_basis
     """
-    pass
+    # We first split the graph into connected subgraphs
+    return sum(
+        (_min_cycle_basis(G.subgraph(c), weight) for c in nx.connected_components(G)),
+        [],
+    )
+
+
+def _min_cycle_basis(G, weight):
+    cb = []
+    # We extract the edges not in a spanning tree. We do not really need a
+    # *minimum* spanning tree, which is why we call the next function with
+    # weight=None. Depending on the implementation, this may even be faster.
+    tree_edges = list(nx.minimum_spanning_edges(G, weight=None, data=False))
+    chords = G.edges - tree_edges - {(v, u) for u, v in tree_edges}
+
+    # We maintain a set of vectors orthogonal to the cycles found so far
+    set_orth = [{edge} for edge in chords]
+    while set_orth:
+        base = set_orth.pop()
+        # kth cycle is "parallel" to kth vector in set_orth
+        cycle_edges = _min_cycle(G, base, weight)
+        cb.append([v for u, v in cycle_edges])
+
+        # now update set_orth so that k+1,k+2... th elements are
+        # orthogonal to the newly found cycle, as per [p. 336, 1]
+        set_orth = [
+            (
+                {e for e in orth if e not in base if e[::-1] not in base}
+                | {e for e in base if e not in orth if e[::-1] not in orth}
+            )
+            if sum((e in orth or e[::-1] in orth) for e in cycle_edges) % 2
+            else orth
+            for orth in set_orth
+        ]
+    return cb
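
On a chorded square the minimum basis consists of the two triangles rather than the outer 4-cycle, which is what makes it minimal. Illustrative:

    import networkx as nx

    G = nx.cycle_graph(4)
    G.add_edge(0, 2)
    print(sorted(len(c) for c in nx.minimum_cycle_basis(G)))  # [3, 3]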


 def _min_cycle(G, orth, weight):
@@ -615,11 +1117,55 @@ def _min_cycle(G, orth, weight):
     orthogonal to the vector orth as per [p. 338, 1]
     Use (u, 1) to indicate the lifted copy of u (denoted u' in paper).
     """
-    pass
-
-
-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+    Gi = nx.Graph()
+
+    # Add 2 copies of each edge in G to Gi.
+    # If edge is in orth, add cross edge; otherwise in-plane edge
+    for u, v, wt in G.edges(data=weight, default=1):
+        if (u, v) in orth or (v, u) in orth:
+            Gi.add_edges_from([(u, (v, 1)), ((u, 1), v)], Gi_weight=wt)
+        else:
+            Gi.add_edges_from([(u, v), ((u, 1), (v, 1))], Gi_weight=wt)
+
+    # find the shortest length in Gi between n and (n, 1) for each n
+    # Note: Use "Gi_weight" for name of weight attribute
+    spl = nx.shortest_path_length
+    lift = {n: spl(Gi, source=n, target=(n, 1), weight="Gi_weight") for n in G}
+
+    # Now compute that short path in Gi, which translates to a cycle in G
+    start = min(lift, key=lift.get)
+    end = (start, 1)
+    min_path_i = nx.shortest_path(Gi, source=start, target=end, weight="Gi_weight")
+
+    # Now we obtain the actual path, re-map nodes in Gi to those in G
+    min_path = [n if n in G else n[0] for n in min_path_i]
+
+    # Now remove the edges that occur twice.
+    # Two passes: flag which edges are kept, then build the edge list.
+    edgelist = list(pairwise(min_path))
+    edgeset = set()
+    for e in edgelist:
+        if e in edgeset:
+            edgeset.remove(e)
+        elif e[::-1] in edgeset:
+            edgeset.remove(e[::-1])
+        else:
+            edgeset.add(e)
+
+    min_edgelist = []
+    for e in edgelist:
+        if e in edgeset:
+            min_edgelist.append(e)
+            edgeset.remove(e)
+        elif e[::-1] in edgeset:
+            min_edgelist.append(e[::-1])
+            edgeset.remove(e[::-1])
+
+    return min_edgelist
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def girth(G):
     """Returns the girth of the graph.
@@ -660,4 +1206,26 @@ def girth(G):
     .. [1] `Wikipedia: Girth <https://en.wikipedia.org/wiki/Girth_(graph_theory)>`_

     """
-    pass
+    girth = depth_limit = inf
+    tree_edge = nx.algorithms.traversal.breadth_first_search.TREE_EDGE
+    level_edge = nx.algorithms.traversal.breadth_first_search.LEVEL_EDGE
+    for n in G:
+        # run a BFS from source n, keeping track of distances; since we want
+        # the shortest cycle, no need to explore beyond the current minimum length
+        depth = {n: 0}
+        for u, v, label in nx.bfs_labeled_edges(G, n):
+            du = depth[u]
+            if du > depth_limit:
+                break
+            if label is tree_edge:
+                depth[v] = du + 1
+            else:
+                # if (u, v) is a level edge, the length is du + du + 1 (odd)
+                # otherwise, it's a forward edge; length is du + (du + 1) + 1 (even)
+                delta = label is level_edge
+                length = du + du + 2 - delta
+                if length < girth:
+                    girth = length
+                    depth_limit = du - delta
+
+    return girth
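
A sanity check on graphs with known girth (illustrative, using standard generators):

    import networkx as nx

    print(nx.girth(nx.petersen_graph()))  # 5 (triangle- and square-free)
    print(nx.girth(nx.cycle_graph(6)))    # 6
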
diff --git a/networkx/algorithms/d_separation.py b/networkx/algorithms/d_separation.py
index 6c1bed40a..a688eca40 100644
--- a/networkx/algorithms/d_separation.py
+++ b/networkx/algorithms/d_separation.py
@@ -212,15 +212,23 @@ References
 .. [6] https://en.wikipedia.org/wiki/Berkson%27s_paradox

 """
+
 from collections import deque
 from itertools import chain
+
 import networkx as nx
 from networkx.utils import UnionFind, not_implemented_for
-__all__ = ['is_d_separator', 'is_minimal_d_separator',
-    'find_minimal_d_separator', 'd_separated', 'minimal_d_separator']
+
+__all__ = [
+    "is_d_separator",
+    "is_minimal_d_separator",
+    "find_minimal_d_separator",
+    "d_separated",
+    "minimal_d_separator",
+]


-@not_implemented_for('undirected')
+@not_implemented_for("undirected")
 @nx._dispatchable
 def is_d_separator(G, x, y, z):
     """Return whether node sets `x` and `y` are d-separated by `z`.
@@ -267,10 +275,68 @@ def is_d_separator(G, x, y, z):

     https://en.wikipedia.org/wiki/Bayesian_network#d-separation
     """
-    pass
-
-
-@not_implemented_for('undirected')
+    try:
+        x = {x} if x in G else x
+        y = {y} if y in G else y
+        z = {z} if z in G else z
+
+        intersection = x & y or x & z or y & z
+        if intersection:
+            raise nx.NetworkXError(
+                f"The sets are not disjoint, with intersection {intersection}"
+            )
+
+        set_v = x | y | z
+        if set_v - G.nodes:
+            raise nx.NodeNotFound(f"The node(s) {set_v - G.nodes} are not found in G")
+    except TypeError:
+        raise nx.NodeNotFound("One of x, y, or z is not a node or a set of nodes in G")
+
+    if not nx.is_directed_acyclic_graph(G):
+        raise nx.NetworkXError("graph should be directed acyclic")
+
+    # contains -> and <-> edges from starting node T
+    forward_deque = deque([])
+    forward_visited = set()
+
+    # contains <- and - edges from starting node T
+    backward_deque = deque(x)
+    backward_visited = set()
+
+    ancestors_or_z = set().union(*[nx.ancestors(G, node) for node in x]) | z | x
+
+    while forward_deque or backward_deque:
+        if backward_deque:
+            node = backward_deque.popleft()
+            backward_visited.add(node)
+            if node in y:
+                return False
+            if node in z:
+                continue
+
+            # add <- edges to backward deque
+            backward_deque.extend(G.pred[node].keys() - backward_visited)
+            # add -> edges to forward deque
+            forward_deque.extend(G.succ[node].keys() - forward_visited)
+
+        if forward_deque:
+            node = forward_deque.popleft()
+            forward_visited.add(node)
+            if node in y:
+                return False
+
+            # Consider if -> node <- is opened due to ancestor of node in z
+            if node in ancestors_or_z:
+                # add <- edges to backward deque
+                backward_deque.extend(G.pred[node].keys() - backward_visited)
+            if node not in z:
+                # add -> edges to forward deque
+                forward_deque.extend(G.succ[node].keys() - forward_visited)
+
+    return True
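
The classic chain example, as a sketch:

    import networkx as nx

    # Chain x -> z -> y: conditioning on z blocks the only path.
    G = nx.DiGraph([("x", "z"), ("z", "y")])
    print(nx.is_d_separator(G, "x", "y", "z"))    # True
    print(nx.is_d_separator(G, "x", "y", set()))  # False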
+
+
+@not_implemented_for("undirected")
 @nx._dispatchable
 def find_minimal_d_separator(G, x, y, *, included=None, restricted=None):
     """Returns a minimal d-separating set between `x` and `y` if possible
@@ -327,10 +393,57 @@ def find_minimal_d_separator(G, x, y, *, included=None, restricted=None):
         minimal d-separators in linear time and applications." In
         Uncertainty in Artificial Intelligence, pp. 637-647. PMLR, 2020.
     """
-    pass
+    if not nx.is_directed_acyclic_graph(G):
+        raise nx.NetworkXError("graph should be directed acyclic")
+
+    try:
+        x = {x} if x in G else x
+        y = {y} if y in G else y
+
+        if included is None:
+            included = set()
+        elif included in G:
+            included = {included}
+
+        if restricted is None:
+            restricted = set(G)
+        elif restricted in G:
+            restricted = {restricted}
+
+        set_y = x | y | included | restricted
+        if set_y - G.nodes:
+            raise nx.NodeNotFound(f"The node(s) {set_y - G.nodes} are not found in G")
+    except TypeError:
+        raise nx.NodeNotFound(
+            "One of x, y, included or restricted is not a node or set of nodes in G"
+        )
+
+    if not included <= restricted:
+        raise nx.NetworkXError(
+            f"Included nodes {included} must be in restricted nodes {restricted}"
+        )

+    intersection = x & y or x & included or y & included
+    if intersection:
+        raise nx.NetworkXError(
+            f"The sets x, y, included are not disjoint. Overlap: {intersection}"
+        )

-@not_implemented_for('undirected')
+    nodeset = x | y | included
+    ancestors_x_y_included = nodeset.union(*[nx.ancestors(G, node) for node in nodeset])
+
+    z_init = restricted & (ancestors_x_y_included - (x | y))
+
+    x_closure = _reachable(G, x, ancestors_x_y_included, z_init)
+    if x_closure & y:
+        return None
+
+    z_updated = z_init & (x_closure | included)
+    y_closure = _reachable(G, y, ancestors_x_y_included, z_updated)
+    return z_updated & (y_closure | included)
+
+
+@not_implemented_for("undirected")
 @nx._dispatchable
 def is_minimal_d_separator(G, x, y, z, *, included=None, restricted=None):
     """Determine if `z` is a minimal d-separator for `x` and `y`.
@@ -414,10 +527,67 @@ def is_minimal_d_separator(G, x, y, z, *, included=None, restricted=None):

     For full details, see [1]_.
     """
-    pass
-
-
-@not_implemented_for('undirected')
+    if not nx.is_directed_acyclic_graph(G):
+        raise nx.NetworkXError("graph should be directed acyclic")
+
+    try:
+        x = {x} if x in G else x
+        y = {y} if y in G else y
+        z = {z} if z in G else z
+
+        if included is None:
+            included = set()
+        elif included in G:
+            included = {included}
+
+        if restricted is None:
+            restricted = set(G)
+        elif restricted in G:
+            restricted = {restricted}
+
+        set_y = x | y | included | restricted
+        if set_y - G.nodes:
+            raise nx.NodeNotFound(f"The node(s) {set_y - G.nodes} are not found in G")
+    except TypeError:
+        raise nx.NodeNotFound(
+            "One of x, y, z, included or restricted is not a node or set of nodes in G"
+        )
+
+    if not included <= z:
+        raise nx.NetworkXError(
+            f"Included nodes {included} must be in proposed separating set z {x}"
+        )
+    if not z <= restricted:
+        raise nx.NetworkXError(
+            f"Separating set {z} must be contained in restricted set {restricted}"
+        )
+
+    intersection = x.intersection(y) or x.intersection(z) or y.intersection(z)
+    if intersection:
+        raise nx.NetworkXError(
+            f"The sets are not disjoint, with intersection {intersection}"
+        )
+
+    nodeset = x | y | included
+    ancestors_x_y_included = nodeset.union(*[nx.ancestors(G, n) for n in nodeset])
+
+    # criterion (a) -- check that z is actually a separator
+    x_closure = _reachable(G, x, ancestors_x_y_included, z)
+    if x_closure & y:
+        return False
+
+    # criterion (b) -- basic constraint; included and restricted already checked above
+    if not (z <= ancestors_x_y_included):
+        return False
+
+    # criterion (c) -- check that z is minimal
+    y_closure = _reachable(G, y, ancestors_x_y_included, z)
+    if not ((z - included) <= (x_closure & y_closure)):
+        return False
+    return True
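
A fork makes the minimal separator unique, which exercises both routines (illustrative):

    import networkx as nx

    # Fork x <- z -> y: {z} is the only separator, hence minimal.
    G = nx.DiGraph([("z", "x"), ("z", "y")])
    print(nx.find_minimal_d_separator(G, "x", "y"))       # {'z'}
    print(nx.is_minimal_d_separator(G, "x", "y", {"z"}))  # True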
+
+
+@not_implemented_for("undirected")
 def _reachable(G, x, a, z):
     """Modified Bayes-Ball algorithm for finding d-connected nodes.

@@ -455,9 +625,61 @@ def _reachable(G, x, a, z):
        Fourteenth Conference on Uncertainty in Artificial Intelligence
        (UAI), (pp. 480–487). 1998.
     """
-    pass
-

+    def _pass(e, v, f, n):
+        """Whether a ball entering node `v` along edge `e` passes to `n` along `f`.
+
+        Boolean function defined on page 6 of [1]_.
+
+        Parameters
+        ----------
+        e : bool
+            Directed edge by which the ball got to node `v`; `True` iff directed into `v`.
+        v : node
+            Node where the ball is.
+        f : bool
+            Directed edge connecting nodes `v` and `n`; `True` iff directed into `n`.
+        n : node
+            Checking whether the ball passes to this node.
+
+        Returns
+        -------
+        b : bool
+            Whether the ball passes or not.
+
+        References
+        ----------
+        .. [1] van der Zander, Benito, and Maciej Liśkiewicz. "Finding
+           minimal d-separators in linear time and applications." In
+           Uncertainty in Artificial Intelligence, pp. 637-647. PMLR, 2020.
+        """
+        is_element_of_A = n in a
+        # almost_definite_status = True  # always true for DAGs; not so for RCGs
+        collider_if_in_Z = v not in z or (e and not f)
+        return is_element_of_A and collider_if_in_Z  # and almost_definite_status
+
+    queue = deque([])
+    for node in x:
+        if bool(G.pred[node]):
+            queue.append((True, node))
+        if bool(G.succ[node]):
+            queue.append((False, node))
+    processed = queue.copy()
+
+    while any(queue):
+        e, v = queue.popleft()
+        preds = ((False, n) for n in G.pred[v])
+        succs = ((True, n) for n in G.succ[v])
+        f_n_pairs = chain(preds, succs)
+        for f, n in f_n_pairs:
+            if (f, n) not in processed and _pass(e, v, f, n):
+                queue.append((f, n))
+                processed.append((f, n))
+
+    return {w for (_, w) in processed}
+
+
+# Deprecated functions:
 def d_separated(G, x, y, z):
     """Return whether nodes sets ``x`` and ``y`` are d-separated by ``z``.

@@ -467,7 +689,15 @@ def d_separated(G, x, y, z):
         Please use `is_d_separator(G, x, y, z)`.

     """
-    pass
+    import warnings
+
+    warnings.warn(
+        "d_separated is deprecated and will be removed in NetworkX v3.5."
+        "Please use `is_d_separator(G, x, y, z)`.",
+        category=DeprecationWarning,
+        stacklevel=2,
+    )
+    return nx.is_d_separator(G, x, y, z)


 def minimal_d_separator(G, u, v):
@@ -479,4 +709,14 @@ def minimal_d_separator(G, u, v):
         Please use `find_minimal_d_separator(G, x, y)`.

     """
-    pass
+    import warnings
+
+    warnings.warn(
+        (
+            "This function is deprecated and will be removed in NetworkX v3.5."
+            "Please use `is_d_separator(G, x, y)`."
+        ),
+        category=DeprecationWarning,
+        stacklevel=2,
+    )
+    return nx.find_minimal_d_separator(G, u, v)
diff --git a/networkx/algorithms/dag.py b/networkx/algorithms/dag.py
index 35579494e..a70e2c5ef 100644
--- a/networkx/algorithms/dag.py
+++ b/networkx/algorithms/dag.py
@@ -4,19 +4,35 @@ Note that most of these functions are only guaranteed to work for DAGs.
 In general, these functions do not check for acyclic-ness, so it is up
 to the user to check for that.
 """
+
 import heapq
 from collections import deque
 from functools import partial
 from itertools import chain, combinations, product, starmap
 from math import gcd
+
 import networkx as nx
 from networkx.utils import arbitrary_element, not_implemented_for, pairwise
-__all__ = ['descendants', 'ancestors', 'topological_sort',
-    'lexicographical_topological_sort', 'all_topological_sorts',
-    'topological_generations', 'is_directed_acyclic_graph', 'is_aperiodic',
-    'transitive_closure', 'transitive_closure_dag', 'transitive_reduction',
-    'antichains', 'dag_longest_path', 'dag_longest_path_length',
-    'dag_to_branching', 'compute_v_structures']
+
+__all__ = [
+    "descendants",
+    "ancestors",
+    "topological_sort",
+    "lexicographical_topological_sort",
+    "all_topological_sorts",
+    "topological_generations",
+    "is_directed_acyclic_graph",
+    "is_aperiodic",
+    "transitive_closure",
+    "transitive_closure_dag",
+    "transitive_reduction",
+    "antichains",
+    "dag_longest_path",
+    "dag_longest_path_length",
+    "dag_to_branching",
+    "compute_v_structures",
+]
+
 chaini = chain.from_iterable


@@ -54,7 +70,7 @@ def descendants(G, source):
     --------
     ancestors
     """
-    pass
+    return {child for parent, child in nx.bfs_edges(G, source)}


 @nx._dispatchable
@@ -91,13 +107,19 @@ def ancestors(G, source):
     --------
     descendants
     """
-    pass
+    return {child for parent, child in nx.bfs_edges(G, source, reverse=True)}
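
Both reduce to a BFS reachability query, as the one-line bodies show. A sketch:

    import networkx as nx

    G = nx.DiGraph([(0, 1), (1, 2), (1, 3)])
    print(nx.descendants(G, 0))  # {1, 2, 3}
    print(nx.ancestors(G, 3))    # {0, 1}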


 @nx._dispatchable
 def has_cycle(G):
     """Decides whether the directed graph has a cycle."""
-    pass
+    try:
+        # Feed the entire iterator into a zero-length deque.
+        deque(topological_sort(G), maxlen=0)
+    except nx.NetworkXUnfeasible:
+        return True
+    else:
+        return False


 @nx._dispatchable
@@ -138,7 +160,7 @@ def is_directed_acyclic_graph(G):
     --------
     topological_sort
     """
-    pass
+    return G.is_directed() and not has_cycle(G)


 @nx._dispatchable
@@ -190,7 +212,33 @@ def topological_generations(G):
     --------
     topological_sort
     """
-    pass
+    if not G.is_directed():
+        raise nx.NetworkXError("Topological sort not defined on undirected graphs.")
+
+    multigraph = G.is_multigraph()
+    indegree_map = {v: d for v, d in G.in_degree() if d > 0}
+    zero_indegree = [v for v, d in G.in_degree() if d == 0]
+
+    while zero_indegree:
+        this_generation = zero_indegree
+        zero_indegree = []
+        for node in this_generation:
+            if node not in G:
+                raise RuntimeError("Graph changed during iteration")
+            for child in G.neighbors(node):
+                try:
+                    indegree_map[child] -= len(G[node][child]) if multigraph else 1
+                except KeyError as err:
+                    raise RuntimeError("Graph changed during iteration") from err
+                if indegree_map[child] == 0:
+                    zero_indegree.append(child)
+                    del indegree_map[child]
+        yield this_generation
+
+    if indegree_map:
+        raise nx.NetworkXUnfeasible(
+            "Graph contains a cycle or graph changed during iteration"
+        )


 @nx._dispatchable
@@ -258,7 +306,8 @@ def topological_sort(G):
     .. [1] Manber, U. (1989).
        *Introduction to Algorithms - A Creative Approach.* Addison-Wesley.
     """
-    pass
+    for generation in nx.topological_generations(G):
+        yield from generation
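
topological_sort is just the generations flattened, as the two-line body above shows. Illustrative:

    import networkx as nx

    G = nx.DiGraph([(0, 2), (1, 2), (2, 3)])
    print([sorted(gen) for gen in nx.topological_generations(G)])  # [[0, 1], [2], [3]]
    print(list(nx.topological_sort(G)))                            # [0, 1, 2, 3]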


 @nx._dispatchable
@@ -358,10 +407,52 @@ def lexicographical_topological_sort(G, key=None):
     .. [1] Manber, U. (1989).
        *Introduction to Algorithms - A Creative Approach.* Addison-Wesley.
     """
-    pass
+    if not G.is_directed():
+        msg = "Topological sort not defined on undirected graphs."
+        raise nx.NetworkXError(msg)
+
+    if key is None:
+
+        def key(node):
+            return node
+
+    nodeid_map = {n: i for i, n in enumerate(G)}

+    def create_tuple(node):
+        return key(node), nodeid_map[node], node

-@not_implemented_for('undirected')
+    indegree_map = {v: d for v, d in G.in_degree() if d > 0}
+    # These nodes have zero indegree and are ready to be returned.
+    zero_indegree = [create_tuple(v) for v, d in G.in_degree() if d == 0]
+    heapq.heapify(zero_indegree)
+
+    while zero_indegree:
+        _, _, node = heapq.heappop(zero_indegree)
+
+        if node not in G:
+            raise RuntimeError("Graph changed during iteration")
+        for _, child in G.edges(node):
+            try:
+                indegree_map[child] -= 1
+            except KeyError as err:
+                raise RuntimeError("Graph changed during iteration") from err
+            if indegree_map[child] == 0:
+                try:
+                    heapq.heappush(zero_indegree, create_tuple(child))
+                except TypeError as err:
+                    raise TypeError(
+                        f"{err}\nConsider using `key=` parameter to resolve ambiguities in the sort order."
+                    )
+                del indegree_map[child]
+
+        yield node
+
+    if indegree_map:
+        msg = "Graph contains a cycle or graph changed during iteration"
+        raise nx.NetworkXUnfeasible(msg)
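
The key only breaks ties among nodes that are simultaneously ready; a sketch:

    import networkx as nx

    G = nx.DiGraph([(2, 1), (3, 1)])
    print(list(nx.lexicographical_topological_sort(G)))                    # [2, 3, 1]
    print(list(nx.lexicographical_topological_sort(G, key=lambda n: -n)))  # [3, 2, 1]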
+
+
+@not_implemented_for("undirected")
 @nx._dispatchable
 def all_topological_sorts(G):
     """Returns a generator of _all_ topological sorts of the directed graph G.
@@ -408,7 +499,77 @@ def all_topological_sorts(G):
        https://doi.org/10.1016/0020-0190(74)90001-5.
        Elsevier (North-Holland), Amsterdam
     """
-    pass
+    if not G.is_directed():
+        raise nx.NetworkXError("Topological sort not defined on undirected graphs.")
+
+    # the names of count and D are chosen to match the global variables in [1]
+    # number of edges terminating at a vertex v (its remaining in-degree)
+    count = dict(G.in_degree())
+    # vertices with indegree 0
+    D = deque([v for v, d in G.in_degree() if d == 0])
+    # stack of first value chosen at a position k in the topological sort
+    bases = []
+    current_sort = []
+
+    # do-while construct
+    while True:
+        assert all(count[v] == 0 for v in D)
+
+        if len(current_sort) == len(G):
+            yield list(current_sort)
+
+            # clean-up stack
+            while len(current_sort) > 0:
+                assert len(bases) == len(current_sort)
+                q = current_sort.pop()
+
+                # "restores" all edges (q, x)
+                # NOTE: it is important to iterate over edges instead
+                # of successors, so count is updated correctly in multigraphs
+                for _, j in G.out_edges(q):
+                    count[j] += 1
+                    assert count[j] >= 0
+                # remove entries from D
+                while len(D) > 0 and count[D[-1]] > 0:
+                    D.pop()
+
+                # corresponds to a circular shift of the values in D
+                # if the first value chosen (the base) is in the first
+                # position of D again, we are done and need to consider the
+                # previous condition
+                D.appendleft(q)
+                if D[-1] == bases[-1]:
+                    # all possible values have been chosen at current position
+                    # remove corresponding marker
+                    bases.pop()
+                else:
+                    # there are still elements that have not been fixed
+                    # at the current position in the topological sort
+                    # stop removing elements, escape inner loop
+                    break
+
+        else:
+            if len(D) == 0:
+                raise nx.NetworkXUnfeasible("Graph contains a cycle.")
+
+            # choose next node
+            q = D.pop()
+            # "erase" all edges (q, x)
+            # NOTE: it is important to iterate over edges instead
+            # of successors, so count is updated correctly in multigraphs
+            for _, j in G.out_edges(q):
+                count[j] -= 1
+                assert count[j] >= 0
+                if count[j] == 0:
+                    D.append(j)
+            current_sort.append(q)
+
+            # base for current position might _not_ be fixed yet
+            if len(bases) < len(current_sort):
+                bases.append(q)
+
+        if len(bases) == 0:
+            break
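
Since `bases` records the first node tried at each position, the loop above backtracks until every alternative has been exhausted. A quick sketch of the enumeration (sorted, to make the output order deterministic):

    import networkx as nx

    DG = nx.DiGraph([(1, 2), (2, 3), (2, 4)])
    sorted(nx.all_topological_sorts(DG))
    # [[1, 2, 3, 4], [1, 2, 4, 3]]  -- 3 and 4 may appear in either order
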


 @nx._dispatchable
@@ -478,7 +639,30 @@ def is_aperiodic(G):
        in Shier, D. R.; Wallenius, K. T., Applied Mathematical Modeling:
        A Multidisciplinary Approach, CRC Press.
     """
-    pass
+    if not G.is_directed():
+        raise nx.NetworkXError("is_aperiodic not defined for undirected graphs")
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept("Graph has no nodes.")
+    s = arbitrary_element(G)
+    levels = {s: 0}
+    this_level = [s]
+    g = 0
+    lev = 1
+    while this_level:
+        next_level = []
+        for u in this_level:
+            for v in G[u]:
+                if v in levels:  # Non-Tree Edge
+                    g = gcd(g, levels[u] - levels[v] + 1)
+                else:  # Tree Edge
+                    next_level.append(v)
+                    levels[v] = lev
+        this_level = next_level
+        lev += 1
+    if len(levels) == len(G):  # All nodes in tree
+        return g == 1
+    else:
+        return g == 1 and nx.is_aperiodic(G.subgraph(set(G) - set(levels)))
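
The `gcd` accumulates over non-tree edges, so a single extra chord can make a periodic digraph aperiodic. A hand-checked sketch:

    import networkx as nx

    triangle = nx.DiGraph([(1, 2), (2, 3), (3, 1)])
    nx.is_aperiodic(triangle)  # False: every cycle length is a multiple of 3
    triangle.add_edge(1, 3)
    nx.is_aperiodic(triangle)  # True: gcd of cycle lengths 3 and 2 is 1
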


 @nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
@@ -555,10 +739,25 @@ def transitive_closure(G, reflexive=False):
     ----------
     .. [1] https://www.ics.uci.edu/~eppstein/PADS/PartialOrder.py
     """
-    pass
+    TC = G.copy()
+
+    if reflexive not in {None, True, False}:
+        raise nx.NetworkXError("Incorrect value for the parameter `reflexive`")

+    for v in G:
+        if reflexive is None:
+            TC.add_edges_from((v, u) for u in nx.descendants(G, v) if u not in TC[v])
+        elif reflexive is True:
+            TC.add_edges_from(
+                (v, u) for u in nx.descendants(G, v) | {v} if u not in TC[v]
+            )
+        elif reflexive is False:
+            TC.add_edges_from((v, e[1]) for e in nx.edge_bfs(G, v) if e[1] not in TC[v])

-@not_implemented_for('undirected')
+    return TC
+
+
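
A usage sketch of the `reflexive` modes, with outputs worked out from the code above:

    import networkx as nx

    DG = nx.DiGraph([(1, 2), (2, 3)])
    sorted(nx.transitive_closure(DG, reflexive=False).edges())
    # [(1, 2), (1, 3), (2, 3)]
    sorted(nx.transitive_closure(DG, reflexive=True).edges())
    # [(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)]  -- self-loops added
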
+@not_implemented_for("undirected")
 @nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
 def transitive_closure_dag(G, topo_order=None):
     """Returns the transitive closure of a directed acyclic graph.
@@ -602,10 +801,20 @@ def transitive_closure_dag(G, topo_order=None):
     This algorithm is probably simple enough to be well-known but I didn't find
     a mention in the literature.
     """
-    pass
+    if topo_order is None:
+        topo_order = list(topological_sort(G))
+
+    TC = G.copy()
+
+    # idea: traverse vertices following a reverse topological order, connecting
+    # each vertex to its descendants at distance 2 as we go
+    for v in reversed(topo_order):
+        TC.add_edges_from((v, u) for u in nx.descendants_at_distance(TC, v, 2))

+    return TC

-@not_implemented_for('undirected')
+
+@not_implemented_for("undirected")
 @nx._dispatchable(returns_graph=True)
 def transitive_reduction(G):
     """Returns transitive reduction of a directed graph
@@ -656,10 +865,29 @@ def transitive_reduction(G):
     https://en.wikipedia.org/wiki/Transitive_reduction

     """
-    pass
-
-
-@not_implemented_for('undirected')
+    if not is_directed_acyclic_graph(G):
+        msg = "Directed Acyclic Graph required for transitive_reduction"
+        raise nx.NetworkXError(msg)
+    TR = nx.DiGraph()
+    TR.add_nodes_from(G.nodes())
+    descendants = {}
+    # count before removing set stored in descendants
+    check_count = dict(G.in_degree)
+    for u in G:
+        u_nbrs = set(G[u])
+        for v in G[u]:
+            if v in u_nbrs:
+                if v not in descendants:
+                    descendants[v] = {y for x, y in nx.dfs_edges(G, v)}
+                u_nbrs -= descendants[v]
+            check_count[v] -= 1
+            if check_count[v] == 0:
+                del descendants[v]
+        TR.add_edges_from((u, v) for v in u_nbrs)
+    return TR
+
+
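
The `check_count` bookkeeping frees each cached descendant set once all of a node's predecessors have been processed. A small sketch of the result:

    import networkx as nx

    DG = nx.DiGraph([(1, 2), (2, 3), (1, 3)])
    sorted(nx.transitive_reduction(DG).edges())
    # [(1, 2), (2, 3)]  -- the shortcut (1, 3) is implied and dropped
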
+@not_implemented_for("undirected")
 @nx._dispatchable
 def antichains(G, topo_order=None):
     """Generates antichains from a directed acyclic graph (DAG).
@@ -707,12 +935,28 @@ def antichains(G, topo_order=None):
     .. [1] Free Lattices, by R. Freese, J. Jezek and J. B. Nation,
        AMS, Vol 42, 1995, p. 226.
     """
-    pass
-
-
-@not_implemented_for('undirected')
-@nx._dispatchable(edge_attrs={'weight': 'default_weight'})
-def dag_longest_path(G, weight='weight', default_weight=1, topo_order=None):
+    if topo_order is None:
+        topo_order = list(nx.topological_sort(G))
+
+    TC = nx.transitive_closure_dag(G, topo_order)
+    antichains_stacks = [([], list(reversed(topo_order)))]
+
+    while antichains_stacks:
+        (antichain, stack) = antichains_stacks.pop()
+        # Invariant:
+        #  - the elements of antichain are independent
+        #  - the elements of stack are independent from those of antichain
+        yield antichain
+        while stack:
+            x = stack.pop()
+            new_antichain = antichain + [x]
+            new_stack = [t for t in stack if not ((t in TC[x]) or (x in TC[t]))]
+            antichains_stacks.append((new_antichain, new_stack))
+
+
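
Each stack entry pairs a known antichain with the nodes still free to extend it, so no combination is visited twice. A sketch (sorted, since emission order is an implementation detail):

    import networkx as nx

    DG = nx.DiGraph([(1, 2), (1, 3)])
    sorted(sorted(a) for a in nx.antichains(DG))
    # [[], [1], [2], [2, 3], [3]]
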
+@not_implemented_for("undirected")
+@nx._dispatchable(edge_attrs={"weight": "default_weight"})
+def dag_longest_path(G, weight="weight", default_weight=1, topo_order=None):
     """Returns the longest path in a directed acyclic graph (DAG).

     If `G` has edges with `weight` attribute the edge data are used as
@@ -768,12 +1012,47 @@ def dag_longest_path(G, weight='weight', default_weight=1, topo_order=None):
     dag_longest_path_length

     """
-    pass
-
-
-@not_implemented_for('undirected')
-@nx._dispatchable(edge_attrs={'weight': 'default_weight'})
-def dag_longest_path_length(G, weight='weight', default_weight=1):
+    if not G:
+        return []
+
+    if topo_order is None:
+        topo_order = nx.topological_sort(G)
+
+    dist = {}  # stores {v : (length, u)}
+    for v in topo_order:
+        us = [
+            (
+                dist[u][0]
+                + (
+                    max(data.values(), key=lambda x: x.get(weight, default_weight))
+                    if G.is_multigraph()
+                    else data
+                ).get(weight, default_weight),
+                u,
+            )
+            for u, data in G.pred[v].items()
+        ]
+
+        # Use the best predecessor if there is one and its distance is
+        # non-negative, otherwise terminate.
+        maxu = max(us, key=lambda x: x[0]) if us else (0, v)
+        dist[v] = maxu if maxu[0] >= 0 else (0, v)
+
+    u = None
+    v = max(dist, key=lambda x: dist[x][0])
+    path = []
+    while u != v:
+        path.append(v)
+        u = v
+        v = dist[v][1]
+
+    path.reverse()
+    return path
+
+
+@not_implemented_for("undirected")
+@nx._dispatchable(edge_attrs={"weight": "default_weight"})
+def dag_longest_path_length(G, weight="weight", default_weight=1):
     """Returns the longest path length in a DAG

     Parameters
@@ -811,7 +1090,17 @@ def dag_longest_path_length(G, weight='weight', default_weight=1):
     --------
     dag_longest_path
     """
-    pass
+    path = nx.dag_longest_path(G, weight, default_weight)
+    path_length = 0
+    if G.is_multigraph():
+        for u, v in pairwise(path):
+            i = max(G[u][v], key=lambda x: G[u][v][x].get(weight, default_weight))
+            path_length += G[u][v][i].get(weight, default_weight)
+    else:
+        for u, v in pairwise(path):
+            path_length += G[u][v].get(weight, default_weight)
+
+    return path_length
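
A combined sketch of both functions, mirroring the library's documented example (`cost` is an arbitrary attribute name):

    import networkx as nx

    DG = nx.DiGraph([(0, 1, {"cost": 1}), (1, 2, {"cost": 1}), (0, 2, {"cost": 42})])
    nx.dag_longest_path(DG)                        # [0, 1, 2] -- most edges
    nx.dag_longest_path(DG, weight="cost")         # [0, 2]    -- largest total cost
    nx.dag_longest_path_length(DG, weight="cost")  # 42
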


 @nx._dispatchable
@@ -826,11 +1115,15 @@ def root_to_leaf_paths(G):
     any leaf. A path is a list of nodes.

     """
-    pass
+    roots = (v for v, d in G.in_degree() if d == 0)
+    leaves = (v for v, d in G.out_degree() if d == 0)
+    all_paths = partial(nx.all_simple_paths, G)
+    # TODO This would be better expressed as `yield from ...`.
+    return chaini(starmap(all_paths, product(roots, leaves)))


-@not_implemented_for('multigraph')
-@not_implemented_for('undirected')
+@not_implemented_for("multigraph")
+@not_implemented_for("undirected")
 @nx._dispatchable(returns_graph=True)
 def dag_to_branching(G):
     """Returns a branching representing all (overlapping) paths from
@@ -917,10 +1210,18 @@ def dag_to_branching(G):
     that function.

     """
-    pass
-
-
-@not_implemented_for('undirected')
+    if has_cycle(G):
+        msg = "dag_to_branching is only defined for acyclic graphs"
+        raise nx.HasACycle(msg)
+    paths = root_to_leaf_paths(G)
+    B = nx.prefix_tree(paths)
+    # Remove the synthetic `root`(0) and `NIL`(-1) nodes from the tree
+    B.remove_node(0)
+    B.remove_node(-1)
+    return B
+
+
+@not_implemented_for("undirected")
 @nx._dispatchable
 def compute_v_structures(G):
     """Iterate through the graph to compute all v-structures.
@@ -951,4 +1252,8 @@ def compute_v_structures(G):
     -----
     `Wikipedia: Collider in causal graphs <https://en.wikipedia.org/wiki/Collider_(statistics)>`_
     """
-    pass
+    for collider, preds in G.pred.items():
+        for common_parents in combinations(preds, r=2):
+            # ensure that the colliders are the same
+            common_parents = sorted(common_parents)
+            yield (common_parents[0], collider, common_parents[1])
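
Note that the generator above yields one triple per unordered pair of parents of each collider; it does not check whether the two parents are themselves adjacent. A minimal sketch:

    import networkx as nx

    DG = nx.DiGraph([(1, 3), (2, 3)])  # 1 -> 3 <- 2
    list(nx.compute_v_structures(DG))
    # [(1, 3, 2)]
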
diff --git a/networkx/algorithms/distance_measures.py b/networkx/algorithms/distance_measures.py
index 83efb364d..20c1086d6 100644
--- a/networkx/algorithms/distance_measures.py
+++ b/networkx/algorithms/distance_measures.py
@@ -1,12 +1,22 @@
 """Graph diameter, radius, eccentricity and other properties."""
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['eccentricity', 'diameter', 'radius', 'periphery', 'center',
-    'barycenter', 'resistance_distance', 'kemeny_constant',
-    'effective_graph_resistance']
+
+__all__ = [
+    "eccentricity",
+    "diameter",
+    "radius",
+    "periphery",
+    "center",
+    "barycenter",
+    "resistance_distance",
+    "kemeny_constant",
+    "effective_graph_resistance",
+]


-def _extrema_bounding(G, compute='diameter', weight=None):
+def _extrema_bounding(G, compute="diameter", weight=None):
     """Compute requested extreme distance metric of undirected graph G

     Computation is based on smart lower and upper bounds, and in practice
@@ -78,10 +88,156 @@ def _extrema_bounding(G, compute='diameter', weight=None):
        Theoretical Computer Science, 2015
        https://www.sciencedirect.com/science/article/pii/S0304397515001644
     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
+    # init variables
+    degrees = dict(G.degree())  # start with the highest degree node
+    minlowernode = max(degrees, key=degrees.get)
+    N = len(degrees)  # number of nodes
+    # alternate between smallest lower and largest upper bound
+    high = False
+    # status variables
+    ecc_lower = dict.fromkeys(G, 0)
+    ecc_upper = dict.fromkeys(G, N)
+    candidates = set(G)
+
+    # (re)set bound extremes
+    minlower = N
+    maxlower = 0
+    minupper = N
+    maxupper = 0
+
+    # repeat the following until there are no more candidates
+    while candidates:
+        if high:
+            current = maxuppernode  # select node with largest upper bound
+        else:
+            current = minlowernode  # select node with smallest lower bound
+        high = not high
+
+        # get distances from/to current node and derive eccentricity
+        dist = nx.shortest_path_length(G, source=current, weight=weight)
+
+        if len(dist) != N:
+            msg = "Cannot compute metric because graph is not connected."
+            raise nx.NetworkXError(msg)
+        current_ecc = max(dist.values())
+
+        # (re)set bound extremes
+        maxuppernode = None
+        minlowernode = None
+
+        # update node bounds
+        for i in candidates:
+            # update eccentricity bounds
+            d = dist[i]
+            ecc_lower[i] = low = max(ecc_lower[i], max(d, (current_ecc - d)))
+            ecc_upper[i] = upp = min(ecc_upper[i], current_ecc + d)
+
+            # update min/max values of lower and upper bounds
+            minlower = min(ecc_lower[i], minlower)
+            maxlower = max(ecc_lower[i], maxlower)
+            minupper = min(ecc_upper[i], minupper)
+            maxupper = max(ecc_upper[i], maxupper)
+
+        # update candidate set
+        if compute == "diameter":
+            ruled_out = {
+                i
+                for i in candidates
+                if ecc_upper[i] <= maxlower and 2 * ecc_lower[i] >= maxupper
+            }
+        elif compute == "radius":
+            ruled_out = {
+                i
+                for i in candidates
+                if ecc_lower[i] >= minupper and ecc_upper[i] + 1 <= 2 * minlower
+            }
+        elif compute == "periphery":
+            ruled_out = {
+                i
+                for i in candidates
+                if ecc_upper[i] < maxlower
+                and (maxlower == maxupper or ecc_lower[i] > maxupper)
+            }
+        elif compute == "center":
+            ruled_out = {
+                i
+                for i in candidates
+                if ecc_lower[i] > minupper
+                and (minlower == minupper or ecc_upper[i] + 1 < 2 * minlower)
+            }
+        elif compute == "eccentricities":
+            ruled_out = set()
+        else:
+            msg = "compute must be one of 'diameter', 'radius', 'periphery', 'center', 'eccentricities'"
+            raise ValueError(msg)
+
+        ruled_out.update(i for i in candidates if ecc_lower[i] == ecc_upper[i])
+        candidates -= ruled_out
+
+        # updating maxuppernode and minlowernode for selection in next round
+        for i in candidates:
+            if (
+                minlowernode is None
+                or (
+                    ecc_lower[i] == ecc_lower[minlowernode]
+                    and degrees[i] > degrees[minlowernode]
+                )
+                or (ecc_lower[i] < ecc_lower[minlowernode])
+            ):
+                minlowernode = i
+
+            if (
+                maxuppernode is None
+                or (
+                    ecc_upper[i] == ecc_upper[maxuppernode]
+                    and degrees[i] > degrees[maxuppernode]
+                )
+                or (ecc_upper[i] > ecc_upper[maxuppernode])
+            ):
+                maxuppernode = i
+
+    # return the correct value of the requested metric
+    if compute == "diameter":
+        return maxlower
+    if compute == "radius":
+        return minupper
+    if compute == "periphery":
+        p = [v for v in G if ecc_lower[v] == maxlower]
+        return p
+    if compute == "center":
+        c = [v for v in G if ecc_upper[v] == minupper]
+        return c
+    if compute == "eccentricities":
+        return ecc_lower
+    return None
+
+
+@nx._dispatchable(edge_attrs="weight")
 def eccentricity(G, v=None, sp=None, weight=None):
     """Returns the eccentricity of nodes in G.

@@ -134,10 +290,43 @@ def eccentricity(G, v=None, sp=None, weight=None):
     {1: 2, 5: 3}

     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
+    order = G.order()
+    e = {}
+    for n in G.nbunch_iter(v):
+        if sp is None:
+            length = nx.shortest_path_length(G, source=n, weight=weight)
+
+            L = len(length)
+        else:
+            try:
+                length = sp[n]
+                L = len(length)
+            except TypeError as err:
+                raise nx.NetworkXError('Format of "sp" is invalid.') from err
+        if L != order:
+            if G.is_directed():
+                msg = (
+                    "Found infinite path length because the digraph is not"
+                    " strongly connected"
+                )
+            else:
+                msg = "Found infinite path length because the graph is not" " connected"
+            raise nx.NetworkXError(msg)
+
+        e[n] = max(length.values())
+
+    if v in G:
+        return e[v]  # return single value
+    return e
+
+
+@nx._dispatchable(edge_attrs="weight")
 def diameter(G, e=None, usebounds=False, weight=None):
     """Returns the diameter of the graph G.

@@ -186,10 +375,14 @@ def diameter(G, e=None, usebounds=False, weight=None):
     --------
     eccentricity
     """
-    pass
+    if usebounds is True and e is None and not G.is_directed():
+        return _extrema_bounding(G, compute="diameter", weight=weight)
+    if e is None:
+        e = eccentricity(G, weight=weight)
+    return max(e.values())
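
With `usebounds=True` (and an undirected graph), the call routes through `_extrema_bounding` above instead of computing every eccentricity. A quick sketch:

    import networkx as nx

    G = nx.path_graph(5)            # 0-1-2-3-4
    nx.diameter(G)                  # 4
    nx.diameter(G, usebounds=True)  # 4, via the bound-pruning search
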


-@nx._dispatchable(edge_attrs='weight')
+@nx._dispatchable(edge_attrs="weight")
 def periphery(G, e=None, usebounds=False, weight=None):
     """Returns the periphery of the graph G.

@@ -239,10 +432,16 @@ def periphery(G, e=None, usebounds=False, weight=None):
     barycenter
     center
     """
-    pass
+    if usebounds is True and e is None and not G.is_directed():
+        return _extrema_bounding(G, compute="periphery", weight=weight)
+    if e is None:
+        e = eccentricity(G, weight=weight)
+    diameter = max(e.values())
+    p = [v for v in e if e[v] == diameter]
+    return p


-@nx._dispatchable(edge_attrs='weight')
+@nx._dispatchable(edge_attrs="weight")
 def radius(G, e=None, usebounds=False, weight=None):
     """Returns the radius of the graph G.

@@ -288,10 +487,14 @@ def radius(G, e=None, usebounds=False, weight=None):
     2

     """
-    pass
+    if usebounds is True and e is None and not G.is_directed():
+        return _extrema_bounding(G, compute="radius", weight=weight)
+    if e is None:
+        e = eccentricity(G, weight=weight)
+    return min(e.values())


-@nx._dispatchable(edge_attrs='weight')
+@nx._dispatchable(edge_attrs="weight")
 def center(G, e=None, usebounds=False, weight=None):
     """Returns the center of the graph G.

@@ -341,12 +544,18 @@ def center(G, e=None, usebounds=False, weight=None):
     barycenter
     periphery
     """
-    pass
+    if usebounds is True and e is None and not G.is_directed():
+        return _extrema_bounding(G, compute="center", weight=weight)
+    if e is None:
+        e = eccentricity(G, weight=weight)
+    radius = min(e.values())
+    p = [v for v in e if e[v] == radius]
+    return p


-@nx._dispatchable(edge_attrs='weight', mutates_input={'attr': 2})
+@nx._dispatchable(edge_attrs="weight", mutates_input={"attr": 2})
 def barycenter(G, weight=None, attr=None, sp=None):
-    """Calculate barycenter of a connected graph, optionally with edge weights.
+    r"""Calculate barycenter of a connected graph, optionally with edge weights.

     The :dfn:`barycenter` of a
     :func:`connected <networkx.algorithms.components.is_connected>` graph
@@ -355,7 +564,7 @@ def barycenter(G, weight=None, attr=None, sp=None):

     .. math::

-        \\sum_{u \\in V(G)} d_G(u, v),
+        \sum_{u \in V(G)} d_G(u, v),

     where :math:`d_G` is the (possibly weighted) :func:`path length
     <networkx.algorithms.shortest_paths.generic.shortest_path_length>`.
@@ -399,13 +608,35 @@ def barycenter(G, weight=None, attr=None, sp=None):
     center
     periphery
     """
-    pass
-
-
-@not_implemented_for('directed')
-@nx._dispatchable(edge_attrs='weight')
-def resistance_distance(G, nodeA=None, nodeB=None, weight=None,
-    invert_weight=True):
+    if sp is None:
+        sp = nx.shortest_path_length(G, weight=weight)
+    else:
+        sp = sp.items()
+        if weight is not None:
+            raise ValueError("Cannot use both sp, weight arguments together")
+    smallest, barycenter_vertices, n = float("inf"), [], len(G)
+    for v, dists in sp:
+        if len(dists) < n:
+            raise nx.NetworkXNoPath(
+                f"Input graph {G} is disconnected, so every induced subgraph "
+                "has infinite barycentricity."
+            )
+        barycentricity = sum(dists.values())
+        if attr is not None:
+            G.nodes[v][attr] = barycentricity
+        if barycentricity < smallest:
+            smallest = barycentricity
+            barycenter_vertices = [v]
+        elif barycentricity == smallest:
+            barycenter_vertices.append(v)
+    if attr is not None:
+        nx._clear_cache(G)
+    return barycenter_vertices
+
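
A sketch of the return value and the optional `attr` side effect (`"b"` is an arbitrary attribute name):

    import networkx as nx

    G = nx.path_graph(5)
    nx.barycenter(G)           # [2] -- node 2 minimizes the distance sum (6)
    nx.barycenter(G, attr="b")
    G.nodes[0]["b"]            # 10 = 1 + 2 + 3 + 4
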
+
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
+def resistance_distance(G, nodeA=None, nodeB=None, weight=None, invert_weight=True):
     """Returns the resistance distance between pairs of nodes in graph G.

     The resistance distance between two nodes of a graph is akin to treating
@@ -474,11 +705,74 @@ def resistance_distance(G, nodeA=None, nodeB=None, weight=None,
         Resistance distance.
         J. of Math. Chem. 12:81-95, 1993.
     """
-    pass
-
-
-@not_implemented_for('directed')
-@nx._dispatchable(edge_attrs='weight')
+    import numpy as np
+
+    if len(G) == 0:
+        raise nx.NetworkXError("Graph G must contain at least one node.")
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph G must be strongly connected.")
+    if nodeA is not None and nodeA not in G:
+        raise nx.NetworkXError("Node A is not in graph G.")
+    if nodeB is not None and nodeB not in G:
+        raise nx.NetworkXError("Node B is not in graph G.")
+
+    G = G.copy()
+    node_list = list(G)
+
+    # Invert weights
+    if invert_weight and weight is not None:
+        if G.is_multigraph():
+            for u, v, k, d in G.edges(keys=True, data=True):
+                d[weight] = 1 / d[weight]
+        else:
+            for u, v, d in G.edges(data=True):
+                d[weight] = 1 / d[weight]
+
+    # Compute resistance distance using the Pseudo-inverse of the Laplacian
+    # Self-loops are ignored
+    L = nx.laplacian_matrix(G, weight=weight).todense()
+    Linv = np.linalg.pinv(L, hermitian=True)
+
+    # Return relevant distances
+    if nodeA is not None and nodeB is not None:
+        i = node_list.index(nodeA)
+        j = node_list.index(nodeB)
+        return Linv.item(i, i) + Linv.item(j, j) - Linv.item(i, j) - Linv.item(j, i)
+
+    elif nodeA is not None:
+        i = node_list.index(nodeA)
+        d = {}
+        for n in G:
+            j = node_list.index(n)
+            d[n] = Linv.item(i, i) + Linv.item(j, j) - Linv.item(i, j) - Linv.item(j, i)
+        return d
+
+    elif nodeB is not None:
+        j = node_list.index(nodeB)
+        d = {}
+        for n in G:
+            i = node_list.index(n)
+            d[n] = Linv.item(i, i) + Linv.item(j, j) - Linv.item(i, j) - Linv.item(j, i)
+        return d
+
+    else:
+        d = {}
+        for n in G:
+            i = node_list.index(n)
+            d[n] = {}
+            for n2 in G:
+                j = node_list.index(n2)
+                d[n][n2] = (
+                    Linv.item(i, i)
+                    + Linv.item(j, j)
+                    - Linv.item(i, j)
+                    - Linv.item(j, i)
+                )
+        return d
+
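
A hand-checkable sketch: in a 4-cycle with unit resistances, opposite corners are joined by two 2-ohm paths in parallel:

    import networkx as nx

    G = nx.cycle_graph(4)
    nx.resistance_distance(G, 0, 2)  # 1.0 = (2 * 2) / (2 + 2)
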
+
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
 def effective_graph_resistance(G, weight=None, invert_weight=True):
     """Returns the Effective graph resistance of G.

@@ -538,11 +832,35 @@ def effective_graph_resistance(G, weight=None, invert_weight=True):
         Effective graph resistance.
         Lin. Alg. Appl. 435:2491-2506, 2011.
     """
-    pass
+    import numpy as np
+
+    if len(G) == 0:
+        raise nx.NetworkXError("Graph G must contain at least one node.")
+
+    # Disconnected graphs have infinite Effective graph resistance
+    if not nx.is_connected(G):
+        return float("inf")
+
+    # Invert weights
+    G = G.copy()
+    if invert_weight and weight is not None:
+        if G.is_multigraph():
+            for u, v, k, d in G.edges(keys=True, data=True):
+                d[weight] = 1 / d[weight]
+        else:
+            for u, v, d in G.edges(data=True):
+                d[weight] = 1 / d[weight]
+
+    # Get Laplacian eigenvalues
+    mu = np.sort(nx.laplacian_spectrum(G, weight=weight))
+
+    # Compute Effective graph resistance based on spectrum of the Laplacian
+    # Self-loops are ignored
+    return float(np.sum(1 / mu[1:]) * G.number_of_nodes())
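
A sketch against the spectral formula: the triangle's Laplacian eigenvalues are 0, 3, 3, so the result is 3 * (1/3 + 1/3):

    import networkx as nx

    G = nx.cycle_graph(3)
    nx.effective_graph_resistance(G)  # 2.0
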


-@nx.utils.not_implemented_for('directed')
-@nx._dispatchable(edge_attrs='weight')
+@nx.utils.not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
 def kemeny_constant(G, *, weight=None):
     """Returns the Kemeny constant of the given graph.

@@ -606,4 +924,28 @@ def kemeny_constant(G, *, weight=None):
         Paul Erdös is Eighty, vol. 2, Bolyai Society,
         Mathematical Studies, Keszthely, Hungary (1993), pp. 1-46
     """
-    pass
+    import numpy as np
+    import scipy as sp
+
+    if len(G) == 0:
+        raise nx.NetworkXError("Graph G must contain at least one node.")
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph G must be connected.")
+    if nx.is_negatively_weighted(G, weight=weight):
+        raise nx.NetworkXError("The weights of graph G must be nonnegative.")
+
+    # Compute matrix H = D^-1/2 A D^-1/2
+    A = nx.adjacency_matrix(G, weight=weight)
+    n, m = A.shape
+    diags = A.sum(axis=1)
+    with np.errstate(divide="ignore"):
+        diags_sqrt = 1.0 / np.sqrt(diags)
+    diags_sqrt[np.isinf(diags_sqrt)] = 0
+    DH = sp.sparse.csr_array(sp.sparse.spdiags(diags_sqrt, 0, m, n, format="csr"))
+    H = DH @ (A @ DH)
+
+    # Compute eigenvalues of H
+    eig = np.sort(sp.linalg.eigvalsh(H.todense()))
+
+    # Compute the Kemeny constant
+    return float(np.sum(1 / (1 - eig[:-1])))
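
For a complete graph K_n the constant works out to (n - 1)**2 / n, which gives a quick sanity check:

    import networkx as nx

    G = nx.complete_graph(3)
    nx.kemeny_constant(G)  # 1.333... = (3 - 1)**2 / 3
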
diff --git a/networkx/algorithms/distance_regular.py b/networkx/algorithms/distance_regular.py
index bc0a96922..27b4d0216 100644
--- a/networkx/algorithms/distance_regular.py
+++ b/networkx/algorithms/distance_regular.py
@@ -3,11 +3,18 @@
 Distance-regular graphs
 =======================
 """
+
 import networkx as nx
 from networkx.utils import not_implemented_for
+
 from .distance_measures import diameter
-__all__ = ['is_distance_regular', 'is_strongly_regular',
-    'intersection_array', 'global_parameters']
+
+__all__ = [
+    "is_distance_regular",
+    "is_strongly_regular",
+    "intersection_array",
+    "global_parameters",
+]


 @nx._dispatchable
@@ -51,7 +58,11 @@ def is_distance_regular(G):
         http://mathworld.wolfram.com/Distance-RegularGraph.html

     """
-    pass
+    try:
+        intersection_array(G)
+        return True
+    except nx.NetworkXError:
+        return False


 def global_parameters(b, c):
@@ -95,11 +106,11 @@ def global_parameters(b, c):
     --------
     intersection_array
     """
-    pass
+    return ((y, b[0] - x - y, x) for x, y in zip(b + [0], [0] + c))


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def intersection_array(G):
     """Returns the intersection array of a distance-regular graph.
@@ -136,11 +147,43 @@ def intersection_array(G):
     --------
     global_parameters
     """
-    pass
-
-
-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+    # test for regular graph (all degrees must be equal)
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept("Graph has no nodes.")
+    degree = iter(G.degree())
+    (_, k) = next(degree)
+    for _, knext in degree:
+        if knext != k:
+            raise nx.NetworkXError("Graph is not distance regular.")
+        k = knext
+    path_length = dict(nx.all_pairs_shortest_path_length(G))
+    diameter = max(max(path_length[n].values()) for n in path_length)
+    bint = {}  # 'b' intersection array
+    cint = {}  # 'c' intersection array
+    for u in G:
+        for v in G:
+            try:
+                i = path_length[u][v]
+            except KeyError as err:  # graph must be connected
+                raise nx.NetworkXError("Graph is not distance regular.") from err
+            # number of neighbors of v at a distance of i-1 from u
+            c = len([n for n in G[v] if path_length[n][u] == i - 1])
+            # number of neighbors of v at a distance of i+1 from u
+            b = len([n for n in G[v] if path_length[n][u] == i + 1])
+            # b,c are independent of u and v
+            if cint.get(i, c) != c or bint.get(i, b) != b:
+                raise nx.NetworkXError("Graph is not distance regular")
+            bint[i] = b
+            cint[i] = c
+    return (
+        [bint.get(j, 0) for j in range(diameter)],
+        [cint.get(j + 1, 0) for j in range(diameter)],
+    )
+
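
A sketch pairing the two functions on a distance-regular graph; outputs match the library's documented example:

    import networkx as nx

    G = nx.icosahedral_graph()
    b, c = nx.intersection_array(G)  # ([5, 2, 1], [1, 2, 5])
    list(nx.global_parameters(b, c))
    # [(0, 0, 5), (1, 2, 2), (2, 2, 1), (5, 0, 0)]
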
+
+# TODO There is a definition for directed strongly regular graphs.
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def is_strongly_regular(G):
     """Returns True if and only if the given graph is strongly
@@ -181,4 +224,15 @@ def is_strongly_regular(G):
         True

     """
-    pass
+    # Here is an alternate implementation based directly on the
+    # definition of strongly regular graphs:
+    #
+    #     return (all_equal(G.degree().values())
+    #             and all_equal(len(common_neighbors(G, u, v))
+    #                           for u, v in G.edges())
+    #             and all_equal(len(common_neighbors(G, u, v))
+    #                           for u, v in non_edges(G)))
+    #
+    # We instead use the fact that a distance-regular graph of diameter
+    # two is strongly regular.
+    return is_distance_regular(G) and diameter(G) == 2
diff --git a/networkx/algorithms/dominance.py b/networkx/algorithms/dominance.py
index d0af4785f..ab841fe21 100644
--- a/networkx/algorithms/dominance.py
+++ b/networkx/algorithms/dominance.py
@@ -1,13 +1,16 @@
 """
 Dominance algorithms.
 """
+
 from functools import reduce
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['immediate_dominators', 'dominance_frontiers']

+__all__ = ["immediate_dominators", "dominance_frontiers"]

-@not_implemented_for('undirected')
+
+@not_implemented_for("undirected")
 @nx._dispatchable
 def immediate_dominators(G, start):
     """Returns the immediate dominators of all nodes of a directed graph.
@@ -51,7 +54,34 @@ def immediate_dominators(G, start):
            A simple, fast dominance algorithm.
            Software Practice & Experience, 4:110, 2001.
     """
-    pass
+    if start not in G:
+        raise nx.NetworkXError("start is not in G")
+
+    idom = {start: start}
+
+    order = list(nx.dfs_postorder_nodes(G, start))
+    dfn = {u: i for i, u in enumerate(order)}
+    order.pop()
+    order.reverse()
+
+    def intersect(u, v):
+        while u != v:
+            while dfn[u] < dfn[v]:
+                u = idom[u]
+            while dfn[u] > dfn[v]:
+                v = idom[v]
+        return u
+
+    changed = True
+    while changed:
+        changed = False
+        for u in order:
+            new_idom = reduce(intersect, (v for v in G.pred[u] if v in idom))
+            if u not in idom or idom[u] != new_idom:
+                idom[u] = new_idom
+                changed = True
+
+    return idom


 @nx._dispatchable
@@ -92,4 +122,14 @@ def dominance_frontiers(G, start):
            A simple, fast dominance algorithm.
            Software Practice & Experience, 4:110, 2001.
     """
-    pass
+    idom = nx.immediate_dominators(G, start)
+
+    df = {u: set() for u in idom}
+    for u in idom:
+        if len(G.pred[u]) >= 2:
+            for v in G.pred[u]:
+                if v in idom:
+                    while v != idom[u]:
+                        df[v].add(u)
+                        v = idom[v]
+    return df
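
A sketch exercising both dominance routines on the same small flow graph; outputs match the library's documented examples:

    import networkx as nx

    G = nx.DiGraph([(1, 2), (1, 3), (2, 5), (3, 4), (4, 5)])
    sorted(nx.immediate_dominators(G, 1).items())
    # [(1, 1), (2, 1), (3, 1), (4, 3), (5, 1)]
    sorted((u, sorted(df)) for u, df in nx.dominance_frontiers(G, 1).items())
    # [(1, []), (2, [5]), (3, [5]), (4, [5]), (5, [])]
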
diff --git a/networkx/algorithms/dominating.py b/networkx/algorithms/dominating.py
index bb1ca38c9..8e9a458f8 100644
--- a/networkx/algorithms/dominating.py
+++ b/networkx/algorithms/dominating.py
@@ -1,13 +1,15 @@
 """Functions for computing dominating sets in a graph."""
 from itertools import chain
+
 import networkx as nx
 from networkx.utils import arbitrary_element
-__all__ = ['dominating_set', 'is_dominating_set']
+
+__all__ = ["dominating_set", "is_dominating_set"]


 @nx._dispatchable
 def dominating_set(G, start_with=None):
-    """Finds a dominating set for the graph G.
+    r"""Finds a dominating set for the graph G.

     A *dominating set* for a graph with node set *V* is a subset *D* of
     *V* such that every node not in *D* is adjacent to at least one
@@ -42,7 +44,25 @@ def dominating_set(G, start_with=None):
         http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf

     """
-    pass
+    all_nodes = set(G)
+    if start_with is None:
+        start_with = arbitrary_element(all_nodes)
+    if start_with not in G:
+        raise nx.NetworkXError(f"node {start_with} is not in G")
+    dominating_set = {start_with}
+    dominated_nodes = set(G[start_with])
+    remaining_nodes = all_nodes - dominated_nodes - dominating_set
+    while remaining_nodes:
+        # Choose an arbitrary node and determine its undominated neighbors.
+        v = remaining_nodes.pop()
+        undominated_nbrs = set(G[v]) - dominating_set
+        # Add the node to the dominating set and the neighbors to the
+        # dominated set. Finally, remove all of those nodes from the set
+        # of remaining nodes.
+        dominating_set.add(v)
+        dominated_nodes |= undominated_nbrs
+        remaining_nodes -= undominated_nbrs
+    return dominating_set
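
The greedy construction does not promise a minimum dominating set, only a valid one, which `is_dominating_set` can confirm:

    import networkx as nx

    G = nx.path_graph(5)
    D = nx.dominating_set(G, start_with=0)
    nx.is_dominating_set(G, D)  # True (the exact set returned may vary)
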


 @nx._dispatchable
@@ -69,4 +89,6 @@ def is_dominating_set(G, nbunch):
     .. [1] https://en.wikipedia.org/wiki/Dominating_set

     """
-    pass
+    testset = {n for n in nbunch if n in G}
+    nbrs = set(chain.from_iterable(G[n] for n in testset))
+    return len(set(G) - testset - nbrs) == 0
diff --git a/networkx/algorithms/efficiency_measures.py b/networkx/algorithms/efficiency_measures.py
index 48ec5ddf3..2c99b0114 100644
--- a/networkx/algorithms/efficiency_measures.py
+++ b/networkx/algorithms/efficiency_measures.py
@@ -1,11 +1,14 @@
 """Provides functions for computing the efficiency of nodes and graphs."""
+
 import networkx as nx
 from networkx.exception import NetworkXNoPath
+
 from ..utils import not_implemented_for
-__all__ = ['efficiency', 'local_efficiency', 'global_efficiency']
+
+__all__ = ["efficiency", "local_efficiency", "global_efficiency"]


-@not_implemented_for('directed')
+@not_implemented_for("directed")
 @nx._dispatchable
 def efficiency(G, u, v):
     """Returns the efficiency of a pair of nodes in a graph.
@@ -49,10 +52,14 @@ def efficiency(G, u, v):
            <https://doi.org/10.1103/PhysRevLett.87.198701>

     """
-    pass
+    try:
+        eff = 1 / nx.shortest_path_length(G, u, v)
+    except NetworkXNoPath:
+        eff = 0
+    return eff


-@not_implemented_for('directed')
+@not_implemented_for("directed")
 @nx._dispatchable
 def global_efficiency(G):
     """Returns the average global efficiency of the graph.
@@ -94,10 +101,26 @@ def global_efficiency(G):
            <https://doi.org/10.1103/PhysRevLett.87.198701>

     """
-    pass
-
-
-@not_implemented_for('directed')
+    n = len(G)
+    denom = n * (n - 1)
+    if denom != 0:
+        lengths = nx.all_pairs_shortest_path_length(G)
+        g_eff = 0
+        for source, targets in lengths:
+            for target, distance in targets.items():
+                if distance > 0:
+                    g_eff += 1 / distance
+        g_eff /= denom
+        # g_eff = sum(1 / d for s, tgts in lengths
+        #                   for t, d in tgts.items() if d > 0) / denom
+    else:
+        g_eff = 0
+    # TODO This can be made more efficient by computing all pairs shortest
+    # path lengths in parallel.
+    return g_eff
+
+
+@not_implemented_for("directed")
 @nx._dispatchable
 def local_efficiency(G):
     """Returns the average local efficiency of the graph.
@@ -140,4 +163,6 @@ def local_efficiency(G):
            <https://doi.org/10.1103/PhysRevLett.87.198701>

     """
-    pass
+    # TODO This summation can be trivially parallelized.
+    efficiency_list = (global_efficiency(G.subgraph(G[v])) for v in G)
+    return sum(efficiency_list) / len(G)
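
A sketch of all three measures on one small graph; values match the library's documented examples:

    import networkx as nx

    G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
    nx.efficiency(G, 2, 3)   # 0.5 -- the 2-3 shortest path has length 2
    nx.global_efficiency(G)  # 0.9166... (11/12)
    nx.local_efficiency(G)   # 0.9166... (11/12 for this graph)
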
diff --git a/networkx/algorithms/euler.py b/networkx/algorithms/euler.py
index 032874645..2e0e57358 100644
--- a/networkx/algorithms/euler.py
+++ b/networkx/algorithms/euler.py
@@ -2,10 +2,19 @@
 Eulerian circuits and graphs.
 """
 from itertools import combinations
+
 import networkx as nx
+
 from ..utils import arbitrary_element, not_implemented_for
-__all__ = ['is_eulerian', 'eulerian_circuit', 'eulerize', 'is_semieulerian',
-    'has_eulerian_path', 'eulerian_path']
+
+__all__ = [
+    "is_eulerian",
+    "eulerian_circuit",
+    "eulerize",
+    "is_semieulerian",
+    "has_eulerian_path",
+    "eulerian_path",
+]


 @nx._dispatchable
@@ -49,7 +58,15 @@ def is_eulerian(G):


     """
-    pass
+    if G.is_directed():
+        # Every node must have equal in degree and out degree and the
+        # graph must be strongly connected
+        return all(
+            G.in_degree(n) == G.out_degree(n) for n in G
+        ) and nx.is_strongly_connected(G)
+    # An undirected Eulerian graph has no vertices of odd degree and
+    # must be connected.
+    return all(d % 2 == 0 for v, d in G.degree()) and nx.is_connected(G)


 @nx._dispatchable
@@ -63,7 +80,7 @@ def is_semieulerian(G):
     has_eulerian_path
     is_eulerian
     """
-    pass
+    return has_eulerian_path(G) and not is_eulerian(G)


 def _find_path_start(G):
@@ -71,7 +88,70 @@ def _find_path_start(G):

     If no path exists, return None.
     """
-    pass
+    if not has_eulerian_path(G):
+        return None
+
+    if is_eulerian(G):
+        return arbitrary_element(G)
+
+    if G.is_directed():
+        v1, v2 = (v for v in G if G.in_degree(v) != G.out_degree(v))
+        # Determines which is the 'start' node (as opposed to the 'end')
+        if G.out_degree(v1) > G.in_degree(v1):
+            return v1
+        else:
+            return v2
+
+    else:
+        # In an undirected graph, arbitrarily choose one of the two odd-degree nodes
+        start = next(v for v in G if G.degree(v) % 2 != 0)
+        return start
+
+
+def _simplegraph_eulerian_circuit(G, source):
+    if G.is_directed():
+        degree = G.out_degree
+        edges = G.out_edges
+    else:
+        degree = G.degree
+        edges = G.edges
+    vertex_stack = [source]
+    last_vertex = None
+    while vertex_stack:
+        current_vertex = vertex_stack[-1]
+        if degree(current_vertex) == 0:
+            if last_vertex is not None:
+                yield (last_vertex, current_vertex)
+            last_vertex = current_vertex
+            vertex_stack.pop()
+        else:
+            _, next_vertex = arbitrary_element(edges(current_vertex))
+            vertex_stack.append(next_vertex)
+            G.remove_edge(current_vertex, next_vertex)
+
+
+def _multigraph_eulerian_circuit(G, source):
+    if G.is_directed():
+        degree = G.out_degree
+        edges = G.out_edges
+    else:
+        degree = G.degree
+        edges = G.edges
+    vertex_stack = [(source, None)]
+    last_vertex = None
+    last_key = None
+    while vertex_stack:
+        current_vertex, current_key = vertex_stack[-1]
+        if degree(current_vertex) == 0:
+            if last_vertex is not None:
+                yield (last_vertex, current_vertex, last_key)
+            last_vertex, last_key = current_vertex, current_key
+            vertex_stack.pop()
+        else:
+            triple = arbitrary_element(edges(current_vertex, keys=True))
+            _, next_vertex, next_key = triple
+            vertex_stack.append((next_vertex, next_key))
+            G.remove_edge(current_vertex, next_vertex, next_key)


 @nx._dispatchable
@@ -137,7 +217,22 @@ def eulerian_circuit(G, source=None, keys=False):
         [0, 2, 1]

     """
-    pass
+    if not is_eulerian(G):
+        raise nx.NetworkXError("G is not Eulerian.")
+    if G.is_directed():
+        G = G.reverse()
+    else:
+        G = G.copy()
+    if source is None:
+        source = arbitrary_element(G)
+    if G.is_multigraph():
+        for u, v, k in _multigraph_eulerian_circuit(G, source):
+            if keys:
+                yield u, v, k
+            else:
+                yield u, v
+    else:
+        yield from _simplegraph_eulerian_circuit(G, source)


 @nx._dispatchable
@@ -203,7 +298,36 @@ def has_eulerian_path(G, source=None):
     is_eulerian
     eulerian_path
     """
-    pass
+    if nx.is_eulerian(G):
+        return True
+
+    if G.is_directed():
+        ins = G.in_degree
+        outs = G.out_degree
+        # Since we know it is not eulerian, outs - ins must be 1 for source
+        if source is not None and outs[source] - ins[source] != 1:
+            return False
+
+        unbalanced_ins = 0
+        unbalanced_outs = 0
+        for v in G:
+            if ins[v] - outs[v] == 1:
+                unbalanced_ins += 1
+            elif outs[v] - ins[v] == 1:
+                unbalanced_outs += 1
+            elif ins[v] != outs[v]:
+                return False
+
+        return (
+            unbalanced_ins <= 1 and unbalanced_outs <= 1 and nx.is_weakly_connected(G)
+        )
+    else:
+        # We know it is not eulerian, so degree of source must be odd.
+        if source is not None and G.degree[source] % 2 != 1:
+            return False
+
+        # An Euler path requires exactly two odd-degree nodes; zero odd-degree
+        # nodes would make the graph Eulerian, which was ruled out above.
+        return sum(d % 2 == 1 for v, d in G.degree()) == 2 and nx.is_connected(G)
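
A sketch of the undirected branch: a path graph has exactly two odd-degree endpoints, so it admits an Euler path but not an Euler circuit:

    import networkx as nx

    G = nx.path_graph(3)               # degrees 1, 2, 1
    nx.is_eulerian(G)                  # False
    nx.has_eulerian_path(G)            # True
    nx.has_eulerian_path(G, source=1)  # False: node 1 has even degree
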


 @nx._dispatchable
@@ -228,10 +352,40 @@ def eulerian_path(G, source=None, keys=False):
     Warning: If `source` provided is not the start node of an Euler path
     will raise error even if an Euler Path exists.
     """
-    pass
-
-
-@not_implemented_for('directed')
+    if not has_eulerian_path(G, source):
+        raise nx.NetworkXError("Graph has no Eulerian paths.")
+    if G.is_directed():
+        G = G.reverse()
+        if source is None or not nx.is_eulerian(G):
+            source = _find_path_start(G)
+        if G.is_multigraph():
+            for u, v, k in _multigraph_eulerian_circuit(G, source):
+                if keys:
+                    yield u, v, k
+                else:
+                    yield u, v
+        else:
+            yield from _simplegraph_eulerian_circuit(G, source)
+    else:
+        G = G.copy()
+        if source is None:
+            source = _find_path_start(G)
+        if G.is_multigraph():
+            if keys:
+                yield from reversed(
+                    [(v, u, k) for u, v, k in _multigraph_eulerian_circuit(G, source)]
+                )
+            else:
+                yield from reversed(
+                    [(v, u) for u, v, k in _multigraph_eulerian_circuit(G, source)]
+                )
+        else:
+            yield from reversed(
+                [(v, u) for u, v in _simplegraph_eulerian_circuit(G, source)]
+            )
+
+
+@not_implemented_for("directed")
 @nx._dispatchable(returns_graph=True)
 def eulerize(G):
     """Transforms a graph into an Eulerian graph.
@@ -274,4 +428,42 @@ def eulerize(G):
         True

     """
-    pass
+    if G.order() == 0:
+        raise nx.NetworkXPointlessConcept("Cannot Eulerize null graph")
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("G is not connected")
+    odd_degree_nodes = [n for n, d in G.degree() if d % 2 == 1]
+    G = nx.MultiGraph(G)
+    if len(odd_degree_nodes) == 0:
+        return G
+
+    # get all shortest paths between vertices of odd degree
+    odd_deg_pairs_paths = [
+        (m, {n: nx.shortest_path(G, source=m, target=n)})
+        for m, n in combinations(odd_degree_nodes, 2)
+    ]
+
+    # use the number of vertices in a graph + 1 as an upper bound on
+    # the maximum length of a path in G
+    upper_bound_on_max_path_length = len(G) + 1
+
+    # use "len(G) + 1 - len(P)",
+    # where P is a shortest path between vertices n and m,
+    # as edge-weights in a new graph
+    # store the paths in the graph for easy indexing later
+    Gp = nx.Graph()
+    for n, Ps in odd_deg_pairs_paths:
+        for m, P in Ps.items():
+            if n != m:
+                Gp.add_edge(
+                    m, n, weight=upper_bound_on_max_path_length - len(P), path=P
+                )
+
+    # find the minimum weight matching of edges in the weighted graph
+    best_matching = nx.Graph(list(nx.max_weight_matching(Gp)))
+
+    # duplicate each edge along each path in the set of paths in Gp
+    for m, n in best_matching.edges():
+        path = Gp[m][n]["path"]
+        G.add_edges_from(nx.utils.pairwise(path))
+    return G
diff --git a/networkx/algorithms/flow/boykovkolmogorov.py b/networkx/algorithms/flow/boykovkolmogorov.py
index 749df42e4..87290a928 100644
--- a/networkx/algorithms/flow/boykovkolmogorov.py
+++ b/networkx/algorithms/flow/boykovkolmogorov.py
@@ -3,15 +3,18 @@ Boykov-Kolmogorov algorithm for maximum flow problems.
 """
 from collections import deque
 from operator import itemgetter
+
 import networkx as nx
 from networkx.algorithms.flow.utils import build_residual_network
-__all__ = ['boykov_kolmogorov']
+
+__all__ = ["boykov_kolmogorov"]


-@nx._dispatchable(edge_attrs={'capacity': float('inf')}, returns_graph=True)
-def boykov_kolmogorov(G, s, t, capacity='capacity', residual=None,
-    value_only=False, cutoff=None):
-    """Find a maximum single-commodity flow using Boykov-Kolmogorov algorithm.
+@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True)
+def boykov_kolmogorov(
+    G, s, t, capacity="capacity", residual=None, value_only=False, cutoff=None
+):
+    r"""Find a maximum single-commodity flow using Boykov-Kolmogorov algorithm.

     This function returns the residual network resulting after computing
     the maximum flow. See below for details about the conventions
@@ -151,4 +154,216 @@ def boykov_kolmogorov(G, s, t, capacity='capacity', residual=None,
            https://web.archive.org/web/20170809091249/https://pub.ist.ac.at/~vnk/papers/thesis.pdf

     """
-    pass
+    R = boykov_kolmogorov_impl(G, s, t, capacity, residual, cutoff)
+    R.graph["algorithm"] = "boykov_kolmogorov"
+    nx._clear_cache(R)
+    return R
+
+
+def boykov_kolmogorov_impl(G, s, t, capacity, residual, cutoff):
+    if s not in G:
+        raise nx.NetworkXError(f"node {str(s)} not in graph")
+    if t not in G:
+        raise nx.NetworkXError(f"node {str(t)} not in graph")
+    if s == t:
+        raise nx.NetworkXError("source and sink are the same node")
+
+    if residual is None:
+        R = build_residual_network(G, capacity)
+    else:
+        R = residual
+
+    # Initialize/reset the residual network.
+    # This is way too slow
+    # nx.set_edge_attributes(R, 0, 'flow')
+    for u in R:
+        for e in R[u].values():
+            e["flow"] = 0
+
+    # Use an arbitrary high value as infinite. It is computed
+    # when building the residual network.
+    INF = R.graph["inf"]
+
+    if cutoff is None:
+        cutoff = INF
+
+    R_succ = R.succ
+    R_pred = R.pred
+
+    def grow():
+        """Bidirectional breadth-first search for the growth stage.
+
+        Returns a connecting edge, that is, an edge that connects
+        a node from the source search tree with a node from the
+        target search tree.
+        The first node in the connecting edge is always from the
+        source tree and the last node from the target tree.
+        """
+        while active:
+            u = active[0]
+            if u in source_tree:
+                this_tree = source_tree
+                other_tree = target_tree
+                neighbors = R_succ
+            else:
+                this_tree = target_tree
+                other_tree = source_tree
+                neighbors = R_pred
+            for v, attr in neighbors[u].items():
+                if attr["capacity"] - attr["flow"] > 0:
+                    if v not in this_tree:
+                        if v in other_tree:
+                            return (u, v) if this_tree is source_tree else (v, u)
+                        this_tree[v] = u
+                        dist[v] = dist[u] + 1
+                        timestamp[v] = timestamp[u]
+                        active.append(v)
+                    elif v in this_tree and _is_closer(u, v):
+                        this_tree[v] = u
+                        dist[v] = dist[u] + 1
+                        timestamp[v] = timestamp[u]
+            _ = active.popleft()
+        return None, None
+
+    def augment(u, v):
+        """Augmentation stage.
+
+        Reconstruct path and determine its residual capacity.
+        We start from a connecting edge, which links a node
+        from the source tree to a node from the target tree.
+        The connecting edge is the output of the grow function
+        and the input of this function.
+        """
+        attr = R_succ[u][v]
+        flow = min(INF, attr["capacity"] - attr["flow"])
+        path = [u]
+        # Trace a path from u to s in source_tree.
+        w = u
+        while w != s:
+            n = w
+            w = source_tree[n]
+            attr = R_pred[n][w]
+            flow = min(flow, attr["capacity"] - attr["flow"])
+            path.append(w)
+        path.reverse()
+        # Trace a path from v to t in target_tree.
+        path.append(v)
+        w = v
+        while w != t:
+            n = w
+            w = target_tree[n]
+            attr = R_succ[n][w]
+            flow = min(flow, attr["capacity"] - attr["flow"])
+            path.append(w)
+        # Augment flow along the path and check for saturated edges.
+        it = iter(path)
+        u = next(it)
+        these_orphans = []
+        for v in it:
+            R_succ[u][v]["flow"] += flow
+            R_succ[v][u]["flow"] -= flow
+            if R_succ[u][v]["flow"] == R_succ[u][v]["capacity"]:
+                if v in source_tree:
+                    source_tree[v] = None
+                    these_orphans.append(v)
+                if u in target_tree:
+                    target_tree[u] = None
+                    these_orphans.append(u)
+            u = v
+        orphans.extend(sorted(these_orphans, key=dist.get))
+        return flow
+
+    def adopt():
+        """Adoption stage.
+
+        Reconstruct search trees by adopting or discarding orphans.
+        During augmentation stage some edges got saturated and thus
+        the source and target search trees broke down to forests, with
+        orphans as roots of some of its trees. We have to reconstruct
+        the search trees rooted to source and target before we can grow
+        them again.
+        """
+        while orphans:
+            u = orphans.popleft()
+            if u in source_tree:
+                tree = source_tree
+                neighbors = R_pred
+            else:
+                tree = target_tree
+                neighbors = R_succ
+            nbrs = ((n, attr, dist[n]) for n, attr in neighbors[u].items() if n in tree)
+            for v, attr, d in sorted(nbrs, key=itemgetter(2)):
+                if attr["capacity"] - attr["flow"] > 0:
+                    if _has_valid_root(v, tree):
+                        tree[u] = v
+                        dist[u] = dist[v] + 1
+                        timestamp[u] = time
+                        break
+            else:
+                nbrs = (
+                    (n, attr, dist[n]) for n, attr in neighbors[u].items() if n in tree
+                )
+                for v, attr, d in sorted(nbrs, key=itemgetter(2)):
+                    if attr["capacity"] - attr["flow"] > 0:
+                        if v not in active:
+                            active.append(v)
+                    if tree[v] == u:
+                        tree[v] = None
+                        orphans.appendleft(v)
+                if u in active:
+                    active.remove(u)
+                del tree[u]
+
+    def _has_valid_root(n, tree):
+        path = []
+        v = n
+        while v is not None:
+            path.append(v)
+            if v in (s, t):
+                base_dist = 0
+                break
+            elif timestamp[v] == time:
+                base_dist = dist[v]
+                break
+            v = tree[v]
+        else:
+            return False
+        length = len(path)
+        for i, u in enumerate(path, 1):
+            dist[u] = base_dist + length - i
+            timestamp[u] = time
+        return True
+
+    def _is_closer(u, v):
+        return timestamp[v] <= timestamp[u] and dist[v] > dist[u] + 1
+
+    source_tree = {s: None}
+    target_tree = {t: None}
+    active = deque([s, t])
+    orphans = deque()
+    flow_value = 0
+    # data structures for the marking heuristic
+    time = 1
+    timestamp = {s: time, t: time}
+    dist = {s: 0, t: 0}
+    while flow_value < cutoff:
+        # Growth stage
+        u, v = grow()
+        if u is None:
+            break
+        time += 1
+        # Augmentation stage
+        flow_value += augment(u, v)
+        # Adoption stage
+        adopt()
+
+    if flow_value * 2 > INF:
+        raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.")
+
+    # Add source and target tree in a graph attribute.
+    # A partition that defines a minimum cut can be directly
+    # computed from the search trees as explained in the docstrings.
+    R.graph["trees"] = (source_tree, target_tree)
+    # Add the standard flow_value graph attribute.
+    R.graph["flow_value"] = flow_value
+    return R
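
A usage sketch mirroring the function's documented example; the min-cut partition is recoverable from `R.graph["trees"]` as noted above:

    import networkx as nx
    from networkx.algorithms.flow import boykov_kolmogorov

    G = nx.DiGraph()
    G.add_edge("x", "a", capacity=3.0)
    G.add_edge("x", "b", capacity=1.0)
    G.add_edge("a", "c", capacity=3.0)
    G.add_edge("b", "c", capacity=5.0)
    G.add_edge("b", "d", capacity=4.0)
    G.add_edge("d", "e", capacity=2.0)
    G.add_edge("c", "y", capacity=2.0)
    G.add_edge("e", "y", capacity=3.0)
    R = boykov_kolmogorov(G, "x", "y")
    R.graph["flow_value"]  # 3.0
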
diff --git a/networkx/algorithms/flow/capacityscaling.py b/networkx/algorithms/flow/capacityscaling.py
index e05a60b82..bf68565c5 100644
--- a/networkx/algorithms/flow/capacityscaling.py
+++ b/networkx/algorithms/flow/capacityscaling.py
@@ -1,34 +1,161 @@
 """
 Capacity scaling minimum cost flow algorithm.
 """
-__all__ = ['capacity_scaling']
+
+__all__ = ["capacity_scaling"]
+
 from itertools import chain
 from math import log
+
 import networkx as nx
+
 from ...utils import BinaryHeap, arbitrary_element, not_implemented_for


 def _detect_unboundedness(R):
     """Detect infinite-capacity negative cycles."""
-    pass
+    G = nx.DiGraph()
+    G.add_nodes_from(R)
+
+    # Value simulating infinity.
+    inf = R.graph["inf"]
+    # True infinity.
+    f_inf = float("inf")
+    for u in R:
+        for v, es in R[u].items():
+            # Compute the minimum weight of infinite-capacity (u, v) edges.
+            w = f_inf
+            for k, e in es.items():
+                if e["capacity"] == inf:
+                    w = min(w, e["weight"])
+            if w != f_inf:
+                G.add_edge(u, v, weight=w)

+    if nx.negative_edge_cycle(G):
+        raise nx.NetworkXUnbounded(
+            "Negative cost cycle of infinite capacity found. "
+            "Min cost flow may be unbounded below."
+        )

-@not_implemented_for('undirected')
+
+@not_implemented_for("undirected")
 def _build_residual_network(G, demand, capacity, weight):
     """Build a residual network and initialize a zero flow."""
-    pass
+    if sum(G.nodes[u].get(demand, 0) for u in G) != 0:
+        raise nx.NetworkXUnfeasible("Sum of the demands should be 0.")
+
+    R = nx.MultiDiGraph()
+    R.add_nodes_from(
+        (u, {"excess": -G.nodes[u].get(demand, 0), "potential": 0}) for u in G
+    )
+
+    inf = float("inf")
+    # Detect selfloops with infinite capacities and negative weights.
+    for u, v, e in nx.selfloop_edges(G, data=True):
+        if e.get(weight, 0) < 0 and e.get(capacity, inf) == inf:
+            raise nx.NetworkXUnbounded(
+                "Negative cost cycle of infinite capacity found. "
+                "Min cost flow may be unbounded below."
+            )
+
+    # Extract edges with positive capacities. Self loops excluded.
+    if G.is_multigraph():
+        edge_list = [
+            (u, v, k, e)
+            for u, v, k, e in G.edges(data=True, keys=True)
+            if u != v and e.get(capacity, inf) > 0
+        ]
+    else:
+        edge_list = [
+            (u, v, 0, e)
+            for u, v, e in G.edges(data=True)
+            if u != v and e.get(capacity, inf) > 0
+        ]
+    # Simulate infinity with the larger of the sum of absolute node imbalances
+    # and twice the sum of finite edge capacities, or any positive value if
+    # both sums are zero. This allows the infinite-capacity edges to be
+    # distinguished for unboundedness detection and to participate directly
+    # in residual capacity calculations.
+    inf = (
+        max(
+            sum(abs(R.nodes[u]["excess"]) for u in R),
+            2
+            * sum(
+                e[capacity]
+                for u, v, k, e in edge_list
+                if capacity in e and e[capacity] != inf
+            ),
+        )
+        or 1
+    )
+    for u, v, k, e in edge_list:
+        r = min(e.get(capacity, inf), inf)
+        w = e.get(weight, 0)
+        # Add both (u, v) and (v, u) into the residual network marked with the
+        # original key. (key[1] == True) indicates the (u, v) is in the
+        # original network.
+        R.add_edge(u, v, key=(k, True), capacity=r, weight=w, flow=0)
+        R.add_edge(v, u, key=(k, False), capacity=0, weight=-w, flow=0)
+
+    # Record the value simulating infinity.
+    R.graph["inf"] = inf
+
+    _detect_unboundedness(R)
+
+    return R


 def _build_flow_dict(G, R, capacity, weight):
     """Build a flow dictionary from a residual network."""
-    pass
+    inf = float("inf")
+    flow_dict = {}
+    if G.is_multigraph():
+        for u in G:
+            flow_dict[u] = {}
+            for v, es in G[u].items():
+                flow_dict[u][v] = {
+                    # Always saturate negative selfloops.
+                    k: (
+                        0
+                        if (
+                            u != v or e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0
+                        )
+                        else e[capacity]
+                    )
+                    for k, e in es.items()
+                }
+            for v, es in R[u].items():
+                if v in flow_dict[u]:
+                    flow_dict[u][v].update(
+                        (k[0], e["flow"]) for k, e in es.items() if e["flow"] > 0
+                    )
+    else:
+        for u in G:
+            flow_dict[u] = {
+                # Always saturate negative selfloops.
+                v: (
+                    0
+                    if (u != v or e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0)
+                    else e[capacity]
+                )
+                for v, e in G[u].items()
+            }
+            flow_dict[u].update(
+                (v, e["flow"])
+                for v, es in R[u].items()
+                for e in es.values()
+                if e["flow"] > 0
+            )
+    return flow_dict


-@nx._dispatchable(node_attrs='demand', edge_attrs={'capacity': float('inf'),
-    'weight': 0})
-def capacity_scaling(G, demand='demand', capacity='capacity', weight=
-    'weight', heap=BinaryHeap):
-    """Find a minimum cost flow satisfying all demands in digraph G.
+@nx._dispatchable(
+    node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0}
+)
+def capacity_scaling(
+    G, demand="demand", capacity="capacity", weight="weight", heap=BinaryHeap
+):
+    r"""Find a minimum cost flow satisfying all demands in digraph G.

     This is a capacity scaling successive shortest augmenting path algorithm.

@@ -155,4 +282,126 @@ def capacity_scaling(G, demand='demand', capacity='capacity', weight=
     >>> flowDict
     {'p': {'q': 2, 'a': 2}, 'q': {'d': 1}, 'a': {'t': 4}, 'd': {'w': 2}, 't': {'q': 1, 'w': 1}, 'w': {}}
     """
-    pass
+    R = _build_residual_network(G, demand, capacity, weight)
+
+    inf = float("inf")
+    # Account cost of negative selfloops.
+    flow_cost = sum(
+        0
+        if e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0
+        else e[capacity] * e[weight]
+        for u, v, e in nx.selfloop_edges(G, data=True)
+    )
+
+    # Determine the maximum edge capacity.
+    wmax = max(chain([-inf], (e["capacity"] for u, v, e in R.edges(data=True))))
+    if wmax == -inf:
+        # Residual network has no edges.
+        return flow_cost, _build_flow_dict(G, R, capacity, weight)
+
+    R_nodes = R.nodes
+    R_succ = R.succ
+
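+    # Begin with Δ as the largest power of two not exceeding the maximum
+    # residual capacity; each scaling phase halves Δ until it reaches 1.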
+    delta = 2 ** int(log(wmax, 2))
+    while delta >= 1:
+        # Saturate Δ-residual edges with negative reduced costs to achieve
+        # Δ-optimality.
+        for u in R:
+            p_u = R_nodes[u]["potential"]
+            for v, es in R_succ[u].items():
+                for k, e in es.items():
+                    if e["weight"] - p_u + R_nodes[v]["potential"] < 0:
+                        flow = e["capacity"] - e["flow"]
+                        if flow >= delta:
+                            e["flow"] += flow
+                            R_succ[v][u][(k[0], not k[1])]["flow"] -= flow
+                            R_nodes[u]["excess"] -= flow
+                            R_nodes[v]["excess"] += flow
+        # Determine the Δ-active nodes.
+        S = set()
+        T = set()
+        S_add = S.add
+        S_remove = S.remove
+        T_add = T.add
+        T_remove = T.remove
+        for u in R:
+            excess = R_nodes[u]["excess"]
+            if excess >= delta:
+                S_add(u)
+            elif excess <= -delta:
+                T_add(u)
+        # Repeatedly augment flow from S to T along shortest paths until
+        # Δ-feasibility is achieved.
+        while S and T:
+            s = arbitrary_element(S)
+            t = None
+            # Search for a shortest path in terms of reduced costs from s to
+            # any t in T in the Δ-residual network.
+            d = {}
+            pred = {s: None}
+            h = heap()
+            h_insert = h.insert
+            h_get = h.get
+            h_insert(s, 0)
+            while h:
+                u, d_u = h.pop()
+                d[u] = d_u
+                if u in T:
+                    # Path found.
+                    t = u
+                    break
+                p_u = R_nodes[u]["potential"]
+                for v, es in R_succ[u].items():
+                    if v in d:
+                        continue
+                    wmin = inf
+                    # Find the minimum-weighted (u, v) Δ-residual edge.
+                    for k, e in es.items():
+                        if e["capacity"] - e["flow"] >= delta:
+                            w = e["weight"]
+                            if w < wmin:
+                                wmin = w
+                                kmin = k
+                                emin = e
+                    if wmin == inf:
+                        continue
+                    # Update the distance label of v.
+                    d_v = d_u + wmin - p_u + R_nodes[v]["potential"]
+                    if h_insert(v, d_v):
+                        pred[v] = (u, kmin, emin)
+            if t is not None:
+                # Augment Δ units of flow from s to t.
+                while u != s:
+                    v = u
+                    u, k, e = pred[v]
+                    e["flow"] += delta
+                    R_succ[v][u][(k[0], not k[1])]["flow"] -= delta
+                # Account node excess and deficit.
+                R_nodes[s]["excess"] -= delta
+                R_nodes[t]["excess"] += delta
+                if R_nodes[s]["excess"] < delta:
+                    S_remove(s)
+                if R_nodes[t]["excess"] > -delta:
+                    T_remove(t)
+                # Update node potentials.
+                d_t = d[t]
+                for u, d_u in d.items():
+                    R_nodes[u]["potential"] -= d_u - d_t
+            else:
+                # Path not found.
+                S_remove(s)
+        delta //= 2
+
+    if any(R.nodes[u]["excess"] != 0 for u in R):
+        raise nx.NetworkXUnfeasible("No flow satisfying all demands.")
+
+    # Calculate the flow cost.
+    for u in R:
+        for v, es in R_succ[u].items():
+            for e in es.values():
+                flow = e["flow"]
+                if flow > 0:
+                    flow_cost += flow * e["weight"]
+
+    return flow_cost, _build_flow_dict(G, R, capacity, weight)
diff --git a/networkx/algorithms/flow/dinitz_alg.py b/networkx/algorithms/flow/dinitz_alg.py
index 533a4a45a..bcc08fe48 100644
--- a/networkx/algorithms/flow/dinitz_alg.py
+++ b/networkx/algorithms/flow/dinitz_alg.py
@@ -2,15 +2,16 @@
 Dinitz' algorithm for maximum flow problems.
 """
 from collections import deque
+
 import networkx as nx
 from networkx.algorithms.flow.utils import build_residual_network
 from networkx.utils import pairwise
-__all__ = ['dinitz']
+
+__all__ = ["dinitz"]


-@nx._dispatchable(edge_attrs={'capacity': float('inf')}, returns_graph=True)
-def dinitz(G, s, t, capacity='capacity', residual=None, value_only=False,
-    cutoff=None):
+@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True)
+def dinitz(G, s, t, capacity="capacity", residual=None, value_only=False, cutoff=None):
     """Find a maximum single-commodity flow using Dinitz' algorithm.

     This function returns the residual network resulting after computing
@@ -132,4 +133,105 @@ def dinitz(G, s, t, capacity='capacity', residual=None, value_only=False,
            https://doi.org/10.1007/11685654_10

     """
-    pass
+    R = dinitz_impl(G, s, t, capacity, residual, cutoff)
+    R.graph["algorithm"] = "dinitz"
+    nx._clear_cache(R)
+    return R
+
+
+def dinitz_impl(G, s, t, capacity, residual, cutoff):
+    if s not in G:
+        raise nx.NetworkXError(f"node {str(s)} not in graph")
+    if t not in G:
+        raise nx.NetworkXError(f"node {str(t)} not in graph")
+    if s == t:
+        raise nx.NetworkXError("source and sink are the same node")
+
+    if residual is None:
+        R = build_residual_network(G, capacity)
+    else:
+        R = residual
+
+    # Initialize/reset the residual network.
+    for u in R:
+        for e in R[u].values():
+            e["flow"] = 0
+
+    # Use an arbitrarily high value as infinity. It is computed
+    # when building the residual network.
+    INF = R.graph["inf"]
+
+    if cutoff is None:
+        cutoff = INF
+
+    R_succ = R.succ
+    R_pred = R.pred
+
+    def breadth_first_search():
+        parents = {}
+        vertex_dist = {s: 0}
+        queue = deque([(s, 0)])
+        # Record all the potential edges of shortest augmenting paths
+        while queue:
+            if t in parents:
+                break
+            u, dist = queue.popleft()
+            for v, attr in R_succ[u].items():
+                if attr["capacity"] - attr["flow"] > 0:
+                    if v in parents:
+                        if vertex_dist[v] == dist + 1:
+                            parents[v].append(u)
+                    else:
+                        parents[v] = deque([u])
+                        vertex_dist[v] = dist + 1
+                        queue.append((v, dist + 1))
+        return parents
+
+    def depth_first_search(parents):
+        """Build a path using DFS starting from the sink."""
+        # DFS to find all the shortest augmenting paths
+        total_flow = 0
+        u = t
+        # path also functions as a stack
+        path = [u]
+        # The loop ends with no augmenting path left in the layered graph
+        while True:
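+            # Advance along the first remaining parent edge; if u has no
+            # parents left in the layered graph, backtrack and prune u.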
+            if len(parents[u]) > 0:
+                v = parents[u][0]
+                path.append(v)
+            else:
+                path.pop()
+                if len(path) == 0:
+                    break
+                v = path[-1]
+                parents[v].popleft()
+            # Augment the flow along the path found
+            if v == s:
+                flow = INF
+                for u, v in pairwise(path):
+                    flow = min(flow, R_pred[u][v]["capacity"] - R_pred[u][v]["flow"])
+                for u, v in pairwise(reversed(path)):
+                    R_pred[v][u]["flow"] += flow
+                    R_pred[u][v]["flow"] -= flow
+                    # Find the proper node to continue the search
+                    if R_pred[v][u]["capacity"] - R_pred[v][u]["flow"] == 0:
+                        parents[v].popleft()
+                        while path[-1] != v:
+                            path.pop()
+                total_flow += flow
+                v = path[-1]
+            u = v
+        return total_flow
+
+    flow_value = 0
+    while flow_value < cutoff:
+        parents = breadth_first_search()
+        if t not in parents:
+            break
+        this_flow = depth_first_search(parents)
+        if this_flow * 2 > INF:
+            raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.")
+        flow_value += this_flow
+
+    R.graph["flow_value"] = flow_value
+    return R
diff --git a/networkx/algorithms/flow/edmondskarp.py b/networkx/algorithms/flow/edmondskarp.py
index d89ce70a0..500632683 100644
--- a/networkx/algorithms/flow/edmondskarp.py
+++ b/networkx/algorithms/flow/edmondskarp.py
@@ -1,24 +1,126 @@
 """
 Edmonds-Karp algorithm for maximum flow problems.
 """
+
 import networkx as nx
 from networkx.algorithms.flow.utils import build_residual_network
-__all__ = ['edmonds_karp']
+
+__all__ = ["edmonds_karp"]


 def edmonds_karp_core(R, s, t, cutoff):
     """Implementation of the Edmonds-Karp algorithm."""
-    pass
+    R_nodes = R.nodes
+    R_pred = R.pred
+    R_succ = R.succ
+
+    inf = R.graph["inf"]
+
+    def augment(path):
+        """Augment flow along a path from s to t."""
+        # Determine the path residual capacity.
+        flow = inf
+        it = iter(path)
+        u = next(it)
+        for v in it:
+            attr = R_succ[u][v]
+            flow = min(flow, attr["capacity"] - attr["flow"])
+            u = v
+        if flow * 2 > inf:
+            raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.")
+        # Augment flow along the path.
+        it = iter(path)
+        u = next(it)
+        for v in it:
+            R_succ[u][v]["flow"] += flow
+            R_succ[v][u]["flow"] -= flow
+            u = v
+        return flow
+
+    def bidirectional_bfs():
+        """Bidirectional breadth-first search for an augmenting path."""
+        pred = {s: None}
+        q_s = [s]
+        succ = {t: None}
+        q_t = [t]
+        while True:
+            q = []
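+            # Grow the smaller of the two frontiers to keep the search balanced.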
+            if len(q_s) <= len(q_t):
+                for u in q_s:
+                    for v, attr in R_succ[u].items():
+                        if v not in pred and attr["flow"] < attr["capacity"]:
+                            pred[v] = u
+                            if v in succ:
+                                return v, pred, succ
+                            q.append(v)
+                if not q:
+                    return None, None, None
+                q_s = q
+            else:
+                for u in q_t:
+                    for v, attr in R_pred[u].items():
+                        if v not in succ and attr["flow"] < attr["capacity"]:
+                            succ[v] = u
+                            if v in pred:
+                                return v, pred, succ
+                            q.append(v)
+                if not q:
+                    return None, None, None
+                q_t = q
+
+    # Look for shortest augmenting paths using breadth-first search.
+    flow_value = 0
+    while flow_value < cutoff:
+        v, pred, succ = bidirectional_bfs()
+        if pred is None:
+            break
+        path = [v]
+        # Trace a path from s to v.
+        u = v
+        while u != s:
+            u = pred[u]
+            path.append(u)
+        path.reverse()
+        # Trace a path from v to t.
+        u = v
+        while u != t:
+            u = succ[u]
+            path.append(u)
+        flow_value += augment(path)
+
+    return flow_value


 def edmonds_karp_impl(G, s, t, capacity, residual, cutoff):
     """Implementation of the Edmonds-Karp algorithm."""
-    pass
-
-
-@nx._dispatchable(edge_attrs={'capacity': float('inf')}, returns_graph=True)
-def edmonds_karp(G, s, t, capacity='capacity', residual=None, value_only=
-    False, cutoff=None):
+    if s not in G:
+        raise nx.NetworkXError(f"node {str(s)} not in graph")
+    if t not in G:
+        raise nx.NetworkXError(f"node {str(t)} not in graph")
+    if s == t:
+        raise nx.NetworkXError("source and sink are the same node")
+
+    if residual is None:
+        R = build_residual_network(G, capacity)
+    else:
+        R = residual
+
+    # Initialize/reset the residual network.
+    for u in R:
+        for e in R[u].values():
+            e["flow"] = 0
+
+    if cutoff is None:
+        cutoff = float("inf")
+    R.graph["flow_value"] = edmonds_karp_core(R, s, t, cutoff)
+
+    return R
+
+
+@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True)
+def edmonds_karp(
+    G, s, t, capacity="capacity", residual=None, value_only=False, cutoff=None
+):
     """Find a maximum single-commodity flow using the Edmonds-Karp algorithm.

     This function returns the residual network resulting after computing
@@ -133,4 +235,7 @@ def edmonds_karp(G, s, t, capacity='capacity', residual=None, value_only=
     True

     """
-    pass
+    R = edmonds_karp_impl(G, s, t, capacity, residual, cutoff)
+    R.graph["algorithm"] = "edmonds_karp"
+    nx._clear_cache(R)
+    return R
diff --git a/networkx/algorithms/flow/gomory_hu.py b/networkx/algorithms/flow/gomory_hu.py
index 0a43fa7cb..951abaeb5 100644
--- a/networkx/algorithms/flow/gomory_hu.py
+++ b/networkx/algorithms/flow/gomory_hu.py
@@ -3,16 +3,19 @@ Gomory-Hu tree of undirected Graphs.
 """
 import networkx as nx
 from networkx.utils import not_implemented_for
+
 from .edmondskarp import edmonds_karp
 from .utils import build_residual_network
+
 default_flow_func = edmonds_karp
-__all__ = ['gomory_hu_tree']
+
+__all__ = ["gomory_hu_tree"]


-@not_implemented_for('directed')
-@nx._dispatchable(edge_attrs={'capacity': float('inf')}, returns_graph=True)
-def gomory_hu_tree(G, capacity='capacity', flow_func=None):
-    """Returns the Gomory-Hu tree of an undirected graph G.
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True)
+def gomory_hu_tree(G, capacity="capacity", flow_func=None):
+    r"""Returns the Gomory-Hu tree of an undirected graph G.

     A Gomory-Hu tree of an undirected graph with capacities is a
     weighted tree that represents the minimum s-t cuts for all s-t
@@ -127,4 +130,48 @@ def gomory_hu_tree(G, capacity='capacity', flow_func=None):
            SIAM J Comput 19(1):143-155, 1990.

     """
-    pass
+    if flow_func is None:
+        flow_func = default_flow_func
+
+    if len(G) == 0:  # empty graph
+        msg = "Empty Graph does not have a Gomory-Hu tree representation"
+        raise nx.NetworkXError(msg)
+
+    # Start the tree as a star graph with an arbitrary node at the center
+    tree = {}
+    labels = {}
+    iter_nodes = iter(G)
+    root = next(iter_nodes)
+    for n in iter_nodes:
+        tree[n] = root
+
+    # Reuse residual network
+    R = build_residual_network(G, capacity)
+
+    # For all the leaves in the star graph tree (that is n-1 nodes).
+    for source in tree:
+        # Find neighbor in the tree
+        target = tree[source]
+        # compute minimum cut
+        cut_value, partition = nx.minimum_cut(
+            G, source, target, capacity=capacity, flow_func=flow_func, residual=R
+        )
+        labels[(source, target)] = cut_value
+        # Update the tree
+        # Source will always be in partition[0] and target in partition[1]
+        for node in partition[0]:
+            if node != source and node in tree and tree[node] == target:
+                tree[node] = source
+                labels[node, source] = labels.get((node, target), cut_value)
+        # If the cut also separates target from its own tree parent, insert
+        # source between them and transfer the corresponding cut label.
+        if target != root and tree[target] in partition[0]:
+            labels[source, tree[target]] = labels[target, tree[target]]
+            labels[target, source] = cut_value
+            tree[source] = tree[target]
+            tree[target] = source
+
+    # Build the tree
+    T = nx.Graph()
+    T.add_nodes_from(G)
+    T.add_weighted_edges_from((u, v, labels[u, v]) for u, v in tree.items())
+    return T
diff --git a/networkx/algorithms/flow/maxflow.py b/networkx/algorithms/flow/maxflow.py
index 97b2c4c3e..96bca029f 100644
--- a/networkx/algorithms/flow/maxflow.py
+++ b/networkx/algorithms/flow/maxflow.py
@@ -2,19 +2,22 @@
 Maximum flow (and minimum cut) algorithms on capacitated graphs.
 """
 import networkx as nx
+
 from .boykovkolmogorov import boykov_kolmogorov
 from .dinitz_alg import dinitz
 from .edmondskarp import edmonds_karp
 from .preflowpush import preflow_push
 from .shortestaugmentingpath import shortest_augmenting_path
 from .utils import build_flow_dict
+
+# Define the default flow function for computing maximum flow.
 default_flow_func = preflow_push
-__all__ = ['maximum_flow', 'maximum_flow_value', 'minimum_cut',
-    'minimum_cut_value']

+__all__ = ["maximum_flow", "maximum_flow_value", "minimum_cut", "minimum_cut_value"]

-@nx._dispatchable(graphs='flowG', edge_attrs={'capacity': float('inf')})
-def maximum_flow(flowG, _s, _t, capacity='capacity', flow_func=None, **kwargs):
+
+@nx._dispatchable(graphs="flowG", edge_attrs={"capacity": float("inf")})
+def maximum_flow(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs):
     """Find a maximum single-commodity flow.

     Parameters
@@ -141,12 +144,25 @@ def maximum_flow(flowG, _s, _t, capacity='capacity', flow_func=None, **kwargs):
     True

     """
-    pass
+    if flow_func is None:
+        if kwargs:
+            raise nx.NetworkXError(
+                "You have to explicitly set a flow_func if"
+                " you need to pass parameters via kwargs."
+            )
+        flow_func = default_flow_func
+
+    if not callable(flow_func):
+        raise nx.NetworkXError("flow_func has to be callable.")

+    R = flow_func(flowG, _s, _t, capacity=capacity, value_only=False, **kwargs)
+    flow_dict = build_flow_dict(flowG, R)

-@nx._dispatchable(graphs='flowG', edge_attrs={'capacity': float('inf')})
-def maximum_flow_value(flowG, _s, _t, capacity='capacity', flow_func=None,
-    **kwargs):
+    return (R.graph["flow_value"], flow_dict)
+
+
+@nx._dispatchable(graphs="flowG", edge_attrs={"capacity": float("inf")})
+def maximum_flow_value(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs):
     """Find the value of maximum single-commodity flow.

     Parameters
@@ -267,11 +283,24 @@ def maximum_flow_value(flowG, _s, _t, capacity='capacity', flow_func=None,
     True

     """
-    pass
+    if flow_func is None:
+        if kwargs:
+            raise nx.NetworkXError(
+                "You have to explicitly set a flow_func if"
+                " you need to pass parameters via kwargs."
+            )
+        flow_func = default_flow_func
+
+    if not callable(flow_func):
+        raise nx.NetworkXError("flow_func has to be callable.")
+
+    R = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs)

+    return R.graph["flow_value"]

-@nx._dispatchable(graphs='flowG', edge_attrs={'capacity': float('inf')})
-def minimum_cut(flowG, _s, _t, capacity='capacity', flow_func=None, **kwargs):
+
+@nx._dispatchable(graphs="flowG", edge_attrs={"capacity": float("inf")})
+def minimum_cut(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs):
     """Compute the value and the node partition of a minimum (s, t)-cut.

     Use the max-flow min-cut theorem, i.e., the capacity of a minimum
@@ -403,12 +432,39 @@ def minimum_cut(flowG, _s, _t, capacity='capacity', flow_func=None, **kwargs):
     True

     """
-    pass
-
-
-@nx._dispatchable(graphs='flowG', edge_attrs={'capacity': float('inf')})
-def minimum_cut_value(flowG, _s, _t, capacity='capacity', flow_func=None,
-    **kwargs):
+    if flow_func is None:
+        if kwargs:
+            raise nx.NetworkXError(
+                "You have to explicitly set a flow_func if"
+                " you need to pass parameters via kwargs."
+            )
+        flow_func = default_flow_func
+
+    if not callable(flow_func):
+        raise nx.NetworkXError("flow_func has to be callable.")
+
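+    # The cut below is derived from a true maximum flow, so an
+    # early-termination cutoff is rejected for the default preflow_push routine.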
+    if kwargs.get("cutoff") is not None and flow_func is preflow_push:
+        raise nx.NetworkXError("cutoff should not be specified.")
+
+    R = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs)
+    # Remove saturated edges from the residual network
+    cutset = [(u, v, d) for u, v, d in R.edges(data=True) if d["flow"] == d["capacity"]]
+    R.remove_edges_from(cutset)
+
+    # The nodes that can still reach the sink in the residual network form
+    # one side of the partition; the remaining nodes, cut off from _t, form
+    # the source side of the minimum cut.
+    non_reachable = set(dict(nx.shortest_path_length(R, target=_t)))
+    partition = (set(flowG) - non_reachable, non_reachable)
+    # Finally, add the cutset edges back to the residual network so that
+    # it remains reusable.
+    R.add_edges_from(cutset)
+    return (R.graph["flow_value"], partition)
+
+
+@nx._dispatchable(graphs="flowG", edge_attrs={"capacity": float("inf")})
+def minimum_cut_value(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs):
     """Compute the value of a minimum (s, t)-cut.

     Use the max-flow min-cut theorem, i.e., the capacity of a minimum
@@ -526,4 +582,20 @@ def minimum_cut_value(flowG, _s, _t, capacity='capacity', flow_func=None,
     True

     """
-    pass
+    if flow_func is None:
+        if kwargs:
+            raise nx.NetworkXError(
+                "You have to explicitly set a flow_func if"
+                " you need to pass parameters via kwargs."
+            )
+        flow_func = default_flow_func
+
+    if not callable(flow_func):
+        raise nx.NetworkXError("flow_func has to be callable.")
+
+    if kwargs.get("cutoff") is not None and flow_func is preflow_push:
+        raise nx.NetworkXError("cutoff should not be specified.")
+
+    R = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs)
+
+    return R.graph["flow_value"]
diff --git a/networkx/algorithms/flow/mincost.py b/networkx/algorithms/flow/mincost.py
index 99cd8ac35..2f9390d7a 100644
--- a/networkx/algorithms/flow/mincost.py
+++ b/networkx/algorithms/flow/mincost.py
@@ -1,16 +1,17 @@
 """
 Minimum cost flow algorithms on directed connected graphs.
 """
-__all__ = ['min_cost_flow_cost', 'min_cost_flow', 'cost_of_flow',
-    'max_flow_min_cost']
+
+__all__ = ["min_cost_flow_cost", "min_cost_flow", "cost_of_flow", "max_flow_min_cost"]
+
 import networkx as nx


-@nx._dispatchable(node_attrs='demand', edge_attrs={'capacity': float('inf'),
-    'weight': 0})
-def min_cost_flow_cost(G, demand='demand', capacity='capacity', weight='weight'
-    ):
-    """Find the cost of a minimum cost flow satisfying all demands in digraph G.
+@nx._dispatchable(
+    node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0}
+)
+def min_cost_flow_cost(G, demand="demand", capacity="capacity", weight="weight"):
+    r"""Find the cost of a minimum cost flow satisfying all demands in digraph G.

     G is a digraph with edge costs and capacities and in which nodes
     have demand, i.e., they want to send or receive some amount of
@@ -95,13 +96,14 @@ def min_cost_flow_cost(G, demand='demand', capacity='capacity', weight='weight'
     >>> flowCost
     24
     """
-    pass
+    return nx.network_simplex(G, demand=demand, capacity=capacity, weight=weight)[0]


-@nx._dispatchable(node_attrs='demand', edge_attrs={'capacity': float('inf'),
-    'weight': 0})
-def min_cost_flow(G, demand='demand', capacity='capacity', weight='weight'):
-    """Returns a minimum cost flow satisfying all demands in digraph G.
+@nx._dispatchable(
+    node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0}
+)
+def min_cost_flow(G, demand="demand", capacity="capacity", weight="weight"):
+    r"""Returns a minimum cost flow satisfying all demands in digraph G.

     G is a digraph with edge costs and capacities and in which nodes
     have demand, i.e., they want to send or receive some amount of
@@ -187,11 +189,11 @@ def min_cost_flow(G, demand='demand', capacity='capacity', weight='weight'):
     >>> flowDict
     {'a': {'b': 4, 'c': 1}, 'd': {}, 'b': {'d': 4}, 'c': {'d': 1}}
     """
-    pass
+    return nx.network_simplex(G, demand=demand, capacity=capacity, weight=weight)[1]


-@nx._dispatchable(edge_attrs={'weight': 0})
-def cost_of_flow(G, flowDict, weight='weight'):
+@nx._dispatchable(edge_attrs={"weight": 0})
+def cost_of_flow(G, flowDict, weight="weight"):
     """Compute the cost of the flow given by flowDict on graph G.

     Note that this function does not check for the validity of the
@@ -247,11 +249,11 @@ def cost_of_flow(G, flowDict, weight='weight'):
     >>> nx.cost_of_flow(G, flowDict)
     24
     """
-    pass
+    return sum(flowDict[u][v] * d.get(weight, 0) for u, v, d in G.edges(data=True))


-@nx._dispatchable(edge_attrs={'capacity': float('inf'), 'weight': 0})
-def max_flow_min_cost(G, s, t, capacity='capacity', weight='weight'):
+@nx._dispatchable(edge_attrs={"capacity": float("inf"), "weight": 0})
+def max_flow_min_cost(G, s, t, capacity="capacity", weight="weight"):
     """Returns a maximum (s, t)-flow of minimum cost.

     G is a digraph with edge costs and capacities. There is a source
@@ -347,4 +349,8 @@ def max_flow_min_cost(G, s, t, capacity='capacity', weight='weight'):
     True

     """
-    pass
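+    # Reduce to a min-cost flow problem: pin the flow at the maximum value by
+    # assigning s a supply (negative demand) and t a matching demand.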
+    maxFlow = nx.maximum_flow_value(G, s, t, capacity=capacity)
+    H = nx.DiGraph(G)
+    H.add_node(s, demand=-maxFlow)
+    H.add_node(t, demand=maxFlow)
+    return min_cost_flow(H, capacity=capacity, weight=weight)
diff --git a/networkx/algorithms/flow/networksimplex.py b/networkx/algorithms/flow/networksimplex.py
index 1a3d1ea4c..a9822d968 100644
--- a/networkx/algorithms/flow/networksimplex.py
+++ b/networkx/algorithms/flow/networksimplex.py
@@ -1,34 +1,42 @@
 """
 Minimum cost flow algorithms on directed connected graphs.
 """
-__all__ = ['network_simplex']
+
+__all__ = ["network_simplex"]
+
 from itertools import chain, islice, repeat
 from math import ceil, sqrt
+
 import networkx as nx
 from networkx.utils import not_implemented_for


 class _DataEssentialsAndFunctions:
-
-    def __init__(self, G, multigraph, demand='demand', capacity='capacity',
-        weight='weight'):
-        self.node_list = list(G)
-        self.node_indices = {u: i for i, u in enumerate(self.node_list)}
-        self.node_demands = [G.nodes[u].get(demand, 0) for u in self.node_list]
-        self.edge_sources = []
-        self.edge_targets = []
+    def __init__(
+        self, G, multigraph, demand="demand", capacity="capacity", weight="weight"
+    ):
+        # Number all nodes and edges and hereafter reference them using ONLY their numbers
+        self.node_list = list(G)  # nodes
+        self.node_indices = {u: i for i, u in enumerate(self.node_list)}  # node indices
+        self.node_demands = [
+            G.nodes[u].get(demand, 0) for u in self.node_list
+        ]  # node demands
+
+        self.edge_sources = []  # edge sources
+        self.edge_targets = []  # edge targets
         if multigraph:
-            self.edge_keys = []
-        self.edge_indices = {}
-        self.edge_capacities = []
-        self.edge_weights = []
+            self.edge_keys = []  # edge keys
+        self.edge_indices = {}  # edge indices
+        self.edge_capacities = []  # edge capacities
+        self.edge_weights = []  # edge weights
+
         if not multigraph:
             edges = G.edges(data=True)
         else:
             edges = G.edges(data=True, keys=True)
-        inf = float('inf')
-        edges = (e for e in edges if e[0] != e[1] and e[-1].get(capacity,
-            inf) != 0)
+
+        inf = float("inf")
+        edges = (e for e in edges if e[0] != e[1] and e[-1].get(capacity, inf) != 0)
         for i, e in enumerate(edges):
             self.edge_sources.append(self.node_indices[e[0]])
             self.edge_targets.append(self.node_indices[e[1]])
@@ -37,28 +45,77 @@ class _DataEssentialsAndFunctions:
             self.edge_indices[e[:-1]] = i
             self.edge_capacities.append(e[-1].get(capacity, inf))
             self.edge_weights.append(e[-1].get(weight, 0))
-        self.edge_count = None
-        self.edge_flow = None
-        self.node_potentials = None
-        self.parent = None
-        self.parent_edge = None
-        self.subtree_size = None
-        self.next_node_dft = None
-        self.prev_node_dft = None
-        self.last_descendent_dft = None
-        self._spanning_tree_initialized = False
+
+        # spanning tree specific data to be initialized
+
+        self.edge_count = None  # number of edges
+        self.edge_flow = None  # edge flows
+        self.node_potentials = None  # node potentials
+        self.parent = None  # parent nodes
+        self.parent_edge = None  # edges to parents
+        self.subtree_size = None  # subtree sizes
+        self.next_node_dft = None  # next nodes in depth-first thread
+        self.prev_node_dft = None  # previous nodes in depth-first thread
+        self.last_descendent_dft = None  # last descendants in depth-first thread
+        self._spanning_tree_initialized = (
+            False  # False until initialize_spanning_tree() is called
+        )
+
+    def initialize_spanning_tree(self, n, faux_inf):
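+        # Initial strongly feasible spanning tree: every node hangs off the
+        # artificial root by its dummy edge, which carries |demand| units.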
+        self.edge_count = len(self.edge_indices)  # number of edges
+        self.edge_flow = list(
+            chain(repeat(0, self.edge_count), (abs(d) for d in self.node_demands))
+        )  # edge flows
+        self.node_potentials = [
+            faux_inf if d <= 0 else -faux_inf for d in self.node_demands
+        ]  # node potentials
+        self.parent = list(chain(repeat(-1, n), [None]))  # parent nodes
+        self.parent_edge = list(
+            range(self.edge_count, self.edge_count + n)
+        )  # edges to parents
+        self.subtree_size = list(chain(repeat(1, n), [n + 1]))  # subtree sizes
+        self.next_node_dft = list(
+            chain(range(1, n), [-1, 0])
+        )  # next nodes in depth-first thread
+        self.prev_node_dft = list(range(-1, n))  # previous nodes in depth-first thread
+        self.last_descendent_dft = list(
+            chain(range(n), [n - 1])
+        )  # last descendants in depth-first thread
+        self._spanning_tree_initialized = True  # True only if all the assignments pass

     def find_apex(self, p, q):
         """
         Find the lowest common ancestor of nodes p and q in the spanning tree.
         """
-        pass
+        size_p = self.subtree_size[p]
+        size_q = self.subtree_size[q]
+        while True:
+            while size_p < size_q:
+                p = self.parent[p]
+                size_p = self.subtree_size[p]
+            while size_p > size_q:
+                q = self.parent[q]
+                size_q = self.subtree_size[q]
+            if size_p == size_q:
+                if p != q:
+                    p = self.parent[p]
+                    size_p = self.subtree_size[p]
+                    q = self.parent[q]
+                    size_q = self.subtree_size[q]
+                else:
+                    return p

     def trace_path(self, p, w):
         """
         Returns the nodes and edges on the path from node p to its ancestor w.
         """
-        pass
+        Wn = [p]
+        We = []
+        while p != w:
+            We.append(self.parent_edge[p])
+            p = self.parent[p]
+            Wn.append(p)
+        return Wn, We

     def find_cycle(self, i, p, q):
         """
@@ -67,69 +124,213 @@ class _DataEssentialsAndFunctions:

         The cycle is oriented in the direction from p to q.
         """
-        pass
+        w = self.find_apex(p, q)
+        Wn, We = self.trace_path(p, w)
+        Wn.reverse()
+        We.reverse()
+        if We != [i]:
+            We.append(i)
+        WnR, WeR = self.trace_path(q, w)
+        del WnR[-1]
+        Wn += WnR
+        We += WeR
+        return Wn, We

     def augment_flow(self, Wn, We, f):
         """
         Augment f units of flow along a cycle represented by Wn and We.
         """
-        pass
+        for i, p in zip(We, Wn):
+            if self.edge_sources[i] == p:
+                self.edge_flow[i] += f
+            else:
+                self.edge_flow[i] -= f

     def trace_subtree(self, p):
         """
         Yield the nodes in the subtree rooted at a node p.
         """
-        pass
+        yield p
+        l = self.last_descendent_dft[p]
+        while p != l:
+            p = self.next_node_dft[p]
+            yield p

     def remove_edge(self, s, t):
         """
         Remove an edge (s, t) where parent[t] == s from the spanning tree.
         """
-        pass
+        size_t = self.subtree_size[t]
+        prev_t = self.prev_node_dft[t]
+        last_t = self.last_descendent_dft[t]
+        next_last_t = self.next_node_dft[last_t]
+        # Remove (s, t).
+        self.parent[t] = None
+        self.parent_edge[t] = None
+        # Remove the subtree rooted at t from the depth-first thread.
+        self.next_node_dft[prev_t] = next_last_t
+        self.prev_node_dft[next_last_t] = prev_t
+        self.next_node_dft[last_t] = t
+        self.prev_node_dft[t] = last_t
+        # Update the subtree sizes and last descendants of the (old) ancestors
+        # of t.
+        while s is not None:
+            self.subtree_size[s] -= size_t
+            if self.last_descendent_dft[s] == last_t:
+                self.last_descendent_dft[s] = prev_t
+            s = self.parent[s]

     def make_root(self, q):
         """
         Make a node q the root of its containing subtree.
         """
-        pass
+        ancestors = []
+        while q is not None:
+            ancestors.append(q)
+            q = self.parent[q]
+        ancestors.reverse()
+        for p, q in zip(ancestors, islice(ancestors, 1, None)):
+            size_p = self.subtree_size[p]
+            last_p = self.last_descendent_dft[p]
+            prev_q = self.prev_node_dft[q]
+            last_q = self.last_descendent_dft[q]
+            next_last_q = self.next_node_dft[last_q]
+            # Make p a child of q.
+            self.parent[p] = q
+            self.parent[q] = None
+            self.parent_edge[p] = self.parent_edge[q]
+            self.parent_edge[q] = None
+            self.subtree_size[p] = size_p - self.subtree_size[q]
+            self.subtree_size[q] = size_p
+            # Remove the subtree rooted at q from the depth-first thread.
+            self.next_node_dft[prev_q] = next_last_q
+            self.prev_node_dft[next_last_q] = prev_q
+            self.next_node_dft[last_q] = q
+            self.prev_node_dft[q] = last_q
+            if last_p == last_q:
+                self.last_descendent_dft[p] = prev_q
+                last_p = prev_q
+            # Add the remaining parts of the subtree rooted at p as a subtree
+            # of q in the depth-first thread.
+            self.prev_node_dft[p] = last_q
+            self.next_node_dft[last_q] = p
+            self.next_node_dft[last_p] = q
+            self.prev_node_dft[q] = last_p
+            self.last_descendent_dft[q] = last_p

     def add_edge(self, i, p, q):
         """
         Add an edge (p, q) to the spanning tree where q is the root of a subtree.
         """
-        pass
+        last_p = self.last_descendent_dft[p]
+        next_last_p = self.next_node_dft[last_p]
+        size_q = self.subtree_size[q]
+        last_q = self.last_descendent_dft[q]
+        # Make q a child of p.
+        self.parent[q] = p
+        self.parent_edge[q] = i
+        # Insert the subtree rooted at q into the depth-first thread.
+        self.next_node_dft[last_p] = q
+        self.prev_node_dft[q] = last_p
+        self.prev_node_dft[next_last_p] = last_q
+        self.next_node_dft[last_q] = next_last_p
+        # Update the subtree sizes and last descendants of the (new) ancestors
+        # of q.
+        while p is not None:
+            self.subtree_size[p] += size_q
+            if self.last_descendent_dft[p] == last_p:
+                self.last_descendent_dft[p] = last_q
+            p = self.parent[p]

     def update_potentials(self, i, p, q):
         """
         Update the potentials of the nodes in the subtree rooted at a node
         q connected to its parent p by an edge i.
         """
-        pass
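+        # d is the potential change that gives the entering edge i a reduced
+        # cost of zero; apply it to every node in the subtree rooted at q.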
+        if q == self.edge_targets[i]:
+            d = self.node_potentials[p] - self.edge_weights[i] - self.node_potentials[q]
+        else:
+            d = self.node_potentials[p] + self.edge_weights[i] - self.node_potentials[q]
+        for q in self.trace_subtree(q):
+            self.node_potentials[q] += d

     def reduced_cost(self, i):
         """Returns the reduced cost of an edge i."""
-        pass
+        c = (
+            self.edge_weights[i]
+            - self.node_potentials[self.edge_sources[i]]
+            + self.node_potentials[self.edge_targets[i]]
+        )
+        return c if self.edge_flow[i] == 0 else -c

     def find_entering_edges(self):
         """Yield entering edges until none can be found."""
-        pass
+        if self.edge_count == 0:
+            return
+
+        # Entering edges are found by combining Dantzig's rule and Bland's
+        # rule. The edges are cyclically grouped into blocks of size B. Within
+        # each block, Dantzig's rule is applied to find an entering edge. The
+        # blocks to search is determined following Bland's rule.
+        B = int(ceil(sqrt(self.edge_count)))  # pivot block size
+        M = (self.edge_count + B - 1) // B  # number of blocks needed to cover all edges
+        m = 0  # number of consecutive blocks without eligible entering edges
+        f = 0  # first edge in block
+        while m < M:
+            # Determine the next block of edges.
+            l = f + B
+            if l <= self.edge_count:
+                edges = range(f, l)
+            else:
+                l -= self.edge_count
+                edges = chain(range(f, self.edge_count), range(l))
+            f = l
+            # Find the first edge with the lowest reduced cost.
+            i = min(edges, key=self.reduced_cost)
+            c = self.reduced_cost(i)
+            if c >= 0:
+                # No entering edge found in the current block.
+                m += 1
+            else:
+                # Entering edge found.
+                if self.edge_flow[i] == 0:
+                    p = self.edge_sources[i]
+                    q = self.edge_targets[i]
+                else:
+                    p = self.edge_targets[i]
+                    q = self.edge_sources[i]
+                yield i, p, q
+                m = 0
+        # All edges have nonnegative reduced costs. The current flow is
+        # optimal.

     def residual_capacity(self, i, p):
         """Returns the residual capacity of an edge i in the direction away
         from its endpoint p.
         """
-        pass
+        return (
+            self.edge_capacities[i] - self.edge_flow[i]
+            if self.edge_sources[i] == p
+            else self.edge_flow[i]
+        )

     def find_leaving_edge(self, Wn, We):
         """Returns the leaving edge in a cycle represented by Wn and We."""
-        pass
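+        # Among cycle edges of minimum residual capacity, pick the one that
+        # occurs last in the cycle direction (leaving arc rule, prevents cycling).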
+        j, s = min(
+            zip(reversed(We), reversed(Wn)),
+            key=lambda i_p: self.residual_capacity(*i_p),
+        )
+        t = self.edge_targets[j] if self.edge_sources[j] == s else self.edge_sources[j]
+        return j, s, t


-@not_implemented_for('undirected')
-@nx._dispatchable(node_attrs='demand', edge_attrs={'capacity': float('inf'),
-    'weight': 0})
-def network_simplex(G, demand='demand', capacity='capacity', weight='weight'):
-    """Find a minimum cost flow satisfying all demands in digraph G.
+@not_implemented_for("undirected")
+@nx._dispatchable(
+    node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0}
+)
+def network_simplex(G, demand="demand", capacity="capacity", weight="weight"):
+    r"""Find a minimum cost flow satisfying all demands in digraph G.

     This is a primal network simplex algorithm that uses the leaving
     arc rule to prevent cycling.
@@ -290,4 +491,176 @@ def network_simplex(G, demand='demand', capacity='capacity', weight='weight'):
            optimization.
            INFOR 17(1):16--34. 1979.
     """
-    pass
+    ###########################################################################
+    # Problem essentials extraction and sanity check
+    ###########################################################################
+
+    if len(G) == 0:
+        raise nx.NetworkXError("graph has no nodes")
+
+    multigraph = G.is_multigraph()
+
+    # extracting data essential to problem
+    DEAF = _DataEssentialsAndFunctions(
+        G, multigraph, demand=demand, capacity=capacity, weight=weight
+    )
+
+    ###########################################################################
+    # Quick Error Detection
+    ###########################################################################
+
+    inf = float("inf")
+    for u, d in zip(DEAF.node_list, DEAF.node_demands):
+        if abs(d) == inf:
+            raise nx.NetworkXError(f"node {u!r} has infinite demand")
+    for e, w in zip(DEAF.edge_indices, DEAF.edge_weights):
+        if abs(w) == inf:
+            raise nx.NetworkXError(f"edge {e!r} has infinite weight")
+    if not multigraph:
+        edges = nx.selfloop_edges(G, data=True)
+    else:
+        edges = nx.selfloop_edges(G, data=True, keys=True)
+    for e in edges:
+        if abs(e[-1].get(weight, 0)) == inf:
+            raise nx.NetworkXError(f"edge {e[:-1]!r} has infinite weight")
+
+    ###########################################################################
+    # Quick Infeasibility Detection
+    ###########################################################################
+
+    if sum(DEAF.node_demands) != 0:
+        raise nx.NetworkXUnfeasible("total node demand is not zero")
+    for e, c in zip(DEAF.edge_indices, DEAF.edge_capacities):
+        if c < 0:
+            raise nx.NetworkXUnfeasible(f"edge {e!r} has negative capacity")
+    if not multigraph:
+        edges = nx.selfloop_edges(G, data=True)
+    else:
+        edges = nx.selfloop_edges(G, data=True, keys=True)
+    for e in edges:
+        if e[-1].get(capacity, inf) < 0:
+            raise nx.NetworkXUnfeasible(f"edge {e[:-1]!r} has negative capacity")
+
+    ###########################################################################
+    # Initialization
+    ###########################################################################
+
+    # Add a dummy node -1 and connect all existing nodes to it with infinite-
+    # capacity dummy edges. Node -1 will serve as the root of the
+    # spanning tree of the network simplex method. The new edges will be used
+    # to trivially satisfy the node demands and create an initial strongly
+    # feasible spanning tree.
+    for i, d in enumerate(DEAF.node_demands):
+        # Must be greater-than here. Zero-demand nodes must have
+        # edges pointing towards the root to ensure strong feasibility.
+        if d > 0:
+            DEAF.edge_sources.append(-1)
+            DEAF.edge_targets.append(i)
+        else:
+            DEAF.edge_sources.append(i)
+            DEAF.edge_targets.append(-1)
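+    # faux_inf is a finite stand-in for infinity, chosen large enough that no
+    # feasible flow or cost can reach it, so dummy edges stay unattractive.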
+    faux_inf = (
+        3
+        * max(
+            chain(
+                [
+                    sum(c for c in DEAF.edge_capacities if c < inf),
+                    sum(abs(w) for w in DEAF.edge_weights),
+                ],
+                (abs(d) for d in DEAF.node_demands),
+            )
+        )
+        or 1
+    )
+
+    n = len(DEAF.node_list)  # number of nodes
+    DEAF.edge_weights.extend(repeat(faux_inf, n))
+    DEAF.edge_capacities.extend(repeat(faux_inf, n))
+
+    # Construct the initial spanning tree.
+    DEAF.initialize_spanning_tree(n, faux_inf)
+
+    ###########################################################################
+    # Pivot loop
+    ###########################################################################
+
+    for i, p, q in DEAF.find_entering_edges():
+        Wn, We = DEAF.find_cycle(i, p, q)
+        j, s, t = DEAF.find_leaving_edge(Wn, We)
+        DEAF.augment_flow(Wn, We, DEAF.residual_capacity(j, s))
+        # Do nothing more if the entering edge is the same as the leaving edge.
+        if i != j:
+            if DEAF.parent[t] != s:
+                # Ensure that s is the parent of t.
+                s, t = t, s
+            if We.index(i) > We.index(j):
+                # Ensure that q is in the subtree rooted at t.
+                p, q = q, p
+            DEAF.remove_edge(s, t)
+            DEAF.make_root(q)
+            DEAF.add_edge(i, p, q)
+            DEAF.update_potentials(i, p, q)
+
+    ###########################################################################
+    # Infeasibility and unboundedness detection
+    ###########################################################################
+
+    if any(DEAF.edge_flow[i] != 0 for i in range(-n, 0)):
+        raise nx.NetworkXUnfeasible("no flow satisfies all node demands")
+
+    if any(DEAF.edge_flow[i] * 2 >= faux_inf for i in range(DEAF.edge_count)) or any(
+        e[-1].get(capacity, inf) == inf and e[-1].get(weight, 0) < 0
+        for e in nx.selfloop_edges(G, data=True)
+    ):
+        raise nx.NetworkXUnbounded("negative cycle with infinite capacity found")
+
+    ###########################################################################
+    # Flow cost calculation and flow dict construction
+    ###########################################################################
+
+    del DEAF.edge_flow[DEAF.edge_count :]
+    flow_cost = sum(w * x for w, x in zip(DEAF.edge_weights, DEAF.edge_flow))
+    flow_dict = {n: {} for n in DEAF.node_list}
+
+    def add_entry(e):
+        """Add a flow dict entry."""
+        d = flow_dict[e[0]]
+        for k in e[1:-2]:
+            try:
+                d = d[k]
+            except KeyError:
+                t = {}
+                d[k] = t
+                d = t
+        d[e[-2]] = e[-1]
+
+    DEAF.edge_sources = (
+        DEAF.node_list[s] for s in DEAF.edge_sources
+    )  # Use original nodes.
+    DEAF.edge_targets = (
+        DEAF.node_list[t] for t in DEAF.edge_targets
+    )  # Use original nodes.
+    if not multigraph:
+        for e in zip(DEAF.edge_sources, DEAF.edge_targets, DEAF.edge_flow):
+            add_entry(e)
+        edges = G.edges(data=True)
+    else:
+        for e in zip(
+            DEAF.edge_sources, DEAF.edge_targets, DEAF.edge_keys, DEAF.edge_flow
+        ):
+            add_entry(e)
+        edges = G.edges(data=True, keys=True)
+    for e in edges:
+        if e[0] != e[1]:
+            if e[-1].get(capacity, inf) == 0:
+                add_entry(e[:-1] + (0,))
+        else:
+            w = e[-1].get(weight, 0)
+            if w >= 0:
+                add_entry(e[:-1] + (0,))
+            else:
+                c = e[-1][capacity]
+                flow_cost += w * c
+                add_entry(e[:-1] + (c,))
+
+    return flow_cost, flow_dict
diff --git a/networkx/algorithms/flow/preflowpush.py b/networkx/algorithms/flow/preflowpush.py
index 101c7948d..42cadc2e2 100644
--- a/networkx/algorithms/flow/preflowpush.py
+++ b/networkx/algorithms/flow/preflowpush.py
@@ -1,31 +1,305 @@
 """
 Highest-label preflow-push algorithm for maximum flow problems.
 """
+
 from collections import deque
 from itertools import islice
+
 import networkx as nx
+
 from ...utils import arbitrary_element
-from .utils import CurrentEdge, GlobalRelabelThreshold, Level, build_residual_network, detect_unboundedness
-__all__ = ['preflow_push']
+from .utils import (
+    CurrentEdge,
+    GlobalRelabelThreshold,
+    Level,
+    build_residual_network,
+    detect_unboundedness,
+)
+
+__all__ = ["preflow_push"]


-def preflow_push_impl(G, s, t, capacity, residual, global_relabel_freq,
-    value_only):
+def preflow_push_impl(G, s, t, capacity, residual, global_relabel_freq, value_only):
     """Implementation of the highest-label preflow-push algorithm."""
-    pass
+    if s not in G:
+        raise nx.NetworkXError(f"node {str(s)} not in graph")
+    if t not in G:
+        raise nx.NetworkXError(f"node {str(t)} not in graph")
+    if s == t:
+        raise nx.NetworkXError("source and sink are the same node")
+
+    if global_relabel_freq is None:
+        global_relabel_freq = 0
+    if global_relabel_freq < 0:
+        raise nx.NetworkXError("global_relabel_freq must be nonnegative.")
+
+    if residual is None:
+        R = build_residual_network(G, capacity)
+    else:
+        R = residual
+
+    detect_unboundedness(R, s, t)
+
+    R_nodes = R.nodes
+    R_pred = R.pred
+    R_succ = R.succ
+
+    # Initialize/reset the residual network.
+    for u in R:
+        R_nodes[u]["excess"] = 0
+        for e in R_succ[u].values():
+            e["flow"] = 0
+
+    def reverse_bfs(src):
+        """Perform a reverse breadth-first search from src in the residual
+        network.
+        """
+        heights = {src: 0}
+        q = deque([(src, 0)])
+        while q:
+            u, height = q.popleft()
+            height += 1
+            for v, attr in R_pred[u].items():
+                if v not in heights and attr["flow"] < attr["capacity"]:
+                    heights[v] = height
+                    q.append((v, height))
+        return heights
+
+    # Initialize heights of the nodes.
+    heights = reverse_bfs(t)
+
+    if s not in heights:
+        # t is not reachable from s in the residual network. The maximum flow
+        # must be zero.
+        R.graph["flow_value"] = 0
+        return R
+
+    n = len(R)
+    # max_height represents the height of the highest level below level n with
+    # at least one active node.
+    max_height = max(heights[u] for u in heights if u != s)
+    heights[s] = n
+
+    grt = GlobalRelabelThreshold(n, R.size(), global_relabel_freq)
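+    # grt accumulates relabeling work and signals when a global relabeling
+    # (a fresh BFS of heights) should be performed.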
+
+    # Initialize heights and 'current edge' data structures of the nodes.
+    for u in R:
+        R_nodes[u]["height"] = heights[u] if u in heights else n + 1
+        R_nodes[u]["curr_edge"] = CurrentEdge(R_succ[u])
+
+    def push(u, v, flow):
+        """Push flow units of flow from u to v."""
+        R_succ[u][v]["flow"] += flow
+        R_succ[v][u]["flow"] -= flow
+        R_nodes[u]["excess"] -= flow
+        R_nodes[v]["excess"] += flow
+
+    # The maximum flow must be nonzero now. Initialize the preflow by
+    # saturating all edges emanating from s.
+    for u, attr in R_succ[s].items():
+        flow = attr["capacity"]
+        if flow > 0:
+            push(s, u, flow)
+
+    # Partition nodes into levels.
+    levels = [Level() for i in range(2 * n)]
+    for u in R:
+        if u != s and u != t:
+            level = levels[R_nodes[u]["height"]]
+            if R_nodes[u]["excess"] > 0:
+                level.active.add(u)
+            else:
+                level.inactive.add(u)
+
+    def activate(v):
+        """Move a node from the inactive set to the active set of its level."""
+        if v != s and v != t:
+            level = levels[R_nodes[v]["height"]]
+            if v in level.inactive:
+                level.inactive.remove(v)
+                level.active.add(v)
+
+    def relabel(u):
+        """Relabel a node to create an admissible edge."""
+        grt.add_work(len(R_succ[u]))
+        return (
+            min(
+                R_nodes[v]["height"]
+                for v, attr in R_succ[u].items()
+                if attr["flow"] < attr["capacity"]
+            )
+            + 1
+        )
+
+    def discharge(u, is_phase1):
+        """Discharge a node until it becomes inactive or, during phase 1 (see
+        below), its height reaches at least n. The node is known to have the
+        largest height among active nodes.
+        """
+        height = R_nodes[u]["height"]
+        curr_edge = R_nodes[u]["curr_edge"]
+        # next_height represents the next height to examine after discharging
+        # the current node. During phase 1, it is capped to below n.
+        next_height = height
+        levels[height].active.remove(u)
+        while True:
+            v, attr = curr_edge.get()
+            if height == R_nodes[v]["height"] + 1 and attr["flow"] < attr["capacity"]:
+                flow = min(R_nodes[u]["excess"], attr["capacity"] - attr["flow"])
+                push(u, v, flow)
+                activate(v)
+                if R_nodes[u]["excess"] == 0:
+                    # The node has become inactive.
+                    levels[height].inactive.add(u)
+                    break
+            try:
+                curr_edge.move_to_next()
+            except StopIteration:
+                # We have run off the end of the adjacency list, and there can
+                # be no more admissible edges. Relabel the node to create one.
+                height = relabel(u)
+                if is_phase1 and height >= n - 1:
+                    # Although the node is still active, with a height at least
+                    # n - 1, it is now known to be on the s side of the minimum
+                    # s-t cut. Stop processing it until phase 2.
+                    levels[height].active.add(u)
+                    break
+                # The first relabel operation after global relabeling may not
+                # increase the height of the node since the 'current edge' data
+                # structure is not rewound. Use height instead of (height - 1)
+                # in case other active nodes at the same level are missed.
+                next_height = height
+        R_nodes[u]["height"] = height
+        return next_height
+
+    def gap_heuristic(height):
+        """Apply the gap heuristic."""
+        # Move all nodes at levels (height + 1) through max_height to level n + 1.
+        for level in islice(levels, height + 1, max_height + 1):
+            for u in level.active:
+                R_nodes[u]["height"] = n + 1
+            for u in level.inactive:
+                R_nodes[u]["height"] = n + 1
+            levels[n + 1].active.update(level.active)
+            level.active.clear()
+            levels[n + 1].inactive.update(level.inactive)
+            level.inactive.clear()
+
+    def global_relabel(from_sink):
+        """Apply the global relabeling heuristic."""
+        src = t if from_sink else s
+        heights = reverse_bfs(src)
+        if not from_sink:
+            # s must be reachable from t. Remove t explicitly.
+            del heights[t]
+        max_height = max(heights.values())
+        if from_sink:
+            # Also mark nodes from which t is unreachable for relabeling. This
+            # serves the same purpose as the gap heuristic.
+            for u in R:
+                if u not in heights and R_nodes[u]["height"] < n:
+                    heights[u] = n + 1
+        else:
+            # Shift the computed heights because the height of s is n.
+            for u in heights:
+                heights[u] += n
+            max_height += n
+        del heights[src]
+        for u, new_height in heights.items():
+            old_height = R_nodes[u]["height"]
+            if new_height != old_height:
+                if u in levels[old_height].active:
+                    levels[old_height].active.remove(u)
+                    levels[new_height].active.add(u)
+                else:
+                    levels[old_height].inactive.remove(u)
+                    levels[new_height].inactive.add(u)
+                R_nodes[u]["height"] = new_height
+        return max_height
+
+    # Phase 1: Find the maximum preflow by pushing as much flow as possible to
+    # t.
+
+    height = max_height
+    while height > 0:
+        # Discharge active nodes in the current level.
+        while True:
+            level = levels[height]
+            if not level.active:
+                # All active nodes in the current level have been discharged.
+                # Move to the next lower level.
+                height -= 1
+                break
+            # Record the old height and level for the gap heuristic.
+            old_height = height
+            old_level = level
+            u = arbitrary_element(level.active)
+            height = discharge(u, True)
+            if grt.is_reached():
+                # Global relabeling heuristic: Recompute the exact heights of
+                # all nodes.
+                height = global_relabel(True)
+                max_height = height
+                grt.clear_work()
+            elif not old_level.active and not old_level.inactive:
+                # Gap heuristic: If the level at old_height is empty (a 'gap'),
+                # a minimum cut has been identified. All nodes with heights
+                # above old_height can have their heights set to n + 1 and not
+                # be further processed before a maximum preflow is found.
+                gap_heuristic(old_height)
+                height = old_height - 1
+                max_height = height
+            else:
+                # Update the height of the highest level with at least one
+                # active node.
+                max_height = max(max_height, height)
+
+    # A maximum preflow has been found. The excess at t is the maximum flow
+    # value.
+    if value_only:
+        R.graph["flow_value"] = R_nodes[t]["excess"]
+        return R
+
+    # Phase 2: Convert the maximum preflow into a maximum flow by returning the
+    # excess to s.
+
+    # Relabel all nodes so that they have accurate heights.
+    height = global_relabel(False)
+    grt.clear_work()
+
+    # Continue to discharge the active nodes.
+    while height > n:
+        # Discharge active nodes in the current level.
+        while True:
+            level = levels[height]
+            if not level.active:
+                # All active nodes in the current level have been discharged.
+                # Move to the next lower level.
+                height -= 1
+                break
+            u = arbitrary_element(level.active)
+            height = discharge(u, False)
+            if grt.is_reached():
+                # Global relabeling heuristic.
+                height = global_relabel(False)
+                grt.clear_work()
+
+    R.graph["flow_value"] = R_nodes[t]["excess"]
+    return R


-@nx._dispatchable(edge_attrs={'capacity': float('inf')}, returns_graph=True)
-def preflow_push(G, s, t, capacity='capacity', residual=None,
-    global_relabel_freq=1, value_only=False):
-    """Find a maximum single-commodity flow using the highest-label
+@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True)
+def preflow_push(
+    G, s, t, capacity="capacity", residual=None, global_relabel_freq=1, value_only=False
+):
+    r"""Find a maximum single-commodity flow using the highest-label
     preflow-push algorithm.

     This function returns the residual network resulting after computing
     the maximum flow. See below for details about the conventions
     NetworkX uses for defining residual networks.

-    This algorithm has a running time of $O(n^2 \\sqrt{m})$ for $n$ nodes and
+    This algorithm has a running time of $O(n^2 \sqrt{m})$ for $n$ nodes and
     $m$ edges.


@@ -145,4 +419,7 @@ def preflow_push(G, s, t, capacity='capacity', residual=None,
     True

     """
-    pass
+    R = preflow_push_impl(G, s, t, capacity, residual, global_relabel_freq, value_only)
+    R.graph["algorithm"] = "preflow_push"
+    nx._clear_cache(R)
+    return R
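
With the implementation restored, the wrapper can be exercised end to end. A minimal usage sketch (graph, node names, and capacities are illustrative, not taken from the patch):

    import networkx as nx
    from networkx.algorithms.flow import preflow_push

    # A small capacitated network with source "x" and sink "y".
    G = nx.DiGraph()
    G.add_edge("x", "a", capacity=3.0)
    G.add_edge("x", "b", capacity=1.0)
    G.add_edge("a", "c", capacity=3.0)
    G.add_edge("b", "c", capacity=5.0)
    G.add_edge("b", "d", capacity=4.0)
    G.add_edge("d", "e", capacity=2.0)
    G.add_edge("c", "y", capacity=2.0)
    G.add_edge("e", "y", capacity=3.0)

    # The residual network is returned; the maximum flow value is stored
    # as a graph attribute, as set at the end of preflow_push_impl.
    R = preflow_push(G, "x", "y")
    assert R.graph["flow_value"] == 3.0
    assert R.graph["algorithm"] == "preflow_push"
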
diff --git a/networkx/algorithms/flow/shortestaugmentingpath.py b/networkx/algorithms/flow/shortestaugmentingpath.py
index 2bbab9bb7..9f1193f1c 100644
--- a/networkx/algorithms/flow/shortestaugmentingpath.py
+++ b/networkx/algorithms/flow/shortestaugmentingpath.py
@@ -1,23 +1,180 @@
 """
 Shortest augmenting path algorithm for maximum flow problems.
 """
+
 from collections import deque
+
 import networkx as nx
+
 from .edmondskarp import edmonds_karp_core
 from .utils import CurrentEdge, build_residual_network
-__all__ = ['shortest_augmenting_path']

+__all__ = ["shortest_augmenting_path"]

-def shortest_augmenting_path_impl(G, s, t, capacity, residual, two_phase,
-    cutoff):
+
+def shortest_augmenting_path_impl(G, s, t, capacity, residual, two_phase, cutoff):
     """Implementation of the shortest augmenting path algorithm."""
-    pass
+    if s not in G:
+        raise nx.NetworkXError(f"node {str(s)} not in graph")
+    if t not in G:
+        raise nx.NetworkXError(f"node {str(t)} not in graph")
+    if s == t:
+        raise nx.NetworkXError("source and sink are the same node")
+
+    if residual is None:
+        R = build_residual_network(G, capacity)
+    else:
+        R = residual
+
+    R_nodes = R.nodes
+    R_pred = R.pred
+    R_succ = R.succ
+
+    # Initialize/reset the residual network.
+    for u in R:
+        for e in R_succ[u].values():
+            e["flow"] = 0
+
+    # Initialize heights of the nodes.
+    heights = {t: 0}
+    q = deque([(t, 0)])
+    while q:
+        u, height = q.popleft()
+        height += 1
+        for v, attr in R_pred[u].items():
+            if v not in heights and attr["flow"] < attr["capacity"]:
+                heights[v] = height
+                q.append((v, height))
+
+    if s not in heights:
+        # t is not reachable from s in the residual network. The maximum flow
+        # must be zero.
+        R.graph["flow_value"] = 0
+        return R
+
+    n = len(G)
+    m = R.size() / 2
+
+    # Initialize heights and 'current edge' data structures of the nodes.
+    for u in R:
+        R_nodes[u]["height"] = heights[u] if u in heights else n
+        R_nodes[u]["curr_edge"] = CurrentEdge(R_succ[u])
+
+    # Initialize counts of nodes in each level.
+    counts = [0] * (2 * n - 1)
+    for u in R:
+        counts[R_nodes[u]["height"]] += 1
+
+    inf = R.graph["inf"]
+
+    def augment(path):
+        """Augment flow along a path from s to t."""
+        # Determine the path residual capacity.
+        flow = inf
+        it = iter(path)
+        u = next(it)
+        for v in it:
+            attr = R_succ[u][v]
+            flow = min(flow, attr["capacity"] - attr["flow"])
+            u = v
+        if flow * 2 > inf:
+            raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.")
+        # Augment flow along the path.
+        it = iter(path)
+        u = next(it)
+        for v in it:
+            R_succ[u][v]["flow"] += flow
+            R_succ[v][u]["flow"] -= flow
+            u = v
+        return flow
+
+    def relabel(u):
+        """Relabel a node to create an admissible edge."""
+        height = n - 1
+        for v, attr in R_succ[u].items():
+            if attr["flow"] < attr["capacity"]:
+                height = min(height, R_nodes[v]["height"])
+        return height + 1
+
+    if cutoff is None:
+        cutoff = float("inf")
+
+    # Phase 1: Look for shortest augmenting paths using depth-first search.
+
+    flow_value = 0
+    path = [s]
+    u = s
+    d = n if not two_phase else int(min(m**0.5, 2 * n ** (2.0 / 3)))
+    done = R_nodes[s]["height"] >= d
+    while not done:
+        height = R_nodes[u]["height"]
+        curr_edge = R_nodes[u]["curr_edge"]
+        # Depth-first search for the next node on the path to t.
+        while True:
+            v, attr = curr_edge.get()
+            if height == R_nodes[v]["height"] + 1 and attr["flow"] < attr["capacity"]:
+                # Advance to the next node following an admissible edge.
+                path.append(v)
+                u = v
+                break
+            try:
+                curr_edge.move_to_next()
+            except StopIteration:
+                counts[height] -= 1
+                if counts[height] == 0:
+                    # Gap heuristic: If relabeling causes a level to become
+                    # empty, a minimum cut has been identified. The algorithm
+                    # can now be terminated.
+                    R.graph["flow_value"] = flow_value
+                    return R
+                height = relabel(u)
+                if u == s and height >= d:
+                    if not two_phase:
+                        # t is disconnected from s in the residual network. No
+                        # more augmenting paths exist.
+                        R.graph["flow_value"] = flow_value
+                        return R
+                    else:
+                        # t is at least d steps away from s. End of phase 1.
+                        done = True
+                        break
+                counts[height] += 1
+                R_nodes[u]["height"] = height
+                if u != s:
+                    # After relabeling, the last edge on the path is no longer
+                    # admissible. Retreat one step to look for an alternative.
+                    path.pop()
+                    u = path[-1]
+                    break
+        if u == t:
+            # t is reached. Augment flow along the path and reset it for a new
+            # depth-first search.
+            flow_value += augment(path)
+            if flow_value >= cutoff:
+                R.graph["flow_value"] = flow_value
+                return R
+            path = [s]
+            u = s
+
+    # Phase 2: Look for shortest augmenting paths using breadth-first search.
+    flow_value += edmonds_karp_core(R, s, t, cutoff - flow_value)
+
+    R.graph["flow_value"] = flow_value
+    return R


-@nx._dispatchable(edge_attrs={'capacity': float('inf')}, returns_graph=True)
-def shortest_augmenting_path(G, s, t, capacity='capacity', residual=None,
-    value_only=False, two_phase=False, cutoff=None):
-    """Find a maximum single-commodity flow using the shortest augmenting path
+@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True)
+def shortest_augmenting_path(
+    G,
+    s,
+    t,
+    capacity="capacity",
+    residual=None,
+    value_only=False,
+    two_phase=False,
+    cutoff=None,
+):
+    r"""Find a maximum single-commodity flow using the shortest augmenting path
     algorithm.

     This function returns the residual network resulting after computing
@@ -58,7 +215,7 @@ def shortest_augmenting_path(G, s, t, capacity='capacity', residual=None,
     two_phase : bool
         If True, a two-phase variant is used. The two-phase variant improves
         the running time on unit-capacity networks from $O(nm)$ to
-        $O(\\min(n^{2/3}, m^{1/2}) m)$. Default value: False.
+        $O(\min(n^{2/3}, m^{1/2}) m)$. Default value: False.

     cutoff : integer, float
         If specified, the algorithm will terminate when the flow value reaches
@@ -137,4 +294,7 @@ def shortest_augmenting_path(G, s, t, capacity='capacity', residual=None,
     True

     """
-    pass
+    R = shortest_augmenting_path_impl(G, s, t, capacity, residual, two_phase, cutoff)
+    R.graph["algorithm"] = "shortest_augmenting_path"
+    nx._clear_cache(R)
+    return R
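
A companion sketch for the restored shortest_augmenting_path, exercising the two_phase and cutoff parameters (the two-path network is illustrative):

    import networkx as nx
    from networkx.algorithms.flow import shortest_augmenting_path

    G = nx.DiGraph()
    nx.add_path(G, ["s", "u", "t"], capacity=2.0)
    nx.add_path(G, ["s", "v", "t"], capacity=1.0)

    # Full run: the two parallel paths carry 2.0 + 1.0 units.
    R = shortest_augmenting_path(G, "s", "t", two_phase=True)
    assert R.graph["flow_value"] == 3.0

    # With a cutoff, the search stops once the flow value reaches (or
    # overshoots) the bound, so the result need not be a maximum flow.
    R = shortest_augmenting_path(G, "s", "t", cutoff=2)
    assert 2 <= R.graph["flow_value"] <= 3
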
diff --git a/networkx/algorithms/flow/utils.py b/networkx/algorithms/flow/utils.py
index c33d79d27..03f1d10f7 100644
--- a/networkx/algorithms/flow/utils.py
+++ b/networkx/algorithms/flow/utils.py
@@ -1,27 +1,52 @@
 """
 Utility classes and functions for network flow algorithms.
 """
+
 from collections import deque
+
 import networkx as nx
-__all__ = ['CurrentEdge', 'Level', 'GlobalRelabelThreshold',
-    'build_residual_network', 'detect_unboundedness', 'build_flow_dict']
+
+__all__ = [
+    "CurrentEdge",
+    "Level",
+    "GlobalRelabelThreshold",
+    "build_residual_network",
+    "detect_unboundedness",
+    "build_flow_dict",
+]


 class CurrentEdge:
     """Mechanism for iterating over out-edges incident to a node in a circular
     manner. StopIteration exception is raised when wraparound occurs.
     """
-    __slots__ = '_edges', '_it', '_curr'
+
+    __slots__ = ("_edges", "_it", "_curr")

     def __init__(self, edges):
         self._edges = edges
         if self._edges:
             self._rewind()

+    def get(self):
+        return self._curr
+
+    def move_to_next(self):
+        try:
+            self._curr = next(self._it)
+        except StopIteration:
+            self._rewind()
+            raise
+
+    def _rewind(self):
+        self._it = iter(self._edges.items())
+        self._curr = next(self._it)
+

 class Level:
     """Active and inactive nodes in a level."""
-    __slots__ = 'active', 'inactive'
+
+    __slots__ = ("active", "inactive")

     def __init__(self):
         self.active = set()
@@ -34,11 +59,20 @@ class GlobalRelabelThreshold:
     """

     def __init__(self, n, m, freq):
-        self._threshold = (n + m) / freq if freq else float('inf')
+        self._threshold = (n + m) / freq if freq else float("inf")
         self._work = 0

+    def add_work(self, work):
+        self._work += work

-@nx._dispatchable(edge_attrs={'capacity': float('inf')}, returns_graph=True)
+    def is_reached(self):
+        return self._work >= self._threshold
+
+    def clear_work(self):
+        self._work = 0
+
+
+@nx._dispatchable(edge_attrs={"capacity": float("inf")}, returns_graph=True)
 def build_residual_network(G, capacity):
     """Build a residual network and initialize a zero flow.

@@ -64,18 +98,92 @@ def build_residual_network(G, capacity):
     :samp:`s`-:samp:`t` cut.

     """
-    pass
-
-
-@nx._dispatchable(graphs='R', preserve_edge_attrs={'R': {'capacity': float(
-    'inf')}}, preserve_graph_attrs=True)
+    if G.is_multigraph():
+        raise nx.NetworkXError("MultiGraph and MultiDiGraph not supported (yet).")
+
+    R = nx.DiGraph()
+    R.__networkx_cache__ = None  # Disable caching
+    R.add_nodes_from(G)
+
+    inf = float("inf")
+    # Extract edges with positive capacities. Self loops excluded.
+    edge_list = [
+        (u, v, attr)
+        for u, v, attr in G.edges(data=True)
+        if u != v and attr.get(capacity, inf) > 0
+    ]
+    # Simulate infinity with three times the sum of the finite edge capacities
+    # or any positive value if the sum is zero. This allows the
+    # infinite-capacity edges to be distinguished for unboundedness detection
+    # and directly participate in residual capacity calculation. If the maximum
+    # flow is finite, these edges cannot appear in the minimum cut and thus
+    # guarantee correctness. Since the residual capacity of an
+    # infinite-capacity edge is always at least 2/3 of inf, while that of a
+    # finite-capacity edge is at most 1/3 of inf, if an operation moves more
+    # than 1/3 of inf units of flow to t, there must be an infinite-capacity
+    # s-t path in G.
+    inf = (
+        3
+        * sum(
+            attr[capacity]
+            for u, v, attr in edge_list
+            if capacity in attr and attr[capacity] != inf
+        )
+        or 1
+    )
+    if G.is_directed():
+        for u, v, attr in edge_list:
+            r = min(attr.get(capacity, inf), inf)
+            if not R.has_edge(u, v):
+                # Both (u, v) and (v, u) must be present in the residual
+                # network.
+                R.add_edge(u, v, capacity=r)
+                R.add_edge(v, u, capacity=0)
+            else:
+                # The edge (u, v) was added when (v, u) was visited.
+                R[u][v]["capacity"] = r
+    else:
+        for u, v, attr in edge_list:
+            # Add a pair of edges with equal residual capacities.
+            r = min(attr.get(capacity, inf), inf)
+            R.add_edge(u, v, capacity=r)
+            R.add_edge(v, u, capacity=r)
+
+    # Record the value simulating infinity.
+    R.graph["inf"] = inf
+
+    return R
+
+
+@nx._dispatchable(
+    graphs="R",
+    preserve_edge_attrs={"R": {"capacity": float("inf")}},
+    preserve_graph_attrs=True,
+)
 def detect_unboundedness(R, s, t):
     """Detect an infinite-capacity s-t path in R."""
-    pass
-
-
-@nx._dispatchable(graphs={'G': 0, 'R': 1}, preserve_edge_attrs={'R': {
-    'flow': None}})
+    q = deque([s])
+    seen = {s}
+    inf = R.graph["inf"]
+    while q:
+        u = q.popleft()
+        for v, attr in R[u].items():
+            if attr["capacity"] == inf and v not in seen:
+                if v == t:
+                    raise nx.NetworkXUnbounded(
+                        "Infinite capacity path, flow unbounded above."
+                    )
+                seen.add(v)
+                q.append(v)
+
+
+@nx._dispatchable(graphs={"G": 0, "R": 1}, preserve_edge_attrs={"R": {"flow": None}})
 def build_flow_dict(G, R):
     """Build a flow dictionary from a residual network."""
-    pass
+    flow_dict = {}
+    for u in G:
+        flow_dict[u] = {v: 0 for v in G[u]}
+        flow_dict[u].update(
+            (v, attr["flow"]) for v, attr in R[u].items() if attr["flow"] > 0
+        )
+    return flow_dict
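
The conventions encoded in build_residual_network are easiest to see on a two-edge example; a minimal sketch (attribute values chosen for illustration):

    import networkx as nx
    from networkx.algorithms.flow import build_residual_network

    G = nx.DiGraph()
    G.add_edge("a", "b", capacity=4)
    G.add_edge("b", "c")  # no 'capacity' attribute: treated as infinite

    R = build_residual_network(G, "capacity")
    # Every edge is paired with its reversal; the reversal of a directed
    # edge starts with zero residual capacity.
    assert R["b"]["a"]["capacity"] == 0
    # Infinity is simulated by three times the sum of the finite
    # capacities (3 * 4 here) and recorded on the graph.
    assert R.graph["inf"] == 12
    assert R["b"]["c"]["capacity"] == 12
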
diff --git a/networkx/algorithms/graph_hashing.py b/networkx/algorithms/graph_hashing.py
index e2c0be264..b3761bf57 100644
--- a/networkx/algorithms/graph_hashing.py
+++ b/networkx/algorithms/graph_hashing.py
@@ -3,10 +3,26 @@ Functions for hashing graphs to strings.
 Isomorphic graphs should be assigned identical hashes.
 For now, only Weisfeiler-Lehman hashing is implemented.
 """
+
 from collections import Counter, defaultdict
 from hashlib import blake2b
+
 import networkx as nx
-__all__ = ['weisfeiler_lehman_graph_hash', 'weisfeiler_lehman_subgraph_hashes']
+
+__all__ = ["weisfeiler_lehman_graph_hash", "weisfeiler_lehman_subgraph_hashes"]
+
+
+def _hash_label(label, digest_size):
+    return blake2b(label.encode("ascii"), digest_size=digest_size).hexdigest()
+
+
+def _init_node_labels(G, edge_attr, node_attr):
+    if node_attr:
+        return {u: str(dd[node_attr]) for u, dd in G.nodes(data=True)}
+    elif edge_attr:
+        return {u: "" for u in G}
+    else:
+        return {u: str(deg) for u, deg in G.degree()}


 def _neighborhood_aggregate(G, node, node_labels, edge_attr=None):
@@ -14,12 +30,17 @@ def _neighborhood_aggregate(G, node, node_labels, edge_attr=None):
     Compute new labels for given node by aggregating
     the labels of each node's neighbors.
     """
-    pass
+    label_list = []
+    for nbr in G.neighbors(node):
+        prefix = "" if edge_attr is None else str(G[node][nbr][edge_attr])
+        label_list.append(prefix + node_labels[nbr])
+    return node_labels[node] + "".join(sorted(label_list))


-@nx._dispatchable(edge_attrs={'edge_attr': None}, node_attrs='node_attr')
-def weisfeiler_lehman_graph_hash(G, edge_attr=None, node_attr=None,
-    iterations=3, digest_size=16):
+@nx._dispatchable(edge_attrs={"edge_attr": None}, node_attrs="node_attr")
+def weisfeiler_lehman_graph_hash(
+    G, edge_attr=None, node_attr=None, iterations=3, digest_size=16
+):
     """Return Weisfeiler Lehman (WL) graph hash.

     The function iteratively aggregates and hashes neighborhoods of each node.
@@ -112,12 +133,42 @@ def weisfeiler_lehman_graph_hash(G, edge_attr=None, node_attr=None,
     --------
     weisfeiler_lehman_subgraph_hashes
     """
-    pass

-
-@nx._dispatchable(edge_attrs={'edge_attr': None}, node_attrs='node_attr')
-def weisfeiler_lehman_subgraph_hashes(G, edge_attr=None, node_attr=None,
-    iterations=3, digest_size=16, include_initial_labels=False):
+    def weisfeiler_lehman_step(G, labels, edge_attr=None):
+        """
+        Apply neighborhood aggregation to each node
+        in the graph.
+        Computes a dictionary with labels for each node.
+        """
+        new_labels = {}
+        for node in G.nodes():
+            label = _neighborhood_aggregate(G, node, labels, edge_attr=edge_attr)
+            new_labels[node] = _hash_label(label, digest_size)
+        return new_labels
+
+    # set initial node labels
+    node_labels = _init_node_labels(G, edge_attr, node_attr)
+
+    subgraph_hash_counts = []
+    for _ in range(iterations):
+        node_labels = weisfeiler_lehman_step(G, node_labels, edge_attr=edge_attr)
+        counter = Counter(node_labels.values())
+        # sort the counter, extend total counts
+        subgraph_hash_counts.extend(sorted(counter.items(), key=lambda x: x[0]))
+
+    # hash the final counter
+    return _hash_label(str(tuple(subgraph_hash_counts)), digest_size)
+
+
+@nx._dispatchable(edge_attrs={"edge_attr": None}, node_attrs="node_attr")
+def weisfeiler_lehman_subgraph_hashes(
+    G,
+    edge_attr=None,
+    node_attr=None,
+    iterations=3,
+    digest_size=16,
+    include_initial_labels=False,
+):
     """
     Return a dictionary of subgraph hashes by node.

@@ -238,4 +289,34 @@ def weisfeiler_lehman_subgraph_hashes(G, edge_attr=None, node_attr=None,
     --------
     weisfeiler_lehman_graph_hash
     """
-    pass
+
+    def weisfeiler_lehman_step(G, labels, node_subgraph_hashes, edge_attr=None):
+        """
+        Apply neighborhood aggregation to each node
+        in the graph.
+        Computes a dictionary with labels for each node.
+        Appends the new hashed label to the dictionary of subgraph hashes
+        originating from and indexed by each node in G
+        """
+        new_labels = {}
+        for node in G.nodes():
+            label = _neighborhood_aggregate(G, node, labels, edge_attr=edge_attr)
+            hashed_label = _hash_label(label, digest_size)
+            new_labels[node] = hashed_label
+            node_subgraph_hashes[node].append(hashed_label)
+        return new_labels
+
+    node_labels = _init_node_labels(G, edge_attr, node_attr)
+    if include_initial_labels:
+        node_subgraph_hashes = {
+            k: [_hash_label(v, digest_size)] for k, v in node_labels.items()
+        }
+    else:
+        node_subgraph_hashes = defaultdict(list)
+
+    for _ in range(iterations):
+        node_labels = weisfeiler_lehman_step(
+            G, node_labels, node_subgraph_hashes, edge_attr
+        )
+
+    return dict(node_subgraph_hashes)
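
A short sketch of both entry points (the graphs are illustrative; WL hashing cannot separate every non-isomorphic pair, but it does here because the degree sequences, and hence the initial labels, already differ):

    import networkx as nx

    G1 = nx.cycle_graph(5)  # every node has degree 2
    G2 = nx.path_graph(5)   # two endpoints have degree 1
    h1 = nx.weisfeiler_lehman_graph_hash(G1)
    h2 = nx.weisfeiler_lehman_graph_hash(G2)
    assert h1 != h2

    # Subgraph hashes: one hash per node per iteration.
    hashes = nx.weisfeiler_lehman_subgraph_hashes(G1, iterations=3)
    assert all(len(h) == 3 for h in hashes.values())
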
diff --git a/networkx/algorithms/graphical.py b/networkx/algorithms/graphical.py
index 8846af1c4..b2ce6c33a 100644
--- a/networkx/algorithms/graphical.py
+++ b/networkx/algorithms/graphical.py
@@ -1,14 +1,21 @@
 """Test sequences for graphiness.
 """
 import heapq
+
 import networkx as nx
-__all__ = ['is_graphical', 'is_multigraphical', 'is_pseudographical',
-    'is_digraphical', 'is_valid_degree_sequence_erdos_gallai',
-    'is_valid_degree_sequence_havel_hakimi']
+
+__all__ = [
+    "is_graphical",
+    "is_multigraphical",
+    "is_pseudographical",
+    "is_digraphical",
+    "is_valid_degree_sequence_erdos_gallai",
+    "is_valid_degree_sequence_havel_hakimi",
+]


 @nx._dispatchable(graphs=None)
-def is_graphical(sequence, method='eg'):
+def is_graphical(sequence, method="eg"):
     """Returns True if sequence is a valid degree sequence.

     A degree sequence is valid if some graph can realize it.
@@ -56,12 +63,39 @@ def is_graphical(sequence, method='eg'):
     .. [CL1996] G. Chartrand and L. Lesniak, "Graphs and Digraphs",
        Chapman and Hall/CRC, 1996.
     """
-    pass
+    if method == "eg":
+        valid = is_valid_degree_sequence_erdos_gallai(list(sequence))
+    elif method == "hh":
+        valid = is_valid_degree_sequence_havel_hakimi(list(sequence))
+    else:
+        msg = "`method` must be 'eg' or 'hh'"
+        raise nx.NetworkXException(msg)
+    return valid
+
+
+def _basic_graphical_tests(deg_sequence):
+    # Perform some simple sanity tests on the sequence and tally degree counts
+    deg_sequence = nx.utils.make_list_of_ints(deg_sequence)
+    p = len(deg_sequence)
+    num_degs = [0] * p
+    dmax, dmin, dsum, n = 0, p, 0, 0
+    for d in deg_sequence:
+        # Reject if degree is negative or larger than the sequence length
+        if d < 0 or d >= p:
+            raise nx.NetworkXUnfeasible
+        # Process only the non-zero integers
+        elif d > 0:
+            dmax, dmin, dsum, n = max(dmax, d), min(dmin, d), dsum + d, n + 1
+            num_degs[d] += 1
+    # Reject sequence if it has odd sum or is oversaturated
+    if dsum % 2 or dsum > n * (n - 1):
+        raise nx.NetworkXUnfeasible
+    return dmax, dmin, dsum, n, num_degs


 @nx._dispatchable(graphs=None)
 def is_valid_degree_sequence_havel_hakimi(deg_sequence):
-    """Returns True if deg_sequence can be realized by a simple graph.
+    r"""Returns True if deg_sequence can be realized by a simple graph.

     The validation proceeds using the Havel-Hakimi theorem
     [havel1955]_, [hakimi1962]_, [CL1996]_.
@@ -96,7 +130,7 @@ def is_valid_degree_sequence_havel_hakimi(deg_sequence):
     The ZZ condition says that for the sequence d if

     .. math::
-        |d| >= \\frac{(\\max(d) + \\min(d) + 1)^2}{4*\\min(d)}
+        |d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)}

     then d is graphical.  This was shown in Theorem 6 in [1]_.

@@ -111,12 +145,47 @@ def is_valid_degree_sequence_havel_hakimi(deg_sequence):
     .. [CL1996] G. Chartrand and L. Lesniak, "Graphs and Digraphs",
        Chapman and Hall/CRC, 1996.
     """
-    pass
+    try:
+        dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence)
+    except nx.NetworkXUnfeasible:
+        return False
+    # Accept if sequence has no non-zero degrees or passes the ZZ condition
+    if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1):
+        return True
+
+    modstubs = [0] * (dmax + 1)
+    # Successively reduce degree sequence by removing the maximum degree
+    while n > 0:
+        # Retrieve the maximum degree in the sequence
+        while num_degs[dmax] == 0:
+            dmax -= 1
+        # If there are not enough stubs to connect to, then the sequence is
+        # not graphical
+        if dmax > n - 1:
+            return False
+
+        # Remove largest stub in list
+        num_degs[dmax], n = num_degs[dmax] - 1, n - 1
+        # Reduce the next dmax largest stubs
+        mslen = 0
+        k = dmax
+        for i in range(dmax):
+            while num_degs[k] == 0:
+                k -= 1
+            num_degs[k], n = num_degs[k] - 1, n - 1
+            if k > 1:
+                modstubs[mslen] = k - 1
+                mslen += 1
+        # Add back to the list any non-zero stubs that were removed
+        for i in range(mslen):
+            stub = modstubs[i]
+            num_degs[stub], n = num_degs[stub] + 1, n + 1
+    return True


 @nx._dispatchable(graphs=None)
 def is_valid_degree_sequence_erdos_gallai(deg_sequence):
-    """Returns True if deg_sequence can be realized by a simple graph.
+    r"""Returns True if deg_sequence can be realized by a simple graph.

     The validation is done using the Erdős-Gallai theorem [EG1960]_.

@@ -154,8 +223,8 @@ def is_valid_degree_sequence_erdos_gallai(deg_sequence):

      .. math::

-       \\sum_{i=1}^{k} d_i \\leq k(k-1) + \\sum_{j=k+1}^{n} \\min(d_i,k)
-             = k(n-1) - ( k \\sum_{j=0}^{k-1} n_j - \\sum_{j=0}^{k-1} j n_j )
+       \sum_{i=1}^{k} d_i \leq k(k-1) + \sum_{j=k+1}^{n} \min(d_i,k)
+             = k(n-1) - ( k \sum_{j=0}^{k-1} n_j - \sum_{j=0}^{k-1} j n_j )

     A strong index k is any index where d_k >= k and the value n_j is the
     number of occurrences of j in d.  The maximal strong index is called the
@@ -166,7 +235,7 @@ def is_valid_degree_sequence_erdos_gallai(deg_sequence):
     The ZZ condition says that for the sequence d if

     .. math::
-        |d| >= \\frac{(\\max(d) + \\min(d) + 1)^2}{4*\\min(d)}
+        |d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)}

     then d is graphical.  This was shown in Theorem 6 in [2]_.

@@ -178,7 +247,31 @@ def is_valid_degree_sequence_erdos_gallai(deg_sequence):
        of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
     .. [EG1960] Erdős and Gallai, Mat. Lapok 11 264, 1960.
     """
-    pass
+    try:
+        dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence)
+    except nx.NetworkXUnfeasible:
+        return False
+    # Accept if sequence has no non-zero degrees or passes the ZZ condition
+    if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1):
+        return True
+
+    # Perform the EG checks using the reformulation of Zverovich and Zverovich
+    k, sum_deg, sum_nj, sum_jnj = 0, 0, 0, 0
+    for dk in range(dmax, dmin - 1, -1):
+        if dk < k + 1:  # Check if already past Durfee index
+            return True
+        if num_degs[dk] > 0:
+            run_size = num_degs[dk]  # Process a run of identical-valued degrees
+            if dk < k + run_size:  # Check if end of run is past Durfee index
+                run_size = dk - k  # Adjust back to Durfee index
+            sum_deg += run_size * dk
+            for v in range(run_size):
+                sum_nj += num_degs[k + v]
+                sum_jnj += (k + v) * num_degs[k + v]
+            k += run_size
+            if sum_deg > k * (n - 1) - k * sum_nj + sum_jnj:
+                return False
+    return True


 @nx._dispatchable(graphs=None)
@@ -218,7 +311,18 @@ def is_multigraphical(sequence):
        degrees of the vertices of a linear graph", J. SIAM, 10, pp. 496-506
        (1962).
     """
-    pass
+    try:
+        deg_sequence = nx.utils.make_list_of_ints(sequence)
+    except nx.NetworkXError:
+        return False
+    dsum, dmax = 0, 0
+    for d in deg_sequence:
+        if d < 0:
+            return False
+        dsum, dmax = dsum + d, max(dmax, d)
+    if dsum % 2 or dsum < 2 * dmax:
+        return False
+    return True


 @nx._dispatchable(graphs=None)
@@ -261,12 +365,16 @@ def is_pseudographical(sequence):
        and their degree lists", IEEE Trans. Circuits and Systems, CAS-23(12),
        pp. 778-782 (1976).
     """
-    pass
+    try:
+        deg_sequence = nx.utils.make_list_of_ints(sequence)
+    except nx.NetworkXError:
+        return False
+    return sum(deg_sequence) % 2 == 0 and min(deg_sequence) >= 0


 @nx._dispatchable(graphs=None)
 def is_digraphical(in_sequence, out_sequence):
-    """Returns True if some directed graph can realize the in- and out-degree
+    r"""Returns True if some directed graph can realize the in- and out-degree
     sequences.

     Parameters
@@ -299,7 +407,7 @@ def is_digraphical(in_sequence, out_sequence):
     Notes
     -----
     This algorithm is from Kleitman and Wang [1]_.
-    The worst case runtime is $O(s \\times \\log n)$ where $s$ and $n$ are the
+    The worst case runtime is $O(s \times \log n)$ where $s$ and $n$ are the
     sum and length of the sequences respectively.

     References
@@ -308,4 +416,68 @@ def is_digraphical(in_sequence, out_sequence):
        Algorithms for Constructing Graphs and Digraphs with Given Valences
        and Factors, Discrete Mathematics, 6(1), pp. 79-88 (1973)
     """
-    pass
+    try:
+        in_deg_sequence = nx.utils.make_list_of_ints(in_sequence)
+        out_deg_sequence = nx.utils.make_list_of_ints(out_sequence)
+    except nx.NetworkXError:
+        return False
+    # Process the sequences and form two heaps to store degree pairs with
+    # either zero or non-zero out degrees
+    sumin, sumout, nin, nout = 0, 0, len(in_deg_sequence), len(out_deg_sequence)
+    maxn = max(nin, nout)
+    maxin = 0
+    if maxn == 0:
+        return True
+    stubheap, zeroheap = [], []
+    for n in range(maxn):
+        in_deg, out_deg = 0, 0
+        if n < nout:
+            out_deg = out_deg_sequence[n]
+        if n < nin:
+            in_deg = in_deg_sequence[n]
+        if in_deg < 0 or out_deg < 0:
+            return False
+        sumin, sumout, maxin = sumin + in_deg, sumout + out_deg, max(maxin, in_deg)
+        if in_deg > 0:
+            stubheap.append((-1 * out_deg, -1 * in_deg))
+        elif out_deg > 0:
+            zeroheap.append(-1 * out_deg)
+    if sumin != sumout:
+        return False
+    heapq.heapify(stubheap)
+    heapq.heapify(zeroheap)
+
+    modstubs = [(0, 0)] * (maxin + 1)
+    # Successively reduce degree sequence by removing the maximum out degree
+    while stubheap:
+        # Take the first value in the sequence with non-zero in degree
+        (freeout, freein) = heapq.heappop(stubheap)
+        freein *= -1
+        if freein > len(stubheap) + len(zeroheap):
+            return False
+
+        # Attach out stubs to the nodes with the most in stubs
+        mslen = 0
+        for i in range(freein):
+            if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0]):
+                stubout = heapq.heappop(zeroheap)
+                stubin = 0
+            else:
+                (stubout, stubin) = heapq.heappop(stubheap)
+            if stubout == 0:
+                return False
+            # Check if target is now totally connected
+            if stubout + 1 < 0 or stubin < 0:
+                modstubs[mslen] = (stubout + 1, stubin)
+                mslen += 1
+
+        # Add back the nodes to the heap that still have available stubs
+        for i in range(mslen):
+            stub = modstubs[i]
+            if stub[1] < 0:
+                heapq.heappush(stubheap, stub)
+            else:
+                heapq.heappush(zeroheap, stub[0])
+        if freeout < 0:
+            heapq.heappush(zeroheap, freeout)
+    return True
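
The restored tests compose naturally; a minimal sketch (sequences chosen for illustration):

    import networkx as nx

    # The degree sequence of any simple graph is graphical, and the
    # Erdős-Gallai ("eg", default) and Havel-Hakimi ("hh") tests agree.
    seq = sorted(d for _, d in nx.petersen_graph().degree())
    assert nx.is_graphical(seq)
    assert nx.is_graphical(seq, method="hh")

    # An odd degree sum can never be realized (handshaking lemma).
    assert not nx.is_graphical([4, 3, 2])

    # A directed 3-cycle realizes these in-/out-degree sequences.
    assert nx.is_digraphical([1, 1, 1], [1, 1, 1])
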
diff --git a/networkx/algorithms/hierarchy.py b/networkx/algorithms/hierarchy.py
index 41e0823a8..4bb01cb45 100644
--- a/networkx/algorithms/hierarchy.py
+++ b/networkx/algorithms/hierarchy.py
@@ -2,10 +2,11 @@
 Flow Hierarchy.
 """
 import networkx as nx
-__all__ = ['flow_hierarchy']

+__all__ = ["flow_hierarchy"]

-@nx._dispatchable(edge_attrs='weight')
+
+@nx._dispatchable(edge_attrs="weight")
 def flow_hierarchy(G, weight=None):
     """Returns the flow hierarchy of a directed network.

@@ -41,4 +42,7 @@ def flow_hierarchy(G, weight=None):
        DOI: 10.1002/cplx.20368
        http://web.mit.edu/~cmagee/www/documents/28-DetectingEvolvingPatterns_FlowHierarchy.pdf
     """
-    pass
+    if not G.is_directed():
+        raise nx.NetworkXError("G must be a digraph in flow_hierarchy")
+    scc = nx.strongly_connected_components(G)
+    return 1 - sum(G.subgraph(c).size(weight) for c in scc) / G.size(weight)
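
The one-liner is easy to verify by hand on a graph with a single cycle; a sketch (graph chosen for illustration):

    import networkx as nx

    # Three of the four edges lie inside a strongly connected component
    # (the 3-cycle), so the flow hierarchy is 1 - 3/4.
    G = nx.DiGraph([(0, 1), (1, 2), (2, 0), (2, 3)])
    assert nx.flow_hierarchy(G) == 0.25
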
diff --git a/networkx/algorithms/hybrid.py b/networkx/algorithms/hybrid.py
index 2498f7fea..4d0e5c36e 100644
--- a/networkx/algorithms/hybrid.py
+++ b/networkx/algorithms/hybrid.py
@@ -4,8 +4,10 @@ graphs.

 """
 import copy
+
 import networkx as nx
-__all__ = ['kl_connected_subgraph', 'is_kl_connected']
+
+__all__ = ["kl_connected_subgraph", "is_kl_connected"]


 @nx._dispatchable(returns_graph=True)
@@ -59,7 +61,58 @@ def kl_connected_subgraph(G, k, l, low_memory=False, same_as_graph=False):
            2004. 89--104.

     """
-    pass
+    H = copy.deepcopy(G)  # subgraph we construct by removing from G
+
+    graphOK = True
+    deleted_some = True  # hack to start off the while loop
+    while deleted_some:
+        deleted_some = False
+        # We use `for edge in list(H.edges()):` instead of
+        # `for edge in H.edges():` because we edit the graph `H` in
+        # the loop. Hence using an iterator will result in
+        # `RuntimeError: dictionary changed size during iteration`
+        for edge in list(H.edges()):
+            (u, v) = edge
+            # Get copy of graph needed for this search
+            if low_memory:
+                verts = {u, v}
+                for i in range(k):
+                    for w in verts.copy():
+                        verts.update(G[w])
+                G2 = G.subgraph(verts).copy()
+            else:
+                G2 = copy.deepcopy(G)
+            ###
+            path = [u, v]
+            cnt = 0
+            accept = 0
+            while path:
+                cnt += 1  # Found a path
+                if cnt >= l:
+                    accept = 1
+                    break
+                # remove the edges of this path from the working copy
+                prev = u
+                for w in path:
+                    if prev != w:
+                        G2.remove_edge(prev, w)
+                        prev = w
+                #                path = shortest_path(G2, u, v, k) # ??? should "Cutoff" be k+1?
+                try:
+                    path = nx.shortest_path(G2, u, v)  # ??? should "Cutoff" be k+1?
+                except nx.NetworkXNoPath:
+                    path = False
+            # No Other Paths
+            if accept == 0:
+                H.remove_edge(u, v)
+                deleted_some = True
+                if graphOK:
+                    graphOK = False
+    # We looked through all edges and removed none of them.
+    # So, H is the maximal (k,l)-connected subgraph of G
+    if same_as_graph:
+        return (H, graphOK)
+    return H


 @nx._dispatchable
@@ -103,4 +156,40 @@ def is_kl_connected(G, k, l, low_memory=False):
            2004. 89--104.

     """
-    pass
+    graphOK = True
+    for edge in G.edges():
+        (u, v) = edge
+        # Get copy of graph needed for this search
+        if low_memory:
+            verts = {u, v}
+            for i in range(k):
+                for w in verts.copy():
+                    verts.update(G.neighbors(w))
+            G2 = G.subgraph(verts)
+        else:
+            G2 = copy.deepcopy(G)
+        ###
+        path = [u, v]
+        cnt = 0
+        accept = 0
+        while path:
+            cnt += 1  # Found a path
+            if cnt >= l:
+                accept = 1
+                break
+            # record edges along this graph
+            prev = u
+            for w in path:
+                if w != prev:
+                    G2.remove_edge(prev, w)
+                    prev = w
+            #            path = shortest_path(G2, u, v, k) # ??? should "Cutoff" be k+1?
+            try:
+                path = nx.shortest_path(G2, u, v)  # ??? should "Cutoff" be k+1?
+            except nx.NetworkXNoPath:
+                path = False
+        # No Other Paths
+        if accept == 0:
+            graphOK = False
+            break
+    # return status
+    return graphOK
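
Since an edge is itself a path of length 1, every graph is (1, 1)-connected, which gives a cheap smoke test for both restored functions (graph chosen for illustration):

    import networkx as nx

    G = nx.petersen_graph()
    assert nx.is_kl_connected(G, 1, 1)

    # Nothing has to be removed, so the subgraph equals G and the
    # same_as_graph flag reports that fact.
    H, same = nx.kl_connected_subgraph(G, 1, 1, same_as_graph=True)
    assert same and set(H.edges) == set(G.edges)
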
diff --git a/networkx/algorithms/isolate.py b/networkx/algorithms/isolate.py
index 4cdf6c772..23ac23875 100644
--- a/networkx/algorithms/isolate.py
+++ b/networkx/algorithms/isolate.py
@@ -2,7 +2,8 @@
 Functions for identifying isolate (degree zero) nodes.
 """
 import networkx as nx
-__all__ = ['is_isolate', 'isolates', 'number_of_isolates']
+
+__all__ = ["is_isolate", "isolates", "number_of_isolates"]


 @nx._dispatchable
@@ -35,7 +36,7 @@ def is_isolate(G, n):
     >>> nx.is_isolate(G, 3)
     True
     """
-    pass
+    return G.degree(n) == 0


 @nx._dispatchable
@@ -81,7 +82,7 @@ def isolates(G):
         [3]

     """
-    pass
+    return (n for n, d in G.degree() if d == 0)


 @nx._dispatchable
@@ -102,4 +103,5 @@ def number_of_isolates(G):
         The number of degree zero nodes in the graph `G`.

     """
-    pass
+    # TODO This can be parallelized.
+    return sum(1 for v in isolates(G))
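
All three helpers in one sketch (graph chosen for illustration):

    import networkx as nx

    G = nx.Graph([(0, 1)])
    G.add_nodes_from([2, 3])  # two degree-zero nodes
    assert nx.is_isolate(G, 2)
    assert sorted(nx.isolates(G)) == [2, 3]
    assert nx.number_of_isolates(G) == 2
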
diff --git a/networkx/algorithms/isomorphism/ismags.py b/networkx/algorithms/isomorphism/ismags.py
index 483833c52..24819faf9 100644
--- a/networkx/algorithms/isomorphism/ismags.py
+++ b/networkx/algorithms/isomorphism/ismags.py
@@ -105,7 +105,9 @@ References
    https://doi.org/10.1371/journal.pone.0097896
 .. [2] https://en.wikipedia.org/wiki/Maximum_common_induced_subgraph
 """
-__all__ = ['ISMAGS']
+
+__all__ = ["ISMAGS"]
+
 import itertools
 from collections import Counter, defaultdict
 from functools import reduce, wraps
@@ -127,7 +129,18 @@ def are_all_equal(iterable):
         ``True`` iff all elements in `iterable` compare equal, ``False``
         otherwise.
     """
-    pass
+    try:
+        shape = iterable.shape
+    except AttributeError:
+        pass
+    else:
+        if len(shape) > 1:
+            message = "The function does not works on multidimensional arrays."
+            raise NotImplementedError(message) from None
+
+    iterator = iter(iterable)
+    first = next(iterator, None)
+    return all(item == first for item in iterator)


 def make_partitions(items, test):
@@ -156,7 +169,16 @@ def make_partitions(items, test):
     The function `test` is assumed to be transitive: if ``test(a, b)`` and
     ``test(b, c)`` return ``True``, then ``test(a, c)`` must also be ``True``.
     """
-    pass
+    partitions = []
+    for item in items:
+        for partition in partitions:
+            p_item = next(iter(partition))
+            if test(item, p_item):
+                partition.add(item)
+                break
+        else:  # No break
+            partitions.append({item})
+    return partitions


 def partition_to_color(partitions):
@@ -173,7 +195,11 @@ def partition_to_color(partitions):
     -------
     dict
     """
-    pass
+    colors = {}
+    for color, keys in enumerate(partitions):
+        for key in keys:
+            colors[key] = color
+    return colors


 def intersect(collection_of_sets):
@@ -191,7 +217,10 @@ def intersect(collection_of_sets):
         An intersection of all sets in `collection_of_sets`. Will have the same
         type as the item initially taken from `collection_of_sets`.
     """
-    pass
+    collection_of_sets = list(collection_of_sets)
+    first = collection_of_sets.pop()
+    out = reduce(set.intersection, collection_of_sets, set(first))
+    return type(first)(out)


 class ISMAGS:
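
The three module-level helpers restored above are private to networkx.algorithms.isomorphism.ismags rather than public API, but they can be imported for a quick check; a sketch using a parity partition (illustrative):

    from networkx.algorithms.isomorphism.ismags import (
        intersect,
        make_partitions,
        partition_to_color,
    )

    # Partition 0..5 by parity; the test function must be transitive.
    parts = make_partitions(range(6), lambda a, b: a % 2 == b % 2)
    assert parts == [{0, 2, 4}, {1, 3, 5}]

    # Each item maps to the index of its partition (its "color").
    assert partition_to_color(parts) == {0: 0, 2: 0, 4: 0, 1: 1, 3: 1, 5: 1}

    # intersect returns the same container type it was given.
    assert intersect([{1, 2, 3}, {2, 3, 4}]) == {2, 3}
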
@@ -237,8 +266,7 @@ class ISMAGS:
        https://doi.org/10.1371/journal.pone.0097896
     """

-    def __init__(self, graph, subgraph, node_match=None, edge_match=None,
-        cache=None):
+    def __init__(self, graph, subgraph, node_match=None, edge_match=None, cache=None):
         """
         Parameters
         ----------
@@ -261,34 +289,156 @@ class ISMAGS:
         cache: collections.abc.Mapping
             A cache used for caching graph symmetries.
         """
+        # TODO: graph and subgraph setter methods that invalidate the caches.
+        # TODO: allow for precomputed partitions and colors
         self.graph = graph
         self.subgraph = subgraph
         self._symmetry_cache = cache
+        # Naming conventions are taken from the original paper. For your
+        # sanity:
+        #   sg: subgraph
+        #   g: graph
+        #   e: edge(s)
+        #   n: node(s)
+        # So: sgn means "subgraph nodes".
         self._sgn_partitions_ = None
         self._sge_partitions_ = None
+
         self._sgn_colors_ = None
         self._sge_colors_ = None
+
         self._gn_partitions_ = None
         self._ge_partitions_ = None
+
         self._gn_colors_ = None
         self._ge_colors_ = None
+
         self._node_compat_ = None
         self._edge_compat_ = None
+
         if node_match is None:
             self.node_equality = self._node_match_maker(lambda n1, n2: True)
             self._sgn_partitions_ = [set(self.subgraph.nodes)]
             self._gn_partitions_ = [set(self.graph.nodes)]
-            self._node_compat_ = {(0): 0}
+            self._node_compat_ = {0: 0}
         else:
             self.node_equality = self._node_match_maker(node_match)
         if edge_match is None:
             self.edge_equality = self._edge_match_maker(lambda e1, e2: True)
             self._sge_partitions_ = [set(self.subgraph.edges)]
             self._ge_partitions_ = [set(self.graph.edges)]
-            self._edge_compat_ = {(0): 0}
+            self._edge_compat_ = {0: 0}
         else:
             self.edge_equality = self._edge_match_maker(edge_match)

+    @property
+    def _sgn_partitions(self):
+        if self._sgn_partitions_ is None:
+
+            def nodematch(node1, node2):
+                return self.node_equality(self.subgraph, node1, self.subgraph, node2)
+
+            self._sgn_partitions_ = make_partitions(self.subgraph.nodes, nodematch)
+        return self._sgn_partitions_
+
+    @property
+    def _sge_partitions(self):
+        if self._sge_partitions_ is None:
+
+            def edgematch(edge1, edge2):
+                return self.edge_equality(self.subgraph, edge1, self.subgraph, edge2)
+
+            self._sge_partitions_ = make_partitions(self.subgraph.edges, edgematch)
+        return self._sge_partitions_
+
+    @property
+    def _gn_partitions(self):
+        if self._gn_partitions_ is None:
+
+            def nodematch(node1, node2):
+                return self.node_equality(self.graph, node1, self.graph, node2)
+
+            self._gn_partitions_ = make_partitions(self.graph.nodes, nodematch)
+        return self._gn_partitions_
+
+    @property
+    def _ge_partitions(self):
+        if self._ge_partitions_ is None:
+
+            def edgematch(edge1, edge2):
+                return self.edge_equality(self.graph, edge1, self.graph, edge2)
+
+            self._ge_partitions_ = make_partitions(self.graph.edges, edgematch)
+        return self._ge_partitions_
+
+    @property
+    def _sgn_colors(self):
+        if self._sgn_colors_ is None:
+            self._sgn_colors_ = partition_to_color(self._sgn_partitions)
+        return self._sgn_colors_
+
+    @property
+    def _sge_colors(self):
+        if self._sge_colors_ is None:
+            self._sge_colors_ = partition_to_color(self._sge_partitions)
+        return self._sge_colors_
+
+    @property
+    def _gn_colors(self):
+        if self._gn_colors_ is None:
+            self._gn_colors_ = partition_to_color(self._gn_partitions)
+        return self._gn_colors_
+
+    @property
+    def _ge_colors(self):
+        if self._ge_colors_ is None:
+            self._ge_colors_ = partition_to_color(self._ge_partitions)
+        return self._ge_colors_
+
+    @property
+    def _node_compatibility(self):
+        if self._node_compat_ is not None:
+            return self._node_compat_
+        self._node_compat_ = {}
+        for sgn_part_color, gn_part_color in itertools.product(
+            range(len(self._sgn_partitions)), range(len(self._gn_partitions))
+        ):
+            sgn = next(iter(self._sgn_partitions[sgn_part_color]))
+            gn = next(iter(self._gn_partitions[gn_part_color]))
+            if self.node_equality(self.subgraph, sgn, self.graph, gn):
+                self._node_compat_[sgn_part_color] = gn_part_color
+        return self._node_compat_
+
+    @property
+    def _edge_compatibility(self):
+        if self._edge_compat_ is not None:
+            return self._edge_compat_
+        self._edge_compat_ = {}
+        for sge_part_color, ge_part_color in itertools.product(
+            range(len(self._sge_partitions)), range(len(self._ge_partitions))
+        ):
+            sge = next(iter(self._sge_partitions[sge_part_color]))
+            ge = next(iter(self._ge_partitions[ge_part_color]))
+            if self.edge_equality(self.subgraph, sge, self.graph, ge):
+                self._edge_compat_[sge_part_color] = ge_part_color
+        return self._edge_compat_
+
+    @staticmethod
+    def _node_match_maker(cmp):
+        @wraps(cmp)
+        def comparer(graph1, node1, graph2, node2):
+            return cmp(graph1.nodes[node1], graph2.nodes[node2])
+
+        return comparer
+
+    @staticmethod
+    def _edge_match_maker(cmp):
+        @wraps(cmp)
+        def comparer(graph1, edge1, graph2, edge2):
+            return cmp(graph1.edges[edge1], graph2.edges[edge2])
+
+        return comparer
+
     def find_isomorphisms(self, symmetry=True):
         """Find all subgraph isomorphisms between subgraph and graph

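A usage sketch for the class as a whole: embedding a 3-path into a 4-path gives four subgraph isomorphisms, which symmetry handling collapses to two (one per position):

    import networkx as nx

    graph = nx.path_graph(4)
    subgraph = nx.path_graph(3)
    ismags = nx.isomorphism.ISMAGS(graph, subgraph)

    # P3 embeds into P4 at 2 positions x 2 orientations = 4 mappings;
    # with symmetry=True the two orientations are collapsed into one.
    assert len(list(ismags.find_isomorphisms(symmetry=False))) == 4
    assert len(list(ismags.find_isomorphisms(symmetry=True))) == 2
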
@@ -305,7 +455,37 @@ class ISMAGS:
         dict
             The found isomorphism mappings of {graph_node: subgraph_node}.
         """
-        pass
+        # The networkx VF2 algorithm is slightly inconsistent about when it
+        # yields an empty dict and when it does not.
+        if not self.subgraph:
+            yield {}
+            return
+        elif not self.graph:
+            return
+        elif len(self.graph) < len(self.subgraph):
+            return
+
+        if symmetry:
+            _, cosets = self.analyze_symmetry(
+                self.subgraph, self._sgn_partitions, self._sge_colors
+            )
+            constraints = self._make_constraints(cosets)
+        else:
+            constraints = []
+
+        candidates = self._find_nodecolor_candidates()
+        la_candidates = self._get_lookahead_candidates()
+        for sgn in self.subgraph:
+            extra_candidates = la_candidates[sgn]
+            if extra_candidates:
+                candidates[sgn] = candidates[sgn] | {frozenset(extra_candidates)}
+
+        if any(candidates.values()):
+            start_sgn = min(candidates, key=lambda n: min(candidates[n], key=len))
+            candidates[start_sgn] = (intersect(candidates[start_sgn]),)
+            yield from self._map_nodes(start_sgn, candidates, constraints)
+        else:
+            return

     @staticmethod
     def _find_neighbor_color_count(graph, node, node_color, edge_color):
@@ -313,7 +493,16 @@ class ISMAGS:
         For `node` in `graph`, count the number of edges of a specific color
         it has to nodes of a specific color.
         """
-        pass
+        counts = Counter()
+        neighbors = graph[node]
+        for neighbor in neighbors:
+            n_color = node_color[neighbor]
+            if (node, neighbor) in edge_color:
+                e_color = edge_color[node, neighbor]
+            else:
+                e_color = edge_color[neighbor, node]
+            counts[e_color, n_color] += 1
+        return counts

     def _get_lookahead_candidates(self):
         """
@@ -321,7 +510,31 @@ class ISMAGS:
         which the graph nodes are feasible candidates for the subgraph node, as
         determined by looking ahead one edge.
         """
-        pass
+        g_counts = {}
+        for gn in self.graph:
+            g_counts[gn] = self._find_neighbor_color_count(
+                self.graph, gn, self._gn_colors, self._ge_colors
+            )
+        candidates = defaultdict(set)
+        for sgn in self.subgraph:
+            sg_count = self._find_neighbor_color_count(
+                self.subgraph, sgn, self._sgn_colors, self._sge_colors
+            )
+            new_sg_count = Counter()
+            for (sge_color, sgn_color), count in sg_count.items():
+                try:
+                    ge_color = self._edge_compatibility[sge_color]
+                    gn_color = self._node_compatibility[sgn_color]
+                except KeyError:
+                    pass
+                else:
+                    new_sg_count[ge_color, gn_color] = count
+
+            for gn, g_count in g_counts.items():
+                if all(new_sg_count[x] <= g_count[x] for x in new_sg_count):
+                    # Valid candidate
+                    candidates[sgn].add(gn)
+        return candidates

     def largest_common_subgraph(self, symmetry=True):
         """
@@ -339,7 +552,28 @@ class ISMAGS:
         dict
             The found isomorphism mappings of {graph_node: subgraph_node}.
         """
-        pass
+        # The networkx VF2 algorithm is slightly inconsistent about when it
+        # yields an empty dict and when it does not.
+        if not self.subgraph:
+            yield {}
+            return
+        elif not self.graph:
+            return
+
+        if symmetry:
+            _, cosets = self.analyze_symmetry(
+                self.subgraph, self._sgn_partitions, self._sge_colors
+            )
+            constraints = self._make_constraints(cosets)
+        else:
+            constraints = []
+
+        candidates = self._find_nodecolor_candidates()
+
+        if any(candidates.values()):
+            yield from self._largest_common_subgraph(candidates, constraints)
+        else:
+            return
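
A small usage sketch (the graphs are illustrative): the star's hub needs degree 3, which the path cannot offer, so the largest common induced subgraph is a 3-node path.

import networkx as nx

graph = nx.path_graph(4)     # 0-1-2-3
subgraph = nx.star_graph(3)  # hub 0 with leaves 1, 2, 3
ismags = nx.isomorphism.ISMAGS(graph, subgraph)

for mapping in ismags.largest_common_subgraph():
    print(mapping)  # {graph_node: subgraph_node} dicts with 3 entries each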

     def analyze_symmetry(self, graph, node_partitions, edge_colors):
         """
@@ -373,7 +607,28 @@ class ISMAGS:
             Every key-value pair describes which ``values`` can be interchanged
             without changing nodes less than ``key``.
         """
-        pass
+        if self._symmetry_cache is not None:
+            key = hash(
+                (
+                    tuple(graph.nodes),
+                    tuple(graph.edges),
+                    tuple(map(tuple, node_partitions)),
+                    tuple(edge_colors.items()),
+                )
+            )
+            if key in self._symmetry_cache:
+                return self._symmetry_cache[key]
+        node_partitions = list(
+            self._refine_node_partitions(graph, node_partitions, edge_colors)
+        )
+        assert len(node_partitions) == 1
+        node_partitions = node_partitions[0]
+        permutations, cosets = self._process_ordered_pair_partitions(
+            graph, node_partitions, node_partitions, edge_colors
+        )
+        if self._symmetry_cache is not None:
+            self._symmetry_cache[key] = permutations, cosets
+        return permutations, cosets
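
To see the method in action, a sketch that feeds it the same internal partitions `find_isomorphisms` uses (note `_sgn_partitions` and `_sge_colors` are private attributes of this class, used here only for the demonstration):

import networkx as nx

graph = nx.path_graph(4)
ismags = nx.isomorphism.ISMAGS(graph, graph)
permutations, cosets = ismags.analyze_symmetry(
    ismags.subgraph, ismags._sgn_partitions, ismags._sge_colors
)
# For the 4-path this should report the end-for-end mirror, e.g. a
# permutation swapping 0<->3 and 1<->2, with a coset keyed on node 0.
print(permutations)
print(cosets)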

     def is_isomorphic(self, symmetry=False):
         """
@@ -384,7 +639,9 @@ class ISMAGS:
         -------
         bool
         """
-        pass
+        return len(self.subgraph) == len(self.graph) and self.subgraph_is_isomorphic(
+            symmetry
+        )

     def subgraph_is_isomorphic(self, symmetry=False):
         """
@@ -395,31 +652,53 @@ class ISMAGS:
         -------
         bool
         """
-        pass
+        # symmetry=False, since we only need to know whether there is any
+        # example; figuring out all symmetry elements probably costs more time
+        # than it gains.
+        isom = next(self.subgraph_isomorphisms_iter(symmetry=symmetry), None)
+        return isom is not None

     def isomorphisms_iter(self, symmetry=True):
         """
         Does the same as :meth:`find_isomorphisms` if :attr:`graph` and
         :attr:`subgraph` have the same number of nodes.
         """
-        pass
+        if len(self.graph) == len(self.subgraph):
+            yield from self.subgraph_isomorphisms_iter(symmetry=symmetry)

     def subgraph_isomorphisms_iter(self, symmetry=True):
         """Alternative name for :meth:`find_isomorphisms`."""
-        pass
+        return self.find_isomorphisms(symmetry)

     def _find_nodecolor_candidates(self):
         """
         Per node in subgraph find all nodes in graph that have the same color.
         """
-        pass
+        candidates = defaultdict(set)
+        for sgn in self.subgraph.nodes:
+            sgn_color = self._sgn_colors[sgn]
+            if sgn_color in self._node_compatibility:
+                gn_color = self._node_compatibility[sgn_color]
+                candidates[sgn].add(frozenset(self._gn_partitions[gn_color]))
+            else:
+                candidates[sgn].add(frozenset())
+        candidates = dict(candidates)
+        for sgn, options in candidates.items():
+            candidates[sgn] = frozenset(options)
+        return candidates

     @staticmethod
     def _make_constraints(cosets):
         """
         Turn cosets into constraints.
         """
-        pass
+        constraints = []
+        for node_i, node_ts in cosets.items():
+            for node_t in node_ts:
+                if node_i != node_t:
+                    # Node i must be smaller than node t.
+                    constraints.append((node_i, node_t))
+        return constraints
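
The transformation is easiest to see on literal data; a standalone trace of the loop above (pair order follows set iteration order):

cosets = {0: {0, 3}, 1: {1, 2}}
constraints = []
for node_i, node_ts in cosets.items():
    for node_t in node_ts:
        if node_i != node_t:
            constraints.append((node_i, node_t))
print(constraints)  # [(0, 3), (1, 2)]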

     @staticmethod
     def _find_node_edge_color(graph, node_colors, edge_colors):
@@ -428,7 +707,23 @@ class ISMAGS:
         color of the node, and 2) the number of edges of a color to each type
         of node.
         """
-        pass
+        counts = defaultdict(lambda: defaultdict(int))
+        for node1, node2 in graph.edges:
+            if (node1, node2) in edge_colors:
+                # FIXME directed graphs
+                ecolor = edge_colors[node1, node2]
+            else:
+                ecolor = edge_colors[node2, node1]
+            # Count per node how many edges it has of what color to nodes of
+            # what color
+            counts[node1][ecolor, node_colors[node2]] += 1
+            counts[node2][ecolor, node_colors[node1]] += 1
+
+        node_edge_colors = {}
+        for node in graph.nodes:
+            node_edge_colors[node] = node_colors[node], set(counts[node].items())
+
+        return node_edge_colors

     @staticmethod
     def _get_permutations_by_length(items):
@@ -446,38 +741,225 @@ class ISMAGS:
         >>> found == answer
         True
         """
-        pass
+        by_len = defaultdict(list)
+        for item in items:
+            by_len[len(item)].append(item)
+
+        yield from itertools.product(
+            *(itertools.permutations(by_len[l]) for l in sorted(by_len))
+        )

     @classmethod
-    def _refine_node_partitions(cls, graph, node_partitions, edge_colors,
-        branch=False):
+    def _refine_node_partitions(cls, graph, node_partitions, edge_colors, branch=False):
         """
         Given a partition of nodes in graph, make the partitions smaller such
         that all nodes in a partition have 1) the same color, and 2) the same
         number of edges to specific other partitions.
         """
-        pass
+
+        def equal_color(node1, node2):
+            return node_edge_colors[node1] == node_edge_colors[node2]
+
+        node_partitions = list(node_partitions)
+        node_colors = partition_to_color(node_partitions)
+        node_edge_colors = cls._find_node_edge_color(graph, node_colors, edge_colors)
+        if all(
+            are_all_equal(node_edge_colors[node] for node in partition)
+            for partition in node_partitions
+        ):
+            yield node_partitions
+            return
+
+        new_partitions = []
+        output = [new_partitions]
+        for partition in node_partitions:
+            if not are_all_equal(node_edge_colors[node] for node in partition):
+                refined = make_partitions(partition, equal_color)
+                if (
+                    branch
+                    and len(refined) != 1
+                    and len({len(r) for r in refined}) != len([len(r) for r in refined])
+                ):
+                    # This is where it breaks. There are multiple new cells
+                    # in refined with the same length, and their order
+                    # matters.
+                    # So option 1) Hit it with a big hammer and simply make all
+                    # orderings.
+                    permutations = cls._get_permutations_by_length(refined)
+                    new_output = []
+                    for n_p in output:
+                        for permutation in permutations:
+                            new_output.append(n_p + list(permutation[0]))
+                    output = new_output
+                else:
+                    for n_p in output:
+                        n_p.extend(sorted(refined, key=len))
+            else:
+                for n_p in output:
+                    n_p.append(partition)
+        for n_p in output:
+            yield from cls._refine_node_partitions(graph, n_p, edge_colors, branch)

     def _edges_of_same_color(self, sgn1, sgn2):
         """
         Returns all edges in :attr:`graph` that have the same color as the
         edge between sgn1 and sgn2 in :attr:`subgraph`.
         """
-        pass
+        if (sgn1, sgn2) in self._sge_colors:
+            # FIXME directed graphs
+            sge_color = self._sge_colors[sgn1, sgn2]
+        else:
+            sge_color = self._sge_colors[sgn2, sgn1]
+        if sge_color in self._edge_compatibility:
+            ge_color = self._edge_compatibility[sge_color]
+            g_edges = self._ge_partitions[ge_color]
+        else:
+            g_edges = []
+        return g_edges

-    def _map_nodes(self, sgn, candidates, constraints, mapping=None,
-        to_be_mapped=None):
+    def _map_nodes(self, sgn, candidates, constraints, mapping=None, to_be_mapped=None):
         """
         Find all subgraph isomorphisms honoring constraints.
         """
-        pass
-
-    def _largest_common_subgraph(self, candidates, constraints,
-        to_be_mapped=None):
+        if mapping is None:
+            mapping = {}
+        else:
+            mapping = mapping.copy()
+        if to_be_mapped is None:
+            to_be_mapped = set(self.subgraph.nodes)
+
+        # Note: we modify candidates here. This doesn't seem to affect the
+        # results, but keep it in mind.
+        # candidates = candidates.copy()
+        sgn_candidates = intersect(candidates[sgn])
+        candidates[sgn] = frozenset([sgn_candidates])
+        for gn in sgn_candidates:
+            # We're going to try to map sgn to gn.
+            if gn in mapping.values() or sgn not in to_be_mapped:
+                # gn is already mapped to something
+                continue  # pragma: no cover
+
+            # REDUCTION and COMBINATION
+            mapping[sgn] = gn
+            # BASECASE
+            if to_be_mapped == set(mapping.keys()):
+                yield {v: k for k, v in mapping.items()}
+                continue
+            left_to_map = to_be_mapped - set(mapping.keys())
+
+            new_candidates = candidates.copy()
+            sgn_nbrs = set(self.subgraph[sgn])
+            not_gn_nbrs = set(self.graph.nodes) - set(self.graph[gn])
+            for sgn2 in left_to_map:
+                if sgn2 not in sgn_nbrs:
+                    gn2_options = not_gn_nbrs
+                else:
+                    # Get all edges to gn of the right color:
+                    g_edges = self._edges_of_same_color(sgn, sgn2)
+                    # FIXME directed graphs
+                    # And all nodes involved in those which are connected to gn
+                    gn2_options = {n for e in g_edges for n in e if gn in e}
+                # Node color compatibility should be taken care of by the
+                # initial candidate lists made by _find_nodecolor_candidates
+
+                # Add gn2_options to the right collection. Since new_candidates
+                # is a dict of frozensets of frozensets of node indices it's
+                # a bit clunky. We can't do .add, and + also doesn't work. We
+                # could do |, but I deem union to be clearer.
+                new_candidates[sgn2] = new_candidates[sgn2].union(
+                    [frozenset(gn2_options)]
+                )
+
+                if (sgn, sgn2) in constraints:
+                    gn2_options = {gn2 for gn2 in self.graph if gn2 > gn}
+                elif (sgn2, sgn) in constraints:
+                    gn2_options = {gn2 for gn2 in self.graph if gn2 < gn}
+                else:
+                    continue  # pragma: no cover
+                new_candidates[sgn2] = new_candidates[sgn2].union(
+                    [frozenset(gn2_options)]
+                )
+
+            # The next node is the one that is unmapped and has the fewest
+            # candidates.
+            next_sgn = min(left_to_map, key=lambda n: min(new_candidates[n], key=len))
+            yield from self._map_nodes(
+                next_sgn,
+                new_candidates,
+                constraints,
+                mapping=mapping,
+                to_be_mapped=to_be_mapped,
+            )
+            # Unmap sgn-gn. Strictly not necessary since it'd get overwritten
+            # when making a new mapping for sgn.
+            # del mapping[sgn]
+
+    def _largest_common_subgraph(self, candidates, constraints, to_be_mapped=None):
         """
         Find all largest common subgraphs honoring constraints.
         """
-        pass
+        if to_be_mapped is None:
+            to_be_mapped = {frozenset(self.subgraph.nodes)}
+
+        # The LCS problem is basically a repeated subgraph isomorphism problem
+        # with smaller and smaller subgraphs. We store the nodes that are
+        # "part of" the subgraph in to_be_mapped, and we make it a little
+        # smaller every iteration.
+
+        current_size = len(next(iter(to_be_mapped), []))
+
+        found_iso = False
+        if current_size <= len(self.graph):
+            # There's no point in trying to embed the subgraph into the
+            # graph if the subgraph has more nodes than the graph.
+
+            # Try the isomorphism first with the nodes with lowest ID. So sort
+            # them. Those are more likely to be part of the final
+            # correspondence. This makes finding the first answer(s) faster. In
+            # theory.
+            for nodes in sorted(to_be_mapped, key=sorted):
+                # Find the isomorphism between subgraph[to_be_mapped] <= graph
+                next_sgn = min(nodes, key=lambda n: min(candidates[n], key=len))
+                isomorphs = self._map_nodes(
+                    next_sgn, candidates, constraints, to_be_mapped=nodes
+                )
+
+                # This is effectively `yield from isomorphs`, except that we
+                # check whether an item was yielded.
+                try:
+                    item = next(isomorphs)
+                except StopIteration:
+                    pass
+                else:
+                    yield item
+                    yield from isomorphs
+                    found_iso = True
+
+        # BASECASE
+        if found_iso or current_size == 1:
+            # Shrinking has no point because either 1) we end up with a smaller
+            # common subgraph (and we want the largest), or 2) there'll be no
+            # more subgraph.
+            return
+
+        left_to_be_mapped = set()
+        for nodes in to_be_mapped:
+            for sgn in nodes:
+                # We're going to remove sgn from to_be_mapped, but subject to
+                # symmetry constraints. We know that for every constraint the
+                # two subgraph nodes involved are interchangeable. So whenever
+                # we would remove the lower part of a constraint, remove the
+                # higher instead. This is all dealt with by _remove_node. And
+                # because left_to_be_mapped is a set, we don't do double work.
+
+                # And finally, make the subgraph one node smaller.
+                # REDUCTION
+                new_nodes = self._remove_node(sgn, nodes, constraints)
+                left_to_be_mapped.add(new_nodes)
+        # COMBINATION
+        yield from self._largest_common_subgraph(
+            candidates, constraints, to_be_mapped=left_to_be_mapped
+        )

     @staticmethod
     def _remove_node(node, nodes, constraints):
@@ -487,7 +969,14 @@ class ISMAGS:
         those subgraph nodes are equal. So whenever we would remove the
         lower part of a constraint, remove the higher instead.
         """
-        pass
+        while True:
+            for low, high in constraints:
+                if low == node and high in nodes:
+                    node = high
+                    break
+            else:  # no break, couldn't find node in constraints
+                break
+        return frozenset(nodes - {node})
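
A standalone trace of the loop: dropping node 0 under the constraint chain (0, 1), (1, 2) walks up the chain and actually drops node 2.

node, nodes = 0, {0, 1, 2}
constraints = [(0, 1), (1, 2)]
while True:
    for low, high in constraints:
        if low == node and high in nodes:
            node = high  # removing `low` would violate the constraint
            break
    else:  # no break: node is not the low end of any applicable constraint
        break
print(frozenset(nodes - {node}))  # frozenset({0, 1})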

     @staticmethod
     def _find_permutations(top_partitions, bottom_partitions):
@@ -496,7 +985,18 @@ class ISMAGS:
         different. Ensures that all partitions in both top and bottom
         partitions have size 1.
         """
-        pass
+        # Find permutations
+        permutations = set()
+        for top, bot in zip(top_partitions, bottom_partitions):
+            # top and bot have only one element
+            if len(top) != 1 or len(bot) != 1:
+                raise IndexError(
+                    "Not all nodes are coupled. This is"
+                    f" impossible: {top_partitions}, {bottom_partitions}"
+                )
+            if top != bot:
+                permutations.add(frozenset((next(iter(top)), next(iter(bot)))))
+        return permutations
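
On literal singleton partitions the extraction reduces to a set comprehension; a sketch of the non-error path:

top = [{0}, {1}, {2}, {3}]
bottom = [{3}, {1}, {2}, {0}]
perms = {
    frozenset((next(iter(t)), next(iter(b))))
    for t, b in zip(top, bottom)
    if t != b
}
print(perms)  # {frozenset({0, 3})}: both differing slots give the same swap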

     @staticmethod
     def _update_orbits(orbits, permutations):
@@ -505,21 +1005,159 @@ class ISMAGS:
         For every pair of items in permutations their respective orbits are
         merged.
         """
-        pass
-
-    def _couple_nodes(self, top_partitions, bottom_partitions, pair_idx,
-        t_node, b_node, graph, edge_colors):
+        for permutation in permutations:
+            node, node2 = permutation
+            # Find the orbits that contain node and node2, and replace the
+            # orbit containing node with the union
+            first = second = None
+            for idx, orbit in enumerate(orbits):
+                if first is not None and second is not None:
+                    break
+                if node in orbit:
+                    first = idx
+                if node2 in orbit:
+                    second = idx
+            if first != second:
+                orbits[first].update(orbits[second])
+                del orbits[second]
+
+    def _couple_nodes(
+        self,
+        top_partitions,
+        bottom_partitions,
+        pair_idx,
+        t_node,
+        b_node,
+        graph,
+        edge_colors,
+    ):
         """
         Generate new partitions from top and bottom_partitions where t_node is
         coupled to b_node. pair_idx is the index of the partitions where t_ and
         b_node can be found.
         """
-        pass
-
-    def _process_ordered_pair_partitions(self, graph, top_partitions,
-        bottom_partitions, edge_colors, orbits=None, cosets=None):
+        t_partition = top_partitions[pair_idx]
+        b_partition = bottom_partitions[pair_idx]
+        assert t_node in t_partition and b_node in b_partition
+        # Couple node to node2. This means they get their own partition
+        new_top_partitions = [top.copy() for top in top_partitions]
+        new_bottom_partitions = [bot.copy() for bot in bottom_partitions]
+        new_t_groups = {t_node}, t_partition - {t_node}
+        new_b_groups = {b_node}, b_partition - {b_node}
+        # Replace the old partitions with the coupled ones
+        del new_top_partitions[pair_idx]
+        del new_bottom_partitions[pair_idx]
+        new_top_partitions[pair_idx:pair_idx] = new_t_groups
+        new_bottom_partitions[pair_idx:pair_idx] = new_b_groups
+
+        new_top_partitions = self._refine_node_partitions(
+            graph, new_top_partitions, edge_colors
+        )
+        new_bottom_partitions = self._refine_node_partitions(
+            graph, new_bottom_partitions, edge_colors, branch=True
+        )
+        new_top_partitions = list(new_top_partitions)
+        assert len(new_top_partitions) == 1
+        new_top_partitions = new_top_partitions[0]
+        for bot in new_bottom_partitions:
+            yield list(new_top_partitions), bot
+
+    def _process_ordered_pair_partitions(
+        self,
+        graph,
+        top_partitions,
+        bottom_partitions,
+        edge_colors,
+        orbits=None,
+        cosets=None,
+    ):
         """
         Processes ordered pair partitions as per the reference paper. Finds and
         returns all permutations and cosets that leave the graph unchanged.
         """
-        pass
+        if orbits is None:
+            orbits = [{node} for node in graph.nodes]
+        else:
+            # Note that we don't copy orbits when we are given one. This means
+            # we leak information between the recursive branches. This is
+            # intentional!
+            orbits = orbits
+        if cosets is None:
+            cosets = {}
+        else:
+            cosets = cosets.copy()
+
+        assert all(
+            len(t_p) == len(b_p) for t_p, b_p in zip(top_partitions, bottom_partitions)
+        )
+
+        # BASECASE
+        if all(len(top) == 1 for top in top_partitions):
+            # All nodes are mapped
+            permutations = self._find_permutations(top_partitions, bottom_partitions)
+            self._update_orbits(orbits, permutations)
+            if permutations:
+                return [permutations], cosets
+            else:
+                return [], cosets
+
+        permutations = []
+        unmapped_nodes = {
+            (node, idx)
+            for idx, t_partition in enumerate(top_partitions)
+            for node in t_partition
+            if len(t_partition) > 1
+        }
+        node, pair_idx = min(unmapped_nodes)
+        b_partition = bottom_partitions[pair_idx]
+
+        for node2 in sorted(b_partition):
+            if len(b_partition) == 1:
+                # Can never result in symmetry
+                continue
+            if node != node2 and any(
+                node in orbit and node2 in orbit for orbit in orbits
+            ):
+                # Orbit prune branch
+                continue
+            # REDUCTION
+            # Couple node to node2
+            partitions = self._couple_nodes(
+                top_partitions,
+                bottom_partitions,
+                pair_idx,
+                node,
+                node2,
+                graph,
+                edge_colors,
+            )
+            for opp in partitions:
+                new_top_partitions, new_bottom_partitions = opp
+
+                new_perms, new_cosets = self._process_ordered_pair_partitions(
+                    graph,
+                    new_top_partitions,
+                    new_bottom_partitions,
+                    edge_colors,
+                    orbits,
+                    cosets,
+                )
+                # COMBINATION
+                permutations += new_perms
+                cosets.update(new_cosets)
+
+        mapped = {
+            k
+            for top, bottom in zip(top_partitions, bottom_partitions)
+            for k in top
+            if len(top) == 1 and top == bottom
+        }
+        ks = {k for k in graph.nodes if k < node}
+        # Have all nodes with ID < node been mapped?
+        find_coset = ks <= mapped and node not in cosets
+        if find_coset:
+            # Find the orbit that contains node
+            for orbit in orbits:
+                if node in orbit:
+                    cosets[node] = orbit.copy()
+        return permutations, cosets
diff --git a/networkx/algorithms/isomorphism/isomorph.py b/networkx/algorithms/isomorphism/isomorph.py
index 2f0f36208..00395b71c 100644
--- a/networkx/algorithms/isomorphism/isomorph.py
+++ b/networkx/algorithms/isomorphism/isomorph.py
@@ -3,11 +3,16 @@ Graph isomorphism functions.
 """
 import networkx as nx
 from networkx.exception import NetworkXError
-__all__ = ['could_be_isomorphic', 'fast_could_be_isomorphic',
-    'faster_could_be_isomorphic', 'is_isomorphic']

+__all__ = [
+    "could_be_isomorphic",
+    "fast_could_be_isomorphic",
+    "faster_could_be_isomorphic",
+    "is_isomorphic",
+]

-@nx._dispatchable(graphs={'G1': 0, 'G2': 1})
+
+@nx._dispatchable(graphs={"G1": 0, "G2": 1})
 def could_be_isomorphic(G1, G2):
     """Returns False if graphs are definitely not isomorphic.
     True does NOT guarantee isomorphism.
@@ -25,13 +30,37 @@ def could_be_isomorphic(G1, G2):
     involving that node.

     """
-    pass
+
+    # Check global properties
+    if G1.order() != G2.order():
+        return False
+
+    # Check local properties
+    d1 = G1.degree()
+    t1 = nx.triangles(G1)
+    clqs_1 = list(nx.find_cliques(G1))
+    c1 = {n: sum(1 for c in clqs_1 if n in c) for n in G1}  # number of cliques
+    props1 = [[d, t1[v], c1[v]] for v, d in d1]
+    props1.sort()
+
+    d2 = G2.degree()
+    t2 = nx.triangles(G2)
+    clqs_2 = list(nx.find_cliques(G2))
+    c2 = {n: sum(1 for c in clqs_2 if n in c) for n in G2}  # number of cliques
+    props2 = [[d, t2[v], c2[v]] for v, d in d2]
+    props2.sort()
+
+    if props1 != props2:
+        return False
+
+    # OK...
+    return True
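
A quick sanity check of the restored heuristic (graphs chosen for illustration): equal order but different degree sequences is enough to rule isomorphism out.

import networkx as nx

G1 = nx.path_graph(4)  # degrees [1, 2, 2, 1]
G2 = nx.star_graph(3)  # degrees [3, 1, 1, 1]
print(nx.could_be_isomorphic(G1, G2))  # False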


 graph_could_be_isomorphic = could_be_isomorphic


-@nx._dispatchable(graphs={'G1': 0, 'G2': 1})
+@nx._dispatchable(graphs={"G1": 0, "G2": 1})
 def fast_could_be_isomorphic(G1, G2):
     """Returns False if graphs are definitely not isomorphic.

@@ -47,13 +76,32 @@ def fast_could_be_isomorphic(G1, G2):
     Checks for matching degree and triangle sequences. The triangle
     sequence contains the number of triangles each node is part of.
     """
-    pass
+    # Check global properties
+    if G1.order() != G2.order():
+        return False
+
+    # Check local properties
+    d1 = G1.degree()
+    t1 = nx.triangles(G1)
+    props1 = [[d, t1[v]] for v, d in d1]
+    props1.sort()
+
+    d2 = G2.degree()
+    t2 = nx.triangles(G2)
+    props2 = [[d, t2[v]] for v, d in d2]
+    props2.sort()
+
+    if props1 != props2:
+        return False
+
+    # OK...
+    return True


 fast_graph_could_be_isomorphic = fast_could_be_isomorphic


-@nx._dispatchable(graphs={'G1': 0, 'G2': 1})
+@nx._dispatchable(graphs={"G1": 0, "G2": 1})
 def faster_could_be_isomorphic(G1, G2):
     """Returns False if graphs are definitely not isomorphic.

@@ -68,14 +116,29 @@ def faster_could_be_isomorphic(G1, G2):
     -----
     Checks for matching degree sequences.
     """
-    pass
+    # Check global properties
+    if G1.order() != G2.order():
+        return False
+
+    # Check local properties
+    d1 = sorted(d for n, d in G1.degree())
+    d2 = sorted(d for n, d in G2.degree())
+
+    if d1 != d2:
+        return False
+
+    # OK...
+    return True
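
The three heuristics form a hierarchy; a pair of graphs that slips past the degree-only test but not the triangle test shows the difference (example graphs are mine, not from the patch):

import networkx as nx

C6 = nx.cycle_graph(6)  # 2-regular, no triangles
K3K3 = nx.disjoint_union(nx.complete_graph(3), nx.complete_graph(3))

print(nx.faster_could_be_isomorphic(C6, K3K3))  # True: same degree sequence
print(nx.fast_could_be_isomorphic(C6, K3K3))    # False: triangle counts differ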


 faster_graph_could_be_isomorphic = faster_could_be_isomorphic


-@nx._dispatchable(graphs={'G1': 0, 'G2': 1}, preserve_edge_attrs=
-    'edge_match', preserve_node_attrs='node_match')
+@nx._dispatchable(
+    graphs={"G1": 0, "G2": 1},
+    preserve_edge_attrs="edge_match",
+    preserve_node_attrs="node_match",
+)
 def is_isomorphic(G1, G2, node_match=None, edge_match=None):
     """Returns True if the graphs G1 and G2 are isomorphic and False otherwise.

@@ -173,4 +236,13 @@ def is_isomorphic(G1, G2, node_match=None, edge_match=None):
        Pattern Recognition, Cuen, pp. 149-159, 2001.
        https://www.researchgate.net/publication/200034365_An_Improved_Algorithm_for_Matching_Large_Graphs
     """
-    pass
+    if G1.is_directed() and G2.is_directed():
+        GM = nx.algorithms.isomorphism.DiGraphMatcher
+    elif (not G1.is_directed()) and (not G2.is_directed()):
+        GM = nx.algorithms.isomorphism.GraphMatcher
+    else:
+        raise NetworkXError("Graphs G1 and G2 are not of the same type.")
+
+    gm = GM(G1, G2, node_match=node_match, edge_match=edge_match)
+
+    return gm.is_isomorphic()
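
A short usage sketch, including an attribute-aware comparison via the matcher helpers in this subpackage (`numerical_edge_match` compares a numeric edge attribute with a default):

import networkx as nx
from networkx.algorithms import isomorphism as iso

G1 = nx.Graph([(0, 1, {"w": 1}), (1, 2, {"w": 2})])
G2 = nx.Graph([("a", "b", {"w": 2}), ("b", "c", {"w": 2})])

print(nx.is_isomorphic(G1, G2))  # True: both are 3-node paths
print(nx.is_isomorphic(G1, G2, edge_match=iso.numerical_edge_match("w", 1)))
# False: no mapping matches the edge weights {1, 2} against {2, 2}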
diff --git a/networkx/algorithms/isomorphism/isomorphvf2.py b/networkx/algorithms/isomorphism/isomorphvf2.py
index 9b795efe0..1b6cc7d75 100644
--- a/networkx/algorithms/isomorphism/isomorphvf2.py
+++ b/networkx/algorithms/isomorphism/isomorphvf2.py
@@ -138,8 +138,15 @@ graph isomorphism problem is most likely not NP-complete (although no
 polynomial-time algorithm is known to exist).

 """
+
+# This work was originally coded by Christopher Ellison
+# as part of the Computational Mechanics Python (CMPy) project.
+# James P. Crutchfield, principal investigator.
+# Complexity Sciences Center and Physics Department, UC Davis.
+
 import sys
-__all__ = ['GraphMatcher', 'DiGraphMatcher']
+
+__all__ = ["GraphMatcher", "DiGraphMatcher"]


 class GraphMatcher:
@@ -170,20 +177,65 @@ class GraphMatcher:
         self.G1_nodes = set(G1.nodes())
         self.G2_nodes = set(G2.nodes())
         self.G2_node_order = {n: i for i, n in enumerate(G2)}
+
+        # Set recursion limit.
         self.old_recursion_limit = sys.getrecursionlimit()
         expected_max_recursion_level = len(self.G2)
         if self.old_recursion_limit < 1.5 * expected_max_recursion_level:
+            # Give some breathing room.
             sys.setrecursionlimit(int(1.5 * expected_max_recursion_level))
-        self.test = 'graph'
+
+        # Declare that we will be searching for a graph-graph isomorphism.
+        self.test = "graph"
+
+        # Initialize state
         self.initialize()

     def reset_recursion_limit(self):
         """Restores the recursion limit."""
-        pass
+        # TODO:
+        # Currently, we use recursion and set the recursion level higher.
+        # It would be nice to restore the level, but because the
+        # (Di)GraphMatcher classes make use of cyclic references, garbage
+        # collection will never happen when we define __del__() to
+        # restore the recursion level. The result is a memory leak.
+        # So for now, we do not automatically restore the recursion level,
+        # and instead provide a method to do this manually. Eventually,
+        # we should turn this into a non-recursive implementation.
+        sys.setrecursionlimit(self.old_recursion_limit)

     def candidate_pairs_iter(self):
         """Iterator over candidate pairs of nodes in G1 and G2."""
-        pass
+
+        # All computations are done using the current state!
+
+        G1_nodes = self.G1_nodes
+        G2_nodes = self.G2_nodes
+        min_key = self.G2_node_order.__getitem__
+
+        # First we compute the inout-terminal sets.
+        T1_inout = [node for node in self.inout_1 if node not in self.core_1]
+        T2_inout = [node for node in self.inout_2 if node not in self.core_2]
+
+        # If T1_inout and T2_inout are both nonempty.
+        # P(s) = T1_inout x {min T2_inout}
+        if T1_inout and T2_inout:
+            node_2 = min(T2_inout, key=min_key)
+            for node_1 in T1_inout:
+                yield node_1, node_2
+
+        else:
+            # If T1_inout and T2_inout were both empty....
+            # P(s) = (N_1 - M_1) x {min (N_2 - M_2)}
+            # if not (T1_inout or T2_inout):  # as suggested by  [2], incorrect
+            if 1:  # as inferred from [1], correct
+                # First we determine the candidate node for G2
+                other_node = min(G2_nodes - set(self.core_2), key=min_key)
+                for node in self.G1:
+                    if node not in self.core_1:
+                        yield node, other_node
+
+        # For all other cases, we don't have any candidate pairs.

     def initialize(self):
         """Reinitializes the state of the algorithm.
@@ -192,15 +244,59 @@ class GraphMatcher:
         If only subclassing GraphMatcher, a redefinition is not necessary.

         """
-        pass
+
+        # core_1[n] contains the index of the node paired with n, which is m,
+        #           provided n is in the mapping.
+        # core_2[m] contains the index of the node paired with m, which is n,
+        #           provided m is in the mapping.
+        self.core_1 = {}
+        self.core_2 = {}
+
+        # See the paper for definitions of M_x and T_x^{y}
+
+        # inout_1[n]  is non-zero if n is in M_1 or in T_1^{inout}
+        # inout_2[m]  is non-zero if m is in M_2 or in T_2^{inout}
+        #
+        # The value stored is the depth of the SSR tree when the node became
+        # part of the corresponding set.
+        self.inout_1 = {}
+        self.inout_2 = {}
+        # Practically, these sets simply store the nodes in the subgraph.
+
+        self.state = GMState(self)
+
+        # Provide a convenient way to access the isomorphism mapping.
+        self.mapping = self.core_1.copy()

     def is_isomorphic(self):
         """Returns True if G1 and G2 are isomorphic graphs."""
-        pass
+
+        # Let's do two very quick checks!
+        # QUESTION: Should we call faster_graph_could_be_isomorphic(G1,G2)?
+        # For now, I just copy the code.
+
+        # Check global properties
+        if self.G1.order() != self.G2.order():
+            return False
+
+        # Check local properties
+        d1 = sorted(d for n, d in self.G1.degree())
+        d2 = sorted(d for n, d in self.G2.degree())
+        if d1 != d2:
+            return False
+
+        try:
+            x = next(self.isomorphisms_iter())
+            return True
+        except StopIteration:
+            return False

     def isomorphisms_iter(self):
         """Generator over isomorphisms between G1 and G2."""
-        pass
+        # Declare that we are looking for a graph-graph isomorphism.
+        self.test = "graph"
+        self.initialize()
+        yield from self.match()

     def match(self):
         """Extends the isomorphism mapping.
@@ -211,7 +307,21 @@ class GraphMatcher:
         we yield the mapping.

         """
-        pass
+        if len(self.core_1) == len(self.G2):
+            # Save the final mapping, otherwise garbage collection deletes it.
+            self.mapping = self.core_1.copy()
+            # The mapping is complete.
+            yield self.mapping
+        else:
+            for G1_node, G2_node in self.candidate_pairs_iter():
+                if self.syntactic_feasibility(G1_node, G2_node):
+                    if self.semantic_feasibility(G1_node, G2_node):
+                        # Recursive call, adding the feasible state.
+                        newstate = self.state.__class__(self, G1_node, G2_node)
+                        yield from self.match()
+
+                        # restore data structures
+                        newstate.restore()

     def semantic_feasibility(self, G1_node, G2_node):
         """Returns True if adding (G1_node, G2_node) is semantically feasible.
@@ -251,23 +361,41 @@ class GraphMatcher:
         the above form to keep the match() method functional. Implementations
         should consider multigraphs.
         """
-        pass
+        return True

     def subgraph_is_isomorphic(self):
         """Returns True if a subgraph of G1 is isomorphic to G2."""
-        pass
+        try:
+            x = next(self.subgraph_isomorphisms_iter())
+            return True
+        except StopIteration:
+            return False

     def subgraph_is_monomorphic(self):
         """Returns True if a subgraph of G1 is monomorphic to G2."""
-        pass
+        try:
+            x = next(self.subgraph_monomorphisms_iter())
+            return True
+        except StopIteration:
+            return False
+
+    #    subgraph_is_isomorphic.__doc__ += "\n" + subgraph.replace('\n','\n'+indent)

     def subgraph_isomorphisms_iter(self):
         """Generator over isomorphisms between a subgraph of G1 and G2."""
-        pass
+        # Declare that we are looking for graph-subgraph isomorphism.
+        self.test = "subgraph"
+        self.initialize()
+        yield from self.match()

     def subgraph_monomorphisms_iter(self):
         """Generator over monomorphisms between a subgraph of G1 and G2."""
-        pass
+        # Declare that we are looking for graph-subgraph monomorphism.
+        self.test = "mono"
+        self.initialize()
+        yield from self.match()
+
+    #    subgraph_isomorphisms_iter.__doc__ += "\n" + subgraph.replace('\n','\n'+indent)
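
The induced-subgraph semantics of the `subgraph_is_isomorphic` test versus the looser monomorphism test is easy to demonstrate (example graphs are mine):

import networkx as nx
from networkx.algorithms import isomorphism

GM = isomorphism.GraphMatcher(nx.complete_graph(3), nx.path_graph(3))

# Every 3-node induced subgraph of K3 is K3 itself, never the 2-edge path...
print(GM.subgraph_is_isomorphic())   # False
# ...but a monomorphism only needs the path's edges to exist in K3.
print(GM.subgraph_is_monomorphic())  # True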

     def syntactic_feasibility(self, G1_node, G2_node):
         """Returns True if adding (G1_node, G2_node) is syntactically feasible.
@@ -277,7 +405,120 @@ class GraphMatcher:
         The addition is allowable if the inclusion of the candidate pair does
         not make it impossible for an isomorphism/monomorphism to be found.
         """
-        pass
+
+        # The VF2 algorithm was designed to work with graphs having, at most,
+        # one edge connecting any two nodes.  This is not the case when
+        # dealing with MultiGraphs.
+        #
+        # Basically, when we test the look-ahead rules R_neighbor, we will
+        # make sure that the number of edges are checked. We also add
+        # a R_self check to verify that the number of selfloops is acceptable.
+        #
+        # Users might be comparing Graph instances with MultiGraph instances.
+        # So the generic GraphMatcher class must work with MultiGraphs.
+        # Care must be taken since the value in the innermost dictionary is a
+        # singlet for Graph instances.  For MultiGraphs, the value in the
+        # innermost dictionary is a list.
+
+        ###
+        # Test at each step to get a return value as soon as possible.
+        ###
+
+        # Look ahead 0
+
+        # R_self
+
+        # The number of self-loops for G1_node must equal the number of
+        # self-loops for G2_node. Without this check, we would fail on
+        # R_neighbor at the next recursion level. But it is good to prune the
+        # search tree now.
+
+        if self.test == "mono":
+            if self.G1.number_of_edges(G1_node, G1_node) < self.G2.number_of_edges(
+                G2_node, G2_node
+            ):
+                return False
+        else:
+            if self.G1.number_of_edges(G1_node, G1_node) != self.G2.number_of_edges(
+                G2_node, G2_node
+            ):
+                return False
+
+        # R_neighbor
+
+        # For each neighbor n' of n in the partial mapping, the corresponding
+        # node m' is a neighbor of m, and vice versa. Also, the number of
+        # edges must be equal.
+        if self.test != "mono":
+            for neighbor in self.G1[G1_node]:
+                if neighbor in self.core_1:
+                    if self.core_1[neighbor] not in self.G2[G2_node]:
+                        return False
+                    elif self.G1.number_of_edges(
+                        neighbor, G1_node
+                    ) != self.G2.number_of_edges(self.core_1[neighbor], G2_node):
+                        return False
+
+        for neighbor in self.G2[G2_node]:
+            if neighbor in self.core_2:
+                if self.core_2[neighbor] not in self.G1[G1_node]:
+                    return False
+                elif self.test == "mono":
+                    if self.G1.number_of_edges(
+                        self.core_2[neighbor], G1_node
+                    ) < self.G2.number_of_edges(neighbor, G2_node):
+                        return False
+                else:
+                    if self.G1.number_of_edges(
+                        self.core_2[neighbor], G1_node
+                    ) != self.G2.number_of_edges(neighbor, G2_node):
+                        return False
+
+        if self.test != "mono":
+            # Look ahead 1
+
+            # R_terminout
+            # The number of neighbors of n in T_1^{inout} is equal to the
+            # number of neighbors of m that are in T_2^{inout}, and vice versa.
+            num1 = 0
+            for neighbor in self.G1[G1_node]:
+                if (neighbor in self.inout_1) and (neighbor not in self.core_1):
+                    num1 += 1
+            num2 = 0
+            for neighbor in self.G2[G2_node]:
+                if (neighbor in self.inout_2) and (neighbor not in self.core_2):
+                    num2 += 1
+            if self.test == "graph":
+                if num1 != num2:
+                    return False
+            else:  # self.test == 'subgraph'
+                if not (num1 >= num2):
+                    return False
+
+            # Look ahead 2
+
+            # R_new
+
+            # The number of neighbors of n that are neither in the core_1 nor
+            # T_1^{inout} is equal to the number of neighbors of m
+            # that are neither in core_2 nor T_2^{inout}.
+            num1 = 0
+            for neighbor in self.G1[G1_node]:
+                if neighbor not in self.inout_1:
+                    num1 += 1
+            num2 = 0
+            for neighbor in self.G2[G2_node]:
+                if neighbor not in self.inout_2:
+                    num2 += 1
+            if self.test == "graph":
+                if num1 != num2:
+                    return False
+            else:  # self.test == 'subgraph'
+                if not (num1 >= num2):
+                    return False
+
+        # Otherwise, this node pair is syntactically feasible!
+        return True
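
End-to-end, the matcher is used like this (a minimal sketch; after a successful test the last complete mapping is kept on `GM.mapping`):

import networkx as nx
from networkx.algorithms import isomorphism

GM = isomorphism.GraphMatcher(nx.path_graph(4), nx.path_graph(4))
print(GM.is_isomorphic())  # True
print(GM.mapping)          # one concrete G1-node -> G2-node correspondence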


 class DiGraphMatcher(GraphMatcher):
@@ -304,7 +545,50 @@ class DiGraphMatcher(GraphMatcher):

     def candidate_pairs_iter(self):
         """Iterator over candidate pairs of nodes in G1 and G2."""
-        pass
+
+        # All computations are done using the current state!
+
+        G1_nodes = self.G1_nodes
+        G2_nodes = self.G2_nodes
+        min_key = self.G2_node_order.__getitem__
+
+        # First we compute the out-terminal sets.
+        T1_out = [node for node in self.out_1 if node not in self.core_1]
+        T2_out = [node for node in self.out_2 if node not in self.core_2]
+
+        # If T1_out and T2_out are both nonempty.
+        # P(s) = T1_out x {min T2_out}
+        if T1_out and T2_out:
+            node_2 = min(T2_out, key=min_key)
+            for node_1 in T1_out:
+                yield node_1, node_2
+
+        # If T1_out and T2_out were both empty....
+        # We compute the in-terminal sets.
+
+        # elif not (T1_out or T2_out):   # as suggested by [2], incorrect
+        else:  # as suggested by [1], correct
+            T1_in = [node for node in self.in_1 if node not in self.core_1]
+            T2_in = [node for node in self.in_2 if node not in self.core_2]
+
+            # If T1_in and T2_in are both nonempty.
+            # P(s) = T1_in x {min T2_in}
+            if T1_in and T2_in:
+                node_2 = min(T2_in, key=min_key)
+                for node_1 in T1_in:
+                    yield node_1, node_2
+
+            # If all terminal sets are empty...
+            # P(s) = (N_1 - M_1) x {min (N_2 - M_2)}
+
+            # elif not (T1_in or T2_in):   # as suggested by  [2], incorrect
+            else:  # as inferred from [1], correct
+                node_2 = min(G2_nodes - set(self.core_2), key=min_key)
+                for node_1 in G1_nodes:
+                    if node_1 not in self.core_1:
+                        yield node_1, node_2
+
+        # For all other cases, we don't have any candidate pairs.

     def initialize(self):
         """Reinitializes the state of the algorithm.
@@ -312,7 +596,33 @@ class DiGraphMatcher(GraphMatcher):
         This method should be redefined if using something other than DiGMState.
         If only subclassing GraphMatcher, a redefinition is not necessary.
         """
-        pass
+
+        # core_1[n] contains the index of the node paired with n, which is m,
+        #           provided n is in the mapping.
+        # core_2[m] contains the index of the node paired with m, which is n,
+        #           provided m is in the mapping.
+        self.core_1 = {}
+        self.core_2 = {}
+
+        # See the paper for definitions of M_x and T_x^{y}
+
+        # in_1[n]  is non-zero if n is in M_1 or in T_1^{in}
+        # out_1[n] is non-zero if n is in M_1 or in T_1^{out}
+        #
+        # in_2[m]  is non-zero if m is in M_2 or in T_2^{in}
+        # out_2[m] is non-zero if m is in M_2 or in T_2^{out}
+        #
+        # The value stored is the depth of the search tree when the node became
+        # part of the corresponding set.
+        self.in_1 = {}
+        self.in_2 = {}
+        self.out_1 = {}
+        self.out_2 = {}
+
+        self.state = DiGMState(self)
+
+        # Provide a convenient way to access the isomorphism mapping.
+        self.mapping = self.core_1.copy()

     def syntactic_feasibility(self, G1_node, G2_node):
         """Returns True if adding (G1_node, G2_node) is syntactically feasible.
@@ -322,7 +632,219 @@ class DiGraphMatcher(GraphMatcher):
         The addition is allowable if the inclusion of the candidate pair does
         not make it impossible for an isomorphism/monomorphism to be found.
         """
-        pass
+
+        # The VF2 algorithm was designed to work with graphs having, at most,
+        # one edge connecting any two nodes.  This is not the case when
+        # dealing with MultiDiGraphs.
+        #
+        # Basically, when we test the look-ahead rules R_pred and R_succ, we
+        # will make sure that the number of edges are checked.  We also add
+        # a R_self check to verify that the number of selfloops is acceptable.
+
+        # Users might be comparing DiGraph instances with MultiDiGraph
+        # instances. So the generic DiGraphMatcher class must work with
+        # MultiDiGraphs. Care must be taken since the value in the innermost
+        # dictionary is a singlet for DiGraph instances.  For MultiDiGraphs,
+        # the value in the innermost dictionary is a list.
+
+        ###
+        # Test at each step to get a return value as soon as possible.
+        ###
+
+        # Look ahead 0
+
+        # R_self
+
+        # The number of self-loops for G1_node must equal the number of
+        # self-loops for G2_node. Without this check, we would fail on R_pred
+        # at the next recursion level. This should prune the tree even further.
+        if self.test == "mono":
+            if self.G1.number_of_edges(G1_node, G1_node) < self.G2.number_of_edges(
+                G2_node, G2_node
+            ):
+                return False
+        else:
+            if self.G1.number_of_edges(G1_node, G1_node) != self.G2.number_of_edges(
+                G2_node, G2_node
+            ):
+                return False
+
+        # R_pred
+
+        # For each predecessor n' of n in the partial mapping, the
+        # corresponding node m' is a predecessor of m, and vice versa. Also,
+        # the number of edges must be equal
+        if self.test != "mono":
+            for predecessor in self.G1.pred[G1_node]:
+                if predecessor in self.core_1:
+                    if self.core_1[predecessor] not in self.G2.pred[G2_node]:
+                        return False
+                    elif self.G1.number_of_edges(
+                        predecessor, G1_node
+                    ) != self.G2.number_of_edges(self.core_1[predecessor], G2_node):
+                        return False
+
+        for predecessor in self.G2.pred[G2_node]:
+            if predecessor in self.core_2:
+                if self.core_2[predecessor] not in self.G1.pred[G1_node]:
+                    return False
+                elif self.test == "mono":
+                    if self.G1.number_of_edges(
+                        self.core_2[predecessor], G1_node
+                    ) < self.G2.number_of_edges(predecessor, G2_node):
+                        return False
+                else:
+                    if self.G1.number_of_edges(
+                        self.core_2[predecessor], G1_node
+                    ) != self.G2.number_of_edges(predecessor, G2_node):
+                        return False
+
+        # R_succ
+
+        # For each successor n' of n in the partial mapping, the corresponding
+        # node m' is a successor of m, and vice versa. Also, the number of
+        # edges must be equal.
+        if self.test != "mono":
+            for successor in self.G1[G1_node]:
+                if successor in self.core_1:
+                    if self.core_1[successor] not in self.G2[G2_node]:
+                        return False
+                    elif self.G1.number_of_edges(
+                        G1_node, successor
+                    ) != self.G2.number_of_edges(G2_node, self.core_1[successor]):
+                        return False
+
+        for successor in self.G2[G2_node]:
+            if successor in self.core_2:
+                if self.core_2[successor] not in self.G1[G1_node]:
+                    return False
+                elif self.test == "mono":
+                    if self.G1.number_of_edges(
+                        G1_node, self.core_2[successor]
+                    ) < self.G2.number_of_edges(G2_node, successor):
+                        return False
+                else:
+                    if self.G1.number_of_edges(
+                        G1_node, self.core_2[successor]
+                    ) != self.G2.number_of_edges(G2_node, successor):
+                        return False
+
+        if self.test != "mono":
+            # Look ahead 1
+
+            # R_termin
+            # The number of predecessors of n that are in T_1^{in} is equal to the
+            # number of predecessors of m that are in T_2^{in}.
+            num1 = 0
+            for predecessor in self.G1.pred[G1_node]:
+                if (predecessor in self.in_1) and (predecessor not in self.core_1):
+                    num1 += 1
+            num2 = 0
+            for predecessor in self.G2.pred[G2_node]:
+                if (predecessor in self.in_2) and (predecessor not in self.core_2):
+                    num2 += 1
+            if self.test == "graph":
+                if num1 != num2:
+                    return False
+            else:  # self.test == 'subgraph'
+                if not (num1 >= num2):
+                    return False
+
+            # The number of successors of n that are in T_1^{in} is equal to the
+            # number of successors of m that are in T_2^{in}.
+            num1 = 0
+            for successor in self.G1[G1_node]:
+                if (successor in self.in_1) and (successor not in self.core_1):
+                    num1 += 1
+            num2 = 0
+            for successor in self.G2[G2_node]:
+                if (successor in self.in_2) and (successor not in self.core_2):
+                    num2 += 1
+            if self.test == "graph":
+                if num1 != num2:
+                    return False
+            else:  # self.test == 'subgraph'
+                if not (num1 >= num2):
+                    return False
+
+            # R_termout
+
+            # The number of predecessors of n that are in T_1^{out} is equal to the
+            # number of predecessors of m that are in T_2^{out}.
+            num1 = 0
+            for predecessor in self.G1.pred[G1_node]:
+                if (predecessor in self.out_1) and (predecessor not in self.core_1):
+                    num1 += 1
+            num2 = 0
+            for predecessor in self.G2.pred[G2_node]:
+                if (predecessor in self.out_2) and (predecessor not in self.core_2):
+                    num2 += 1
+            if self.test == "graph":
+                if num1 != num2:
+                    return False
+            else:  # self.test == 'subgraph'
+                if not (num1 >= num2):
+                    return False
+
+            # The number of successors of n that are in T_1^{out} is equal to the
+            # number of successors of m that are in T_2^{out}.
+            num1 = 0
+            for successor in self.G1[G1_node]:
+                if (successor in self.out_1) and (successor not in self.core_1):
+                    num1 += 1
+            num2 = 0
+            for successor in self.G2[G2_node]:
+                if (successor in self.out_2) and (successor not in self.core_2):
+                    num2 += 1
+            if self.test == "graph":
+                if num1 != num2:
+                    return False
+            else:  # self.test == 'subgraph'
+                if not (num1 >= num2):
+                    return False
+
+            # Look ahead 2
+
+            # R_new
+
+            # The number of predecessors of n that are neither in the core_1 nor
+            # T_1^{in} nor T_1^{out} is equal to the number of predecessors of m
+            # that are neither in core_2 nor T_2^{in} nor T_2^{out}.
+            num1 = 0
+            for predecessor in self.G1.pred[G1_node]:
+                if (predecessor not in self.in_1) and (predecessor not in self.out_1):
+                    num1 += 1
+            num2 = 0
+            for predecessor in self.G2.pred[G2_node]:
+                if (predecessor not in self.in_2) and (predecessor not in self.out_2):
+                    num2 += 1
+            if self.test == "graph":
+                if num1 != num2:
+                    return False
+            else:  # self.test == 'subgraph'
+                if not (num1 >= num2):
+                    return False
+
+            # The number of successors of n that are neither in the core_1 nor
+            # T_1^{in} nor T_1^{out} is equal to the number of successors of m
+            # that are neither in core_2 nor T_2^{in} nor T_2^{out}.
+            num1 = 0
+            for successor in self.G1[G1_node]:
+                if (successor not in self.in_1) and (successor not in self.out_1):
+                    num1 += 1
+            num2 = 0
+            for successor in self.G2[G2_node]:
+                if (successor not in self.in_2) and (successor not in self.out_2):
+                    num2 += 1
+            if self.test == "graph":
+                if num1 != num2:
+                    return False
+            else:  # self.test == 'subgraph'
+                if not (num1 >= num2):
+                    return False
+
+        # Otherwise, this node pair is syntactically feasible!
+        return True
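
The directed matcher applies the same machinery with direction-aware feasibility checks; a minimal sketch with hand-picked edges:

import networkx as nx
from networkx.algorithms import isomorphism

D1 = nx.DiGraph([(0, 1), (1, 2)])  # 0 -> 1 -> 2
D2 = nx.DiGraph([(2, 1), (1, 0)])  # 2 -> 1 -> 0
DGM = isomorphism.DiGraphMatcher(D1, D2)
print(DGM.is_isomorphic())  # True: 0->2, 1->1, 2->0 preserves all arrows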


 class GMState:
@@ -342,42 +864,75 @@ class GMState:
         isomorphism mapping.
         """
         self.GM = GM
+
+        # Initialize the last stored node pair.
         self.G1_node = None
         self.G2_node = None
         self.depth = len(GM.core_1)
+
         if G1_node is None or G2_node is None:
+            # Then we reset the class variables
             GM.core_1 = {}
             GM.core_2 = {}
             GM.inout_1 = {}
             GM.inout_2 = {}
+
+        # Watch out! G1_node == 0 should evaluate to True.
         if G1_node is not None and G2_node is not None:
+            # Add the node pair to the isomorphism mapping.
             GM.core_1[G1_node] = G2_node
             GM.core_2[G2_node] = G1_node
+
+            # Store the node that was added last.
             self.G1_node = G1_node
             self.G2_node = G2_node
+
+            # Now we must update the other two vectors.
+            # We will add only if it is not in there already!
             self.depth = len(GM.core_1)
+
+            # First we add the new nodes...
             if G1_node not in GM.inout_1:
                 GM.inout_1[G1_node] = self.depth
             if G2_node not in GM.inout_2:
                 GM.inout_2[G2_node] = self.depth
+
+            # Now we add every other node...
+
+            # Updates for T_1^{inout}
             new_nodes = set()
             for node in GM.core_1:
-                new_nodes.update([neighbor for neighbor in GM.G1[node] if 
-                    neighbor not in GM.core_1])
+                new_nodes.update(
+                    [neighbor for neighbor in GM.G1[node] if neighbor not in GM.core_1]
+                )
             for node in new_nodes:
                 if node not in GM.inout_1:
                     GM.inout_1[node] = self.depth
+
+            # Updates for T_2^{inout}
             new_nodes = set()
             for node in GM.core_2:
-                new_nodes.update([neighbor for neighbor in GM.G2[node] if 
-                    neighbor not in GM.core_2])
+                new_nodes.update(
+                    [neighbor for neighbor in GM.G2[node] if neighbor not in GM.core_2]
+                )
             for node in new_nodes:
                 if node not in GM.inout_2:
                     GM.inout_2[node] = self.depth

     def restore(self):
         """Deletes the GMState object and restores the class variables."""
-        pass
+        # First we remove the node that was added from the core vectors.
+        # Watch out! G1_node == 0 should evaluate to True.
+        if self.G1_node is not None and self.G2_node is not None:
+            del self.GM.core_1[self.G1_node]
+            del self.GM.core_2[self.G2_node]
+
+        # Now we revert the other two vectors.
+        # Thus, we delete all entries which have this depth level.
+        for vector in (self.GM.inout_1, self.GM.inout_2):
+            for node in list(vector.keys()):
+                if vector[node] == self.depth:
+                    del vector[node]


 class DiGMState:
@@ -398,57 +953,113 @@ class DiGMState:
         isomorphism mapping.
         """
         self.GM = GM
+
+        # Initialize the last stored node pair.
         self.G1_node = None
         self.G2_node = None
         self.depth = len(GM.core_1)
+
         if G1_node is None or G2_node is None:
+            # Then we reset the class variables
             GM.core_1 = {}
             GM.core_2 = {}
             GM.in_1 = {}
             GM.in_2 = {}
             GM.out_1 = {}
             GM.out_2 = {}
+
+        # Watch out! G1_node == 0 should evaluate to True.
         if G1_node is not None and G2_node is not None:
+            # Add the node pair to the isomorphism mapping.
             GM.core_1[G1_node] = G2_node
             GM.core_2[G2_node] = G1_node
+
+            # Store the node that was added last.
             self.G1_node = G1_node
             self.G2_node = G2_node
+
+            # Now we must update the other four vectors.
+            # We will add only if it is not in there already!
             self.depth = len(GM.core_1)
+
+            # First we add the new nodes...
             for vector in (GM.in_1, GM.out_1):
                 if G1_node not in vector:
                     vector[G1_node] = self.depth
             for vector in (GM.in_2, GM.out_2):
                 if G2_node not in vector:
                     vector[G2_node] = self.depth
+
+            # Now we add every other node...
+
+            # Updates for T_1^{in}
             new_nodes = set()
             for node in GM.core_1:
-                new_nodes.update([predecessor for predecessor in GM.G1.
-                    predecessors(node) if predecessor not in GM.core_1])
+                new_nodes.update(
+                    [
+                        predecessor
+                        for predecessor in GM.G1.predecessors(node)
+                        if predecessor not in GM.core_1
+                    ]
+                )
             for node in new_nodes:
                 if node not in GM.in_1:
                     GM.in_1[node] = self.depth
+
+            # Updates for T_2^{in}
             new_nodes = set()
             for node in GM.core_2:
-                new_nodes.update([predecessor for predecessor in GM.G2.
-                    predecessors(node) if predecessor not in GM.core_2])
+                new_nodes.update(
+                    [
+                        predecessor
+                        for predecessor in GM.G2.predecessors(node)
+                        if predecessor not in GM.core_2
+                    ]
+                )
             for node in new_nodes:
                 if node not in GM.in_2:
                     GM.in_2[node] = self.depth
+
+            # Updates for T_1^{out}
             new_nodes = set()
             for node in GM.core_1:
-                new_nodes.update([successor for successor in GM.G1.
-                    successors(node) if successor not in GM.core_1])
+                new_nodes.update(
+                    [
+                        successor
+                        for successor in GM.G1.successors(node)
+                        if successor not in GM.core_1
+                    ]
+                )
             for node in new_nodes:
                 if node not in GM.out_1:
                     GM.out_1[node] = self.depth
+
+            # Updates for T_2^{out}
             new_nodes = set()
             for node in GM.core_2:
-                new_nodes.update([successor for successor in GM.G2.
-                    successors(node) if successor not in GM.core_2])
+                new_nodes.update(
+                    [
+                        successor
+                        for successor in GM.G2.successors(node)
+                        if successor not in GM.core_2
+                    ]
+                )
             for node in new_nodes:
                 if node not in GM.out_2:
                     GM.out_2[node] = self.depth

     def restore(self):
         """Deletes the DiGMState object and restores the class variables."""
-        pass
+
+        # First we remove the node that was added from the core vectors.
+        # Watch out! G1_node == 0 should evaluate to True.
+        if self.G1_node is not None and self.G2_node is not None:
+            del self.GM.core_1[self.G1_node]
+            del self.GM.core_2[self.G2_node]
+
+        # Now we revert the other four vectors.
+        # Thus, we delete all entries which have this depth level.
+        for vector in (self.GM.in_1, self.GM.in_2, self.GM.out_1, self.GM.out_2):
+            for node in list(vector.keys()):
+                if vector[node] == self.depth:
+                    del vector[node]
diff --git a/networkx/algorithms/isomorphism/matchhelpers.py b/networkx/algorithms/isomorphism/matchhelpers.py
index 0f9d47ddc..8185f34eb 100644
--- a/networkx/algorithms/isomorphism/matchhelpers.py
+++ b/networkx/algorithms/isomorphism/matchhelpers.py
@@ -4,18 +4,28 @@ edge_match functions to use during isomorphism checks.
 import math
 import types
 from itertools import permutations
-__all__ = ['categorical_node_match', 'categorical_edge_match',
-    'categorical_multiedge_match', 'numerical_node_match',
-    'numerical_edge_match', 'numerical_multiedge_match',
-    'generic_node_match', 'generic_edge_match', 'generic_multiedge_match']
+
+__all__ = [
+    "categorical_node_match",
+    "categorical_edge_match",
+    "categorical_multiedge_match",
+    "numerical_node_match",
+    "numerical_edge_match",
+    "numerical_multiedge_match",
+    "generic_node_match",
+    "generic_edge_match",
+    "generic_multiedge_match",
+]


 def copyfunc(f, name=None):
     """Returns a deepcopy of a function."""
-    pass
+    return types.FunctionType(
+        f.__code__, f.__globals__, name or f.__name__, f.__defaults__, f.__closure__
+    )
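
A quick sanity check of copyfunc (illustrative; the helper _inc is hypothetical): the copy is a distinct function object that keeps the original code, defaults, and closure, optionally under a new name.

    def _inc(x, step=1):
        return x + step

    _inc_copy = copyfunc(_inc, name="_inc_copy")
    assert _inc_copy is not _inc              # a new function object
    assert _inc_copy(2) == 3                  # same defaults and behavior
    assert _inc_copy.__name__ == "_inc_copy"  # renamed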


-def allclose(x, y, rtol=1e-05, atol=1e-08):
+def allclose(x, y, rtol=1.0000000000000001e-05, atol=1e-08):
     """Returns True if x and y are sufficiently close, elementwise.

     Parameters
@@ -26,7 +36,8 @@ def allclose(x, y, rtol=1e-05, atol=1e-08):
         The absolute error tolerance.

     """
-    pass
+    # assume finite weights, see numpy.allclose() for reference
+    return all(math.isclose(xi, yi, rel_tol=rtol, abs_tol=atol) for xi, yi in zip(x, y))
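
For example (illustrative values; x and y are assumed to be equal-length sequences of finite floats):

    assert allclose([1.0, 2.0], [1.0 + 1e-9, 2.0])  # within rtol/atol
    assert not allclose([1.0], [1.1])               # beyond both tolerances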


 categorical_doc = """
@@ -57,14 +68,59 @@ Examples
 >>> nm = iso.categorical_node_match(["color", "size"], ["red", 2])

 """
-categorical_edge_match = copyfunc(categorical_node_match,
-    'categorical_edge_match')
+
+
+def categorical_node_match(attr, default):
+    if isinstance(attr, str):
+
+        def match(data1, data2):
+            return data1.get(attr, default) == data2.get(attr, default)
+
+    else:
+        attrs = list(zip(attr, default))  # Python 3
+
+        def match(data1, data2):
+            return all(data1.get(attr, d) == data2.get(attr, d) for attr, d in attrs)
+
+    return match
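
A sketch of the returned matcher in action (the attribute dicts below are illustrative); attributes missing from a node fall back to the supplied default:

    nm = categorical_node_match("color", "gray")
    assert nm({"color": "red"}, {"color": "red"})
    assert not nm({"color": "red"}, {"color": "blue"})
    assert nm({}, {"color": "gray"})  # missing attribute matches the default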
+
+
+categorical_edge_match = copyfunc(categorical_node_match, "categorical_edge_match")
+
+
+def categorical_multiedge_match(attr, default):
+    if isinstance(attr, str):
+
+        def match(datasets1, datasets2):
+            values1 = {data.get(attr, default) for data in datasets1.values()}
+            values2 = {data.get(attr, default) for data in datasets2.values()}
+            return values1 == values2
+
+    else:
+        attrs = list(zip(attr, default))  # Python 3
+
+        def match(datasets1, datasets2):
+            values1 = set()
+            for data1 in datasets1.values():
+                x = tuple(data1.get(attr, d) for attr, d in attrs)
+                values1.add(x)
+            values2 = set()
+            for data2 in datasets2.values():
+                x = tuple(data2.get(attr, d) for attr, d in attrs)
+                values2.add(x)
+            return values1 == values2
+
+    return match
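
Because the multiedge variant compares the sets of attribute values across all parallel edges, edge keys and ordering are irrelevant (illustrative dicts shaped like MultiGraph edge data):

    em = categorical_multiedge_match("color", None)
    d1 = {0: {"color": "red"}, 1: {"color": "blue"}}
    d2 = {5: {"color": "blue"}, 9: {"color": "red"}}
    assert em(d1, d2)  # same value sets despite different keys and order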
+
+
+# Docstrings for categorical functions.
 categorical_node_match.__doc__ = categorical_doc
-categorical_edge_match.__doc__ = categorical_doc.replace('node', 'edge')
-tmpdoc = categorical_doc.replace('node', 'edge')
-tmpdoc = tmpdoc.replace('categorical_edge_match', 'categorical_multiedge_match'
-    )
+categorical_edge_match.__doc__ = categorical_doc.replace("node", "edge")
+tmpdoc = categorical_doc.replace("node", "edge")
+tmpdoc = tmpdoc.replace("categorical_edge_match", "categorical_multiedge_match")
 categorical_multiedge_match.__doc__ = tmpdoc
+
+
 numerical_doc = """
 Returns a comparison function for a numerical node attribute.

@@ -97,12 +153,72 @@ Examples
 >>> nm = iso.numerical_node_match(["weight", "linewidth"], [0.25, 0.5])

 """
-numerical_edge_match = copyfunc(numerical_node_match, 'numerical_edge_match')
+
+
+def numerical_node_match(attr, default, rtol=1.0000000000000001e-05, atol=1e-08):
+    if isinstance(attr, str):
+
+        def match(data1, data2):
+            return math.isclose(
+                data1.get(attr, default),
+                data2.get(attr, default),
+                rel_tol=rtol,
+                abs_tol=atol,
+            )
+
+    else:
+        attrs = list(zip(attr, default))  # Python 3
+
+        def match(data1, data2):
+            values1 = [data1.get(attr, d) for attr, d in attrs]
+            values2 = [data2.get(attr, d) for attr, d in attrs]
+            return allclose(values1, values2, rtol=rtol, atol=atol)
+
+    return match
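
A sketch of the tolerance-based comparison (illustrative values):

    nm = numerical_node_match("weight", 1.0)
    assert nm({"weight": 0.25}, {"weight": 0.25 + 1e-9})  # within tolerance
    assert not nm({"weight": 0.25}, {"weight": 0.5})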
+
+
+numerical_edge_match = copyfunc(numerical_node_match, "numerical_edge_match")
+
+
+def numerical_multiedge_match(attr, default, rtol=1.0000000000000001e-05, atol=1e-08):
+    if isinstance(attr, str):
+
+        def match(datasets1, datasets2):
+            values1 = sorted(data.get(attr, default) for data in datasets1.values())
+            values2 = sorted(data.get(attr, default) for data in datasets2.values())
+            return allclose(values1, values2, rtol=rtol, atol=atol)
+
+    else:
+        attrs = list(zip(attr, default))  # Python 3
+
+        def match(datasets1, datasets2):
+            values1 = []
+            for data1 in datasets1.values():
+                x = tuple(data1.get(attr, d) for attr, d in attrs)
+                values1.append(x)
+            values2 = []
+            for data2 in datasets2.values():
+                x = tuple(data2.get(attr, d) for attr, d in attrs)
+                values2.append(x)
+            values1.sort()
+            values2.sort()
+            for xi, yi in zip(values1, values2):
+                if not allclose(xi, yi, rtol=rtol, atol=atol):
+                    return False
+            else:
+                return True
+
+    return match
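
Sorting both value lists before the allclose comparison makes the multiedge matcher insensitive to edge order (illustrative):

    em = numerical_multiedge_match("weight", 0.0)
    assert em({0: {"weight": 1.0}, 1: {"weight": 2.0}},
              {0: {"weight": 2.0}, 1: {"weight": 1.0}})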
+
+
+# Docstrings for numerical functions.
 numerical_node_match.__doc__ = numerical_doc
-numerical_edge_match.__doc__ = numerical_doc.replace('node', 'edge')
-tmpdoc = numerical_doc.replace('node', 'edge')
-tmpdoc = tmpdoc.replace('numerical_edge_match', 'numerical_multiedge_match')
+numerical_edge_match.__doc__ = numerical_doc.replace("node", "edge")
+tmpdoc = numerical_doc.replace("node", "edge")
+tmpdoc = tmpdoc.replace("numerical_edge_match", "numerical_multiedge_match")
 numerical_multiedge_match.__doc__ = tmpdoc
+
+
 generic_doc = """
 Returns a comparison function for a generic attribute.

@@ -137,7 +253,28 @@ Examples
 >>> nm = generic_node_match(["weight", "color"], [1.0, "red"], [isclose, eq])

 """
-generic_edge_match = copyfunc(generic_node_match, 'generic_edge_match')
+
+
+def generic_node_match(attr, default, op):
+    if isinstance(attr, str):
+
+        def match(data1, data2):
+            return op(data1.get(attr, default), data2.get(attr, default))
+
+    else:
+        attrs = list(zip(attr, default, op))  # Python 3
+
+        def match(data1, data2):
+            for attr, d, operator in attrs:
+                if not operator(data1.get(attr, d), data2.get(attr, d)):
+                    return False
+            else:
+                return True
+
+    return match
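
This mirrors the docstring example: each attribute is compared with its own operator, and missing attributes take the default (illustrative):

    from math import isclose
    from operator import eq

    nm = generic_node_match(["weight", "color"], [1.0, "red"], [isclose, eq])
    assert nm({"weight": 1.0, "color": "red"}, {"color": "red"})  # weight defaults to 1.0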
+
+
+generic_edge_match = copyfunc(generic_node_match, "generic_edge_match")


 def generic_multiedge_match(attr, default, op):
@@ -176,8 +313,39 @@ def generic_multiedge_match(attr, default, op):
     >>> nm = generic_node_match(["weight", "color"], [1.0, "red"], [isclose, eq])

     """
-    pass
-

+    # This is slow, but generic.
+    # We must test every possible isomorphism between the edges.
+    if isinstance(attr, str):
+        attr = [attr]
+        default = [default]
+        op = [op]
+    attrs = list(zip(attr, default))  # Python 3
+
+    def match(datasets1, datasets2):
+        values1 = []
+        for data1 in datasets1.values():
+            x = tuple(data1.get(attr, d) for attr, d in attrs)
+            values1.append(x)
+        values2 = []
+        for data2 in datasets2.values():
+            x = tuple(data2.get(attr, d) for attr, d in attrs)
+            values2.append(x)
+        for vals2 in permutations(values2):
+            for xi, yi in zip(values1, vals2):
+                if not all(map(lambda x, y, z: z(x, y), xi, yi, op)):
+                    # This is not an isomorphism, go to next permutation.
+                    break
+            else:
+                # Then we found an isomorphism.
+                return True
+        else:
+            # Then there are no isomorphisms between the multiedges.
+            return False
+
+    return match
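
Since every permutation of one edge set is tried against the other, two multiedge sets match exactly when some pairing satisfies all operators (illustrative):

    from operator import eq

    em = generic_multiedge_match("color", None, eq)
    assert em({0: {"color": "r"}, 1: {"color": "b"}},
              {0: {"color": "b"}, 1: {"color": "r"}})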
+
+
+# Docstrings for generic functions.
 generic_node_match.__doc__ = generic_doc
-generic_edge_match.__doc__ = generic_doc.replace('node', 'edge')
+generic_edge_match.__doc__ = generic_doc.replace("node", "edge")
diff --git a/networkx/algorithms/isomorphism/temporalisomorphvf2.py b/networkx/algorithms/isomorphism/temporalisomorphvf2.py
index b0d999d9e..b78ecf149 100644
--- a/networkx/algorithms/isomorphism/temporalisomorphvf2.py
+++ b/networkx/algorithms/isomorphism/temporalisomorphvf2.py
@@ -64,13 +64,15 @@ Notes
 Handles directed and undirected graphs and graphs with parallel edges.

 """
+
 import networkx as nx
+
 from .isomorphvf2 import DiGraphMatcher, GraphMatcher
-__all__ = ['TimeRespectingGraphMatcher', 'TimeRespectingDiGraphMatcher']

+__all__ = ["TimeRespectingGraphMatcher", "TimeRespectingDiGraphMatcher"]

-class TimeRespectingGraphMatcher(GraphMatcher):

+class TimeRespectingGraphMatcher(GraphMatcher):
     def __init__(self, G1, G2, temporal_attribute_name, delta):
         """Initialize TimeRespectingGraphMatcher.

@@ -98,13 +100,27 @@ class TimeRespectingGraphMatcher(GraphMatcher):
         Edges one hop out from a node in the mapping should be
         time-respecting with respect to each other.
         """
-        pass
+        dates = []
+        for n in neighbors:
+            # Test for MultiGraph first: it subclasses Graph, so testing
+            # isinstance(Gx, nx.Graph) first would never reach this branch.
+            if isinstance(Gx, nx.MultiGraph):
+                # MultiGraph G[u][v] maps edge keys to data dictionaries.
+                for edge in Gx[Gx_node][n].values():  # All parallel edges.
+                    dates.append(edge[self.temporal_attribute_name])
+            else:  # Graph G[u][v] returns the data dictionary directly.
+                dates.append(Gx[Gx_node][n][self.temporal_attribute_name])
+        if any(x is None for x in dates):
+            raise ValueError("Datetime not supplied for at least one edge.")
+        return not dates or max(dates) - min(dates) <= self.delta

     def two_hop(self, Gx, core_x, Gx_node, neighbors):
         """
         Paths of length 2 from Gx_node should be time-respecting.
         """
-        pass
+        return all(
+            self.one_hop(Gx, v, [n for n in Gx[v] if n in core_x] + [Gx_node])
+            for v in neighbors
+        )

     def semantic_feasibility(self, G1_node, G2_node):
         """Returns True if adding (G1_node, G2_node) is semantically
@@ -114,11 +130,16 @@ class TimeRespectingGraphMatcher(GraphMatcher):
         maintain the self.tests if needed, to keep the match() method
         functional. Implementations should consider multigraphs.
         """
-        pass
+        neighbors = [n for n in self.G1[G1_node] if n in self.core_1]
+        if not self.one_hop(self.G1, G1_node, neighbors):  # Fail fast on first node.
+            return False
+        if not self.two_hop(self.G1, self.core_1, G1_node, neighbors):
+            return False
+        # Otherwise, this node is semantically feasible!
+        return True
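
A usage sketch (the "date" attribute name and both graphs are hypothetical): a mapping is accepted only if the dates on edges around each mapped node span at most delta.

    import datetime as dt
    import networkx as nx
    from networkx.algorithms.isomorphism import TimeRespectingGraphMatcher

    G1 = nx.Graph([(0, 1, {"date": dt.datetime(2024, 1, 1)}),
                   (1, 2, {"date": dt.datetime(2024, 1, 2)})])
    G2 = nx.Graph([("a", "b", {"date": dt.datetime(2024, 1, 1)}),
                   ("b", "c", {"date": dt.datetime(2024, 1, 2)})])
    gm = TimeRespectingGraphMatcher(G1, G2, "date", dt.timedelta(days=1))
    assert gm.subgraph_is_isomorphic()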


 class TimeRespectingDiGraphMatcher(DiGraphMatcher):
-
     def __init__(self, G1, G2, temporal_attribute_name, delta):
         """Initialize TimeRespectingDiGraphMatcher.

@@ -145,31 +166,85 @@ class TimeRespectingDiGraphMatcher(DiGraphMatcher):
         """
         Get the dates of edges from predecessors.
         """
-        pass
+        pred_dates = []
+        # Test for MultiDiGraph first: it subclasses DiGraph, so the reverse
+        # order would route multigraphs through the wrong branch.
+        if isinstance(Gx, nx.MultiDiGraph):
+            # MultiDiGraph G[u][v] maps edge keys to data dictionaries.
+            for n in pred:
+                for edge in Gx[n][Gx_node].values():  # All parallel edges.
+                    pred_dates.append(edge[self.temporal_attribute_name])
+        else:  # DiGraph G[u][v] returns the data dictionary directly.
+            for n in pred:
+                pred_dates.append(Gx[n][Gx_node][self.temporal_attribute_name])
+        return pred_dates

     def get_succ_dates(self, Gx, Gx_node, core_x, succ):
         """
         Get the dates of edges to successors.
         """
-        pass
+        succ_dates = []
+        # As in get_pred_dates, test for MultiDiGraph before DiGraph.
+        if isinstance(Gx, nx.MultiDiGraph):
+            # MultiDiGraph G[u][v] maps edge keys to data dictionaries.
+            for n in succ:
+                for edge in Gx[Gx_node][n].values():  # All parallel edges.
+                    succ_dates.append(edge[self.temporal_attribute_name])
+        else:  # DiGraph G[u][v] returns the data dictionary directly.
+            for n in succ:
+                succ_dates.append(Gx[Gx_node][n][self.temporal_attribute_name])
+        return succ_dates

     def one_hop(self, Gx, Gx_node, core_x, pred, succ):
         """
         The ego node.
         """
-        pass
+        pred_dates = self.get_pred_dates(Gx, Gx_node, core_x, pred)
+        succ_dates = self.get_succ_dates(Gx, Gx_node, core_x, succ)
+        return self.test_one(pred_dates, succ_dates) and self.test_two(
+            pred_dates, succ_dates
+        )

     def two_hop_pred(self, Gx, Gx_node, core_x, pred):
         """
         The predecessors of the ego node.
         """
-        pass
+        return all(
+            self.one_hop(
+                Gx,
+                p,
+                core_x,
+                self.preds(Gx, core_x, p),
+                self.succs(Gx, core_x, p, Gx_node),
+            )
+            for p in pred
+        )

     def two_hop_succ(self, Gx, Gx_node, core_x, succ):
         """
         The successors of the ego node.
         """
-        pass
+        return all(
+            self.one_hop(
+                Gx,
+                s,
+                core_x,
+                self.preds(Gx, core_x, s, Gx_node),
+                self.succs(Gx, core_x, s),
+            )
+            for s in succ
+        )
+
+    def preds(self, Gx, core_x, v, Gx_node=None):
+        pred = [n for n in Gx.predecessors(v) if n in core_x]
+        if Gx_node is not None:  # explicit check so that node 0 is appended too
+            pred.append(Gx_node)
+        return pred
+
+    def succs(self, Gx, core_x, v, Gx_node=None):
+        succ = [n for n in Gx.successors(v) if n in core_x]
+        if Gx_node is not None:  # explicit check so that node 0 is appended too
+            succ.append(Gx_node)
+        return succ

     def test_one(self, pred_dates, succ_dates):
         """
@@ -177,14 +252,33 @@ class TimeRespectingDiGraphMatcher(DiGraphMatcher):
         time-respecting with respect to each other, regardless of
         direction.
         """
-        pass
+        time_respecting = True
+        dates = pred_dates + succ_dates
+
+        if any(x is None for x in dates):
+            raise ValueError("Date or datetime not supplied for at least one edge.")
+
+        dates.sort()  # Small to large.
+        if dates and dates[-1] - dates[0] > self.delta:
+            time_respecting = False
+        return time_respecting

     def test_two(self, pred_dates, succ_dates):
         """
         Edges from a dual Gx_node in the mapping should be ordered in
         a time-respecting manner.
         """
-        pass
+        time_respecting = True
+        pred_dates.sort()
+        succ_dates.sort()
+        # First out before last in: the negation of the necessary condition
+        # for a time-respecting ordering.
+        if succ_dates and pred_dates and succ_dates[0] < pred_dates[-1]:
+            time_respecting = False
+        return time_respecting

     def semantic_feasibility(self, G1_node, G2_node):
         """Returns True if adding (G1_node, G2_node) is semantically
@@ -194,4 +288,17 @@ class TimeRespectingDiGraphMatcher(DiGraphMatcher):
         maintain the self.tests if needed, to keep the match() method
         functional. Implementations should consider multigraphs.
         """
-        pass
+        pred, succ = (
+            [n for n in self.G1.predecessors(G1_node) if n in self.core_1],
+            [n for n in self.G1.successors(G1_node) if n in self.core_1],
+        )
+        if not self.one_hop(
+            self.G1, G1_node, self.core_1, pred, succ
+        ):  # Fail fast on first node.
+            return False
+        if not self.two_hop_pred(self.G1, G1_node, self.core_1, pred):
+            return False
+        if not self.two_hop_succ(self.G1, G1_node, self.core_1, succ):
+            return False
+        # Otherwise, this node is semantically feasible!
+        return True
diff --git a/networkx/algorithms/isomorphism/tree_isomorphism.py b/networkx/algorithms/isomorphism/tree_isomorphism.py
index e4c940843..e409d515f 100644
--- a/networkx/algorithms/isomorphism/tree_isomorphism.py
+++ b/networkx/algorithms/isomorphism/tree_isomorphism.py
@@ -17,12 +17,14 @@ McGill University SOCS 308-250B, Winter 2002
 by Matthew Suderman
 http://crypto.cs.mcgill.ca/~crepeau/CS250/2004/HW5+.pdf
 """
+
 import networkx as nx
 from networkx.utils.decorators import not_implemented_for
-__all__ = ['rooted_tree_isomorphism', 'tree_isomorphism']
+
+__all__ = ["rooted_tree_isomorphism", "tree_isomorphism"]


-@nx._dispatchable(graphs={'t1': 0, 't2': 2}, returns_graph=True)
+@nx._dispatchable(graphs={"t1": 0, "t2": 2}, returns_graph=True)
 def root_trees(t1, root1, t2, root2):
     """Create a single digraph dT of free trees t1 and t2
    with roots root1 and root2 respectively
@@ -33,10 +35,74 @@ def root_trees(t1, root1, t2, root2):
     # t1 is numbered from 1 ... n
     # t2 is numbered from n+1 to 2n
     """
-    pass

+    dT = nx.DiGraph()
+
+    newroot1 = 1  # left root will be 1
+    newroot2 = nx.number_of_nodes(t1) + 1  # right will be n+1
+
+    # may be overlap in node names here so need separate maps
+    # given the old name, what is the new
+    namemap1 = {root1: newroot1}
+    namemap2 = {root2: newroot2}
+
+    # add an edge from our new root to root1 and root2
+    dT.add_edge(0, namemap1[root1])
+    dT.add_edge(0, namemap2[root2])
+
+    for i, (v1, v2) in enumerate(nx.bfs_edges(t1, root1)):
+        namemap1[v2] = i + namemap1[root1] + 1
+        dT.add_edge(namemap1[v1], namemap1[v2])
+
+    for i, (v1, v2) in enumerate(nx.bfs_edges(t2, root2)):
+        namemap2[v2] = i + namemap2[root2] + 1
+        dT.add_edge(namemap2[v1], namemap2[v2])
+
+    # now we really want the inverse of namemap1 and namemap2
+    # giving the old name given the new
+    # since the values of namemap1 and namemap2 are unique
+    # there won't be collisions
+    namemap = {}
+    for old, new in namemap1.items():
+        namemap[new] = old
+    for old, new in namemap2.items():
+        namemap[new] = old
+
+    return (dT, namemap, newroot1, newroot2)
+
+
+# figure out the level of each node, with 0 at root
+@nx._dispatchable
+def assign_levels(G, root):
+    level = {}
+    level[root] = 0
+    for v1, v2 in nx.bfs_edges(G, root):
+        level[v2] = level[v1] + 1

-@nx._dispatchable(graphs={'t1': 0, 't2': 2})
+    return level
+
+
+# now group the nodes at each level
+def group_by_levels(levels):
+    L = {}
+    for n, lev in levels.items():
+        if lev not in L:
+            L[lev] = []
+        L[lev].append(n)
+
+    return L
+
+
+# now let's get the isomorphism by walking the ordered_children
+def generate_isomorphism(v, w, M, ordered_children):
+    # make sure tree1 comes first
+    assert v < w
+    M.append((v, w))
+    for x, y in zip(ordered_children[v], ordered_children[w]):
+        generate_isomorphism(x, y, M, ordered_children)
+
+
+@nx._dispatchable(graphs={"t1": 0, "t2": 2})
 def rooted_tree_isomorphism(t1, root1, t2, root2):
     """
     Given two rooted trees `t1` and `t2`,
@@ -78,12 +144,74 @@ def rooted_tree_isomorphism(t1, root1, t2, root2):

         If `t1` and `t2` are not isomorphic, then it returns the empty list.
     """
-    pass

+    assert nx.is_tree(t1)
+    assert nx.is_tree(t2)
+
+    # get the rooted tree formed by combining them
+    # with unique names
+    (dT, namemap, newroot1, newroot2) = root_trees(t1, root1, t2, root2)
+
+    # compute the distance from the root, with 0 for our fake root
+    levels = assign_levels(dT, 0)
+
+    # height
+    h = max(levels.values())

-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
-@nx._dispatchable(graphs={'t1': 0, 't2': 1})
+    # collect nodes into a dict by level
+    L = group_by_levels(levels)
+
+    # each node has a label, initially set to 0
+    label = {v: 0 for v in dT}
+    # and also ordered_labels and ordered_children
+    # which will store ordered tuples
+    ordered_labels = {v: () for v in dT}
+    ordered_children = {v: () for v in dT}
+
+    # nothing to do on last level so start on h-1
+    # also nothing to do for our fake level 0, so skip that
+    for i in range(h - 1, 0, -1):
+        # update the ordered_labels and ordered_children
+        # for any children
+        for v in L[i]:
+            # nothing to do if no children
+            if dT.out_degree(v) > 0:
+                # get all the pairs of labels and nodes of children
+                # and sort by labels
+                s = sorted((label[u], u) for u in dT.successors(v))
+
+                # invert to give a list of two tuples
+                # the sorted labels, and the corresponding children
+                ordered_labels[v], ordered_children[v] = list(zip(*s))
+
+        # now collect and sort the sorted ordered_labels
+        # for all nodes in L[i], carrying along the node
+        forlabel = sorted((ordered_labels[v], v) for v in L[i])
+
+        # now assign labels to these nodes, according to the sorted order
+        # starting from 0, where identical ordered_labels get the same label
+        current = 0
+        # use a fresh index name so the outer level loop variable is not shadowed
+        for j, (ol, v) in enumerate(forlabel):
+            # advance to the next label if not the first entry and different from previous
+            if (j != 0) and (ol != forlabel[j - 1][0]):
+                current += 1
+            label[v] = current
+
+    # they are isomorphic if the labels of newroot1 and newroot2 are 0
+    isomorphism = []
+    if label[newroot1] == 0 and label[newroot2] == 0:
+        generate_isomorphism(newroot1, newroot2, isomorphism, ordered_children)
+
+        # get the mapping back in terms of the old names
+        # return in sorted order for neatness
+        isomorphism = [(namemap[u], namemap[v]) for (u, v) in isomorphism]
+
+    return isomorphism
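
A small check (illustrative): a relabeled path rooted at corresponding ends should come back as the natural mapping.

    import networkx as nx

    t1 = nx.path_graph(3)  # 0 - 1 - 2
    t2 = nx.relabel_nodes(t1, {0: "a", 1: "b", 2: "c"})
    assert rooted_tree_isomorphism(t1, 0, t2, "a") == [(0, "a"), (1, "b"), (2, "c")]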
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable(graphs={"t1": 0, "t2": 1})
 def tree_isomorphism(t1, t2):
     """
     Given two undirected (or free) trees `t1` and `t2`,
@@ -117,4 +245,40 @@ def tree_isomorphism(t1, t2):
     -----
     This runs in O(n*log(n)) time for trees with n nodes.
     """
-    pass
+
+    assert nx.is_tree(t1)
+    assert nx.is_tree(t2)
+
+    # To be isomorphic, t1 and t2 must have the same number of nodes.
+    if nx.number_of_nodes(t1) != nx.number_of_nodes(t2):
+        return []
+
+    # Another shortcut is that the sorted degree sequences need to be the same.
+    degree_sequence1 = sorted(d for (n, d) in t1.degree())
+    degree_sequence2 = sorted(d for (n, d) in t2.degree())
+
+    if degree_sequence1 != degree_sequence2:
+        return []
+
+    # A tree can have either 1 or 2 centers.
+    # If the number doesn't match then t1 and t2 are not isomorphic.
+    center1 = nx.center(t1)
+    center2 = nx.center(t2)
+
+    if len(center1) != len(center2):
+        return []
+
+    # If there is only 1 center in each, then use it.
+    if len(center1) == 1:
+        return rooted_tree_isomorphism(t1, center1[0], t2, center2[0])
+
+    # If both have 2 centers, then try the first center of t1
+    # with the first center of t2.
+    attempts = rooted_tree_isomorphism(t1, center1[0], t2, center2[0])
+
+    # If that worked we're done.
+    if len(attempts) > 0:
+        return attempts
+
+    # Otherwise, try center1[0] with center2[1] and see if that works.
+    return rooted_tree_isomorphism(t1, center1[0], t2, center2[1])
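
A quick check (illustrative): two 4-node trees whose unique degree-3 centers must be paired by any isomorphism.

    import networkx as nx

    t1 = nx.Graph([(0, 1), (1, 2), (1, 3)])
    t2 = nx.Graph([("x", "y"), ("y", "z"), ("y", "w")])
    assert dict(tree_isomorphism(t1, t2))[1] == "y"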
diff --git a/networkx/algorithms/isomorphism/vf2pp.py b/networkx/algorithms/isomorphism/vf2pp.py
index 13e668d89..589e06447 100644
--- a/networkx/algorithms/isomorphism/vf2pp.py
+++ b/networkx/algorithms/isomorphism/vf2pp.py
@@ -62,19 +62,42 @@ References

 """
 import collections
+
 import networkx as nx
-__all__ = ['vf2pp_isomorphism', 'vf2pp_is_isomorphic', 'vf2pp_all_isomorphisms'
-    ]
-_GraphParameters = collections.namedtuple('_GraphParameters', ['G1', 'G2',
-    'G1_labels', 'G2_labels', 'nodes_of_G1Labels', 'nodes_of_G2Labels',
-    'G2_nodes_of_degree'])
-_StateParameters = collections.namedtuple('_StateParameters', ['mapping',
-    'reverse_mapping', 'T1', 'T1_in', 'T1_tilde', 'T1_tilde_in', 'T2',
-    'T2_in', 'T2_tilde', 'T2_tilde_in'])
-
-
-@nx._dispatchable(graphs={'G1': 0, 'G2': 1}, node_attrs={'node_label':
-    'default_label'})
+
+__all__ = ["vf2pp_isomorphism", "vf2pp_is_isomorphic", "vf2pp_all_isomorphisms"]
+
+_GraphParameters = collections.namedtuple(
+    "_GraphParameters",
+    [
+        "G1",
+        "G2",
+        "G1_labels",
+        "G2_labels",
+        "nodes_of_G1Labels",
+        "nodes_of_G2Labels",
+        "G2_nodes_of_degree",
+    ],
+)
+
+_StateParameters = collections.namedtuple(
+    "_StateParameters",
+    [
+        "mapping",
+        "reverse_mapping",
+        "T1",
+        "T1_in",
+        "T1_tilde",
+        "T1_tilde_in",
+        "T2",
+        "T2_in",
+        "T2_tilde",
+        "T2_tilde_in",
+    ],
+)
+
+
+@nx._dispatchable(graphs={"G1": 0, "G2": 1}, node_attrs={"node_label": "default_label"})
 def vf2pp_isomorphism(G1, G2, node_label=None, default_label=None):
     """Return an isomorphic mapping between `G1` and `G2` if it exists.

@@ -98,11 +121,14 @@ def vf2pp_isomorphism(G1, G2, node_label=None, default_label=None):
     dict or None
         Node mapping if the two graphs are isomorphic. None otherwise.
     """
-    pass
+    try:
+        mapping = next(vf2pp_all_isomorphisms(G1, G2, node_label, default_label))
+        return mapping
+    except StopIteration:
+        return None
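
Usage sketch:

    import networkx as nx

    G1 = nx.path_graph(4)
    G2 = nx.relabel_nodes(G1, {0: "a", 1: "b", 2: "c", 3: "d"})
    mapping = nx.vf2pp_isomorphism(G1, G2)
    assert mapping is not None and len(mapping) == 4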


-@nx._dispatchable(graphs={'G1': 0, 'G2': 1}, node_attrs={'node_label':
-    'default_label'})
+@nx._dispatchable(graphs={"G1": 0, "G2": 1}, node_attrs={"node_label": "default_label"})
 def vf2pp_is_isomorphic(G1, G2, node_label=None, default_label=None):
     """Examines whether G1 and G2 are isomorphic.

@@ -126,11 +152,12 @@ def vf2pp_is_isomorphic(G1, G2, node_label=None, default_label=None):
     bool
         True if the two graphs are isomorphic, False otherwise.
     """
-    pass
+    return vf2pp_isomorphism(G1, G2, node_label, default_label) is not None


-@nx._dispatchable(graphs={'G1': 0, 'G2': 1}, node_attrs={'node_label':
-    'default_label'})
+@nx._dispatchable(graphs={"G1": 0, "G2": 1}, node_attrs={"node_label": "default_label"})
 def vf2pp_all_isomorphisms(G1, G2, node_label=None, default_label=None):
     """Yields all the possible mappings between G1 and G2.

@@ -154,11 +181,112 @@ def vf2pp_all_isomorphisms(G1, G2, node_label=None, default_label=None):
     dict
         Isomorphic mapping between the nodes in `G1` and `G2`.
     """
-    pass
+    if G1.number_of_nodes() == 0 or G2.number_of_nodes() == 0:
+        return False
+
+    # Create the degree dicts based on graph type
+    if G1.is_directed():
+        G1_degree = {
+            n: (in_degree, out_degree)
+            for (n, in_degree), (_, out_degree) in zip(G1.in_degree, G1.out_degree)
+        }
+        G2_degree = {
+            n: (in_degree, out_degree)
+            for (n, in_degree), (_, out_degree) in zip(G2.in_degree, G2.out_degree)
+        }
+    else:
+        G1_degree = dict(G1.degree)
+        G2_degree = dict(G2.degree)
+
+    if not G1.is_directed():
+        find_candidates = _find_candidates
+        restore_Tinout = _restore_Tinout
+    else:
+        find_candidates = _find_candidates_Di
+        restore_Tinout = _restore_Tinout_Di
+
+    # Check that both graphs have the same number of nodes and degree sequence
+    if G1.order() != G2.order():
+        return False
+    if sorted(G1_degree.values()) != sorted(G2_degree.values()):
+        return False
+
+    # Initialize parameters and cache necessary information about degree and labels
+    graph_params, state_params = _initialize_parameters(
+        G1, G2, G2_degree, node_label, default_label
+    )
+
+    # Check that G1 and G2 have the same labels and that the number of nodes per label is equal between the two graphs
+    if not _precheck_label_properties(graph_params):
+        return False
+
+    # Calculate the optimal node ordering
+    node_order = _matching_order(graph_params)
+
+    # Initialize the stack
+    stack = []
+    candidates = iter(
+        find_candidates(node_order[0], graph_params, state_params, G1_degree)
+    )
+    stack.append((node_order[0], candidates))
+
+    mapping = state_params.mapping
+    reverse_mapping = state_params.reverse_mapping
+
+    # Index into node_order of the node currently being examined
+    matching_node = 1
+
+    while stack:
+        current_node, candidate_nodes = stack[-1]
+
+        try:
+            candidate = next(candidate_nodes)
+        except StopIteration:
+            # If no remaining candidates, return to a previous state, and follow another branch
+            stack.pop()
+            matching_node -= 1
+            if stack:
+                # Pop the previously added u-v pair, and look for a different candidate _v for u
+                popped_node1, _ = stack[-1]
+                popped_node2 = mapping[popped_node1]
+                mapping.pop(popped_node1)
+                reverse_mapping.pop(popped_node2)
+                restore_Tinout(popped_node1, popped_node2, graph_params, state_params)
+            continue
+
+        if _feasibility(current_node, candidate, graph_params, state_params):
+            # Terminate if the mapping is about to reach full size (the current pair is not yet added)
+            if len(mapping) == G2.number_of_nodes() - 1:
+                cp_mapping = mapping.copy()
+                cp_mapping[current_node] = candidate
+                yield cp_mapping
+                continue
+
+            # Feasibility rules pass, so extend the mapping and update the parameters
+            mapping[current_node] = candidate
+            reverse_mapping[candidate] = current_node
+            _update_Tinout(current_node, candidate, graph_params, state_params)
+            # Append the next node and its candidates to the stack
+            candidates = iter(
+                find_candidates(
+                    node_order[matching_node], graph_params, state_params, G1_degree
+                )
+            )
+            stack.append((node_order[matching_node], candidates))
+            matching_node += 1
+
+
+def _precheck_label_properties(graph_params):
+    G1, G2, G1_labels, G2_labels, nodes_of_G1Labels, nodes_of_G2Labels, _ = graph_params
+    if any(
+        label not in nodes_of_G1Labels or len(nodes_of_G1Labels[label]) != len(nodes)
+        for label, nodes in nodes_of_G2Labels.items()
+    ):
+        return False
+    return True


-def _initialize_parameters(G1, G2, G2_degree, node_label=None, default_label=-1
-    ):
+def _initialize_parameters(G1, G2, G2_degree, node_label=None, default_label=-1):
     """Initializes all the necessary parameters for VF2++

     Parameters
@@ -193,7 +321,45 @@ def _initialize_parameters(G1, G2, G2_degree, node_label=None, default_label=-1
         T1_out, T2_out: set
             Ti_out contains all the nodes from Gi, that are neither in the mapping nor in Ti
     """
-    pass
+    G1_labels = dict(G1.nodes(data=node_label, default=default_label))
+    G2_labels = dict(G2.nodes(data=node_label, default=default_label))
+
+    graph_params = _GraphParameters(
+        G1,
+        G2,
+        G1_labels,
+        G2_labels,
+        nx.utils.groups(G1_labels),
+        nx.utils.groups(G2_labels),
+        nx.utils.groups(G2_degree),
+    )
+
+    T1, T1_in = set(), set()
+    T2, T2_in = set(), set()
+    if G1.is_directed():
+        T1_tilde, T1_tilde_in = (
+            set(G1.nodes()),
+            set(),
+        )  # todo: do we need Ti_tilde_in? What nodes does it have?
+        T2_tilde, T2_tilde_in = set(G2.nodes()), set()
+    else:
+        T1_tilde, T1_tilde_in = set(G1.nodes()), set()
+        T2_tilde, T2_tilde_in = set(G2.nodes()), set()
+
+    state_params = _StateParameters(
+        {},
+        {},
+        T1,
+        T1_in,
+        T1_tilde,
+        T1_tilde_in,
+        T2,
+        T2_in,
+        T2_tilde,
+        T2_tilde_in,
+    )
+
+    return graph_params, state_params


 def _matching_order(graph_params):
@@ -222,10 +388,54 @@ def _matching_order(graph_params):
     node_order: list
         The ordering of the nodes.
     """
-    pass
-
-
-def _find_candidates(u, graph_params, state_params, G1_degree):
+    G1, G2, G1_labels, _, _, nodes_of_G2Labels, _ = graph_params
+    if not G1 and not G2:
+        return []  # no nodes to order
+
+    if G1.is_directed():
+        G1 = G1.to_undirected(as_view=True)
+
+    V1_unordered = set(G1.nodes())
+    label_rarity = {label: len(nodes) for label, nodes in nodes_of_G2Labels.items()}
+    used_degrees = {node: 0 for node in G1}
+    node_order = []
+
+    while V1_unordered:
+        max_rarity = min(label_rarity[G1_labels[x]] for x in V1_unordered)
+        rarest_nodes = [
+            n for n in V1_unordered if label_rarity[G1_labels[n]] == max_rarity
+        ]
+        max_node = max(rarest_nodes, key=G1.degree)
+
+        for dlevel_nodes in nx.bfs_layers(G1, max_node):
+            nodes_to_add = dlevel_nodes.copy()
+            while nodes_to_add:
+                max_used_degree = max(used_degrees[n] for n in nodes_to_add)
+                max_used_degree_nodes = [
+                    n for n in nodes_to_add if used_degrees[n] == max_used_degree
+                ]
+                max_degree = max(G1.degree[n] for n in max_used_degree_nodes)
+                max_degree_nodes = [
+                    n for n in max_used_degree_nodes if G1.degree[n] == max_degree
+                ]
+                next_node = min(
+                    max_degree_nodes, key=lambda x: label_rarity[G1_labels[x]]
+                )
+
+                node_order.append(next_node)
+                for node in G1.neighbors(next_node):
+                    used_degrees[node] += 1
+
+                nodes_to_add.remove(next_node)
+                label_rarity[G1_labels[next_node]] -= 1
+                V1_unordered.discard(next_node)
+
+    return node_order
+
+
+def _find_candidates(
+    u, graph_params, state_params, G1_degree
+):  # todo: make the 4th argument the degree of u
     """Given node u of G1, finds the candidates of u from G2.

     Parameters
@@ -263,7 +473,92 @@ def _find_candidates(u, graph_params, state_params, G1_degree):
     candidates: set
         The nodes from G2 which are candidates for u.
     """
-    pass
+    G1, G2, G1_labels, _, _, nodes_of_G2Labels, G2_nodes_of_degree = graph_params
+    mapping, reverse_mapping, _, _, _, _, _, _, T2_tilde, _ = state_params
+
+    covered_nbrs = [nbr for nbr in G1[u] if nbr in mapping]
+    if not covered_nbrs:
+        candidates = set(nodes_of_G2Labels[G1_labels[u]])
+        candidates.intersection_update(G2_nodes_of_degree[G1_degree[u]])
+        candidates.intersection_update(T2_tilde)
+        candidates.difference_update(reverse_mapping)
+        if G1.is_multigraph():
+            candidates.difference_update(
+                {
+                    node
+                    for node in candidates
+                    if G1.number_of_edges(u, u) != G2.number_of_edges(node, node)
+                }
+            )
+        return candidates
+
+    nbr1 = covered_nbrs[0]
+    common_nodes = set(G2[mapping[nbr1]])
+
+    for nbr1 in covered_nbrs[1:]:
+        common_nodes.intersection_update(G2[mapping[nbr1]])
+
+    common_nodes.difference_update(reverse_mapping)
+    common_nodes.intersection_update(G2_nodes_of_degree[G1_degree[u]])
+    common_nodes.intersection_update(nodes_of_G2Labels[G1_labels[u]])
+    if G1.is_multigraph():
+        common_nodes.difference_update(
+            {
+                node
+                for node in common_nodes
+                if G1.number_of_edges(u, u) != G2.number_of_edges(node, node)
+            }
+        )
+    return common_nodes
+
+
+def _find_candidates_Di(u, graph_params, state_params, G1_degree):
+    G1, G2, G1_labels, _, _, nodes_of_G2Labels, G2_nodes_of_degree = graph_params
+    mapping, reverse_mapping, _, _, _, _, _, _, T2_tilde, _ = state_params
+
+    covered_successors = [succ for succ in G1[u] if succ in mapping]
+    covered_predecessors = [pred for pred in G1.pred[u] if pred in mapping]
+
+    if not (covered_successors or covered_predecessors):
+        candidates = set(nodes_of_G2Labels[G1_labels[u]])
+        candidates.intersection_update(G2_nodes_of_degree[G1_degree[u]])
+        candidates.intersection_update(T2_tilde)
+        candidates.difference_update(reverse_mapping)
+        if G1.is_multigraph():
+            candidates.difference_update(
+                {
+                    node
+                    for node in candidates
+                    if G1.number_of_edges(u, u) != G2.number_of_edges(node, node)
+                }
+            )
+        return candidates
+
+    if covered_successors:
+        succ1 = covered_successors[0]
+        common_nodes = set(G2.pred[mapping[succ1]])
+
+        for succ1 in covered_successors[1:]:
+            common_nodes.intersection_update(G2.pred[mapping[succ1]])
+    else:
+        pred1 = covered_predecessors.pop()
+        common_nodes = set(G2[mapping[pred1]])
+
+    for pred1 in covered_predecessors:
+        common_nodes.intersection_update(G2[mapping[pred1]])
+
+    common_nodes.difference_update(reverse_mapping)
+    common_nodes.intersection_update(G2_nodes_of_degree[G1_degree[u]])
+    common_nodes.intersection_update(nodes_of_G2Labels[G1_labels[u]])
+    if G1.is_multigraph():
+        common_nodes.difference_update(
+            {
+                node
+                for node in common_nodes
+                if G1.number_of_edges(u, u) != G2.number_of_edges(node, node)
+            }
+        )
+    return common_nodes


 def _feasibility(node1, node2, graph_params, state_params):
@@ -308,7 +603,16 @@ def _feasibility(node1, node2, graph_params, state_params):
     -------
     True if all checks are successful, False otherwise.
     """
-    pass
+    G1 = graph_params.G1
+
+    if _cut_PT(node1, node2, graph_params, state_params):
+        return False
+
+    if G1.is_multigraph():
+        if not _consistent_PT(node1, node2, graph_params, state_params):
+            return False
+
+    return True


 def _cut_PT(u, v, graph_params, state_params):
@@ -348,7 +652,85 @@ def _cut_PT(u, v, graph_params, state_params):
     -------
     True if we should prune this branch, i.e. the node pair failed the cutting checks. False otherwise.
     """
-    pass
+    G1, G2, G1_labels, G2_labels, _, _, _ = graph_params
+    (
+        _,
+        _,
+        T1,
+        T1_in,
+        T1_tilde,
+        _,
+        T2,
+        T2_in,
+        T2_tilde,
+        _,
+    ) = state_params
+
+    u_labels_predecessors, v_labels_predecessors = {}, {}
+    if G1.is_directed():
+        u_labels_predecessors = nx.utils.groups(
+            {n1: G1_labels[n1] for n1 in G1.pred[u]}
+        )
+        v_labels_predecessors = nx.utils.groups(
+            {n2: G2_labels[n2] for n2 in G2.pred[v]}
+        )
+
+        if set(u_labels_predecessors.keys()) != set(v_labels_predecessors.keys()):
+            return True
+
+    u_labels_successors = nx.utils.groups({n1: G1_labels[n1] for n1 in G1[u]})
+    v_labels_successors = nx.utils.groups({n2: G2_labels[n2] for n2 in G2[v]})
+
+    # If the neighbors of u do not have the same label set as those of v, the pair is not feasible.
+    if set(u_labels_successors.keys()) != set(v_labels_successors.keys()):
+        return True
+
+    for label, G1_nbh in u_labels_successors.items():
+        G2_nbh = v_labels_successors[label]
+
+        if G1.is_multigraph():
+            # Compare the sorted multisets of parallel-edge counts between u/v and their neighborhoods.
+            u_nbrs_edges = sorted(G1.number_of_edges(u, x) for x in G1_nbh)
+            v_nbrs_edges = sorted(G2.number_of_edges(v, x) for x in G2_nbh)
+            if any(
+                u_nbr_edges != v_nbr_edges
+                for u_nbr_edges, v_nbr_edges in zip(u_nbrs_edges, v_nbrs_edges)
+            ):
+                return True
+
+        if len(T1.intersection(G1_nbh)) != len(T2.intersection(G2_nbh)):
+            return True
+        if len(T1_tilde.intersection(G1_nbh)) != len(T2_tilde.intersection(G2_nbh)):
+            return True
+        if G1.is_directed() and len(T1_in.intersection(G1_nbh)) != len(
+            T2_in.intersection(G2_nbh)
+        ):
+            return True
+
+    if not G1.is_directed():
+        return False
+
+    for label, G1_pred in u_labels_predecessors.items():
+        G2_pred = v_labels_predecessors[label]
+
+        if G1.is_multigraph():
+            # Compare the sorted multisets of parallel-edge counts between u/v and their predecessors.
+            u_pred_edges = sorted(G1.number_of_edges(u, x) for x in G1_pred)
+            v_pred_edges = sorted(G2.number_of_edges(v, x) for x in G2_pred)
+            if any(
+                u_nbr_edges != v_nbr_edges
+                for u_nbr_edges, v_nbr_edges in zip(u_pred_edges, v_pred_edges)
+            ):
+                return True
+
+        if len(T1.intersection(G1_pred)) != len(T2.intersection(G2_pred)):
+            return True
+        if len(T1_tilde.intersection(G1_pred)) != len(T2_tilde.intersection(G2_pred)):
+            return True
+        if len(T1_in.intersection(G1_pred)) != len(T2_in.intersection(G2_pred)):
+            return True
+
+    return False


 def _consistent_PT(u, v, graph_params, state_params):
@@ -388,7 +770,41 @@ def _consistent_PT(u, v, graph_params, state_params):
     -------
     True if the pair passes all the consistency checks successfully. False otherwise.
     """
-    pass
+    G1, G2 = graph_params.G1, graph_params.G2
+    mapping, reverse_mapping = state_params.mapping, state_params.reverse_mapping
+
+    for neighbor in G1[u]:
+        if neighbor in mapping:
+            if G1.number_of_edges(u, neighbor) != G2.number_of_edges(
+                v, mapping[neighbor]
+            ):
+                return False
+
+    for neighbor in G2[v]:
+        if neighbor in reverse_mapping:
+            if G1.number_of_edges(u, reverse_mapping[neighbor]) != G2.number_of_edges(
+                v, neighbor
+            ):
+                return False
+
+    if not G1.is_directed():
+        return True
+
+    for predecessor in G1.pred[u]:
+        if predecessor in mapping:
+            if G1.number_of_edges(predecessor, u) != G2.number_of_edges(
+                mapping[predecessor], v
+            ):
+                return False
+
+    for predecessor in G2.pred[v]:
+        if predecessor in reverse_mapping:
+            if G1.number_of_edges(
+                reverse_mapping[predecessor], u
+            ) != G2.number_of_edges(predecessor, v):
+                return False
+
+    return True


 def _update_Tinout(new_node1, new_node2, graph_params, state_params):
@@ -431,7 +847,55 @@ def _update_Tinout(new_node1, new_node2, graph_params, state_params):
         T1_tilde, T2_tilde: set
             Ti_out contains all the nodes from Gi, that are neither in the mapping nor in Ti
     """
-    pass
+    G1, G2, _, _, _, _, _ = graph_params
+    (
+        mapping,
+        reverse_mapping,
+        T1,
+        T1_in,
+        T1_tilde,
+        T1_tilde_in,
+        T2,
+        T2_in,
+        T2_tilde,
+        T2_tilde_in,
+    ) = state_params
+
+    uncovered_successors_G1 = {succ for succ in G1[new_node1] if succ not in mapping}
+    uncovered_successors_G2 = {
+        succ for succ in G2[new_node2] if succ not in reverse_mapping
+    }
+
+    # Add the uncovered neighbors of node1 and node2 in T1 and T2 respectively
+    T1.update(uncovered_successors_G1)
+    T2.update(uncovered_successors_G2)
+    T1.discard(new_node1)
+    T2.discard(new_node2)
+
+    T1_tilde.difference_update(uncovered_successors_G1)
+    T2_tilde.difference_update(uncovered_successors_G2)
+    T1_tilde.discard(new_node1)
+    T2_tilde.discard(new_node2)
+
+    if not G1.is_directed():
+        return
+
+    uncovered_predecessors_G1 = {
+        pred for pred in G1.pred[new_node1] if pred not in mapping
+    }
+    uncovered_predecessors_G2 = {
+        pred for pred in G2.pred[new_node2] if pred not in reverse_mapping
+    }
+
+    T1_in.update(uncovered_predecessors_G1)
+    T2_in.update(uncovered_predecessors_G2)
+    T1_in.discard(new_node1)
+    T2_in.discard(new_node2)
+
+    T1_tilde.difference_update(uncovered_predecessors_G1)
+    T2_tilde.difference_update(uncovered_predecessors_G2)
+    T1_tilde.discard(new_node1)
+    T2_tilde.discard(new_node2)


 def _restore_Tinout(popped_node1, popped_node2, graph_params, state_params):
@@ -467,4 +931,138 @@ def _restore_Tinout(popped_node1, popped_node2, graph_params, state_params):
         T1_tilde, T2_tilde: set
             Ti_out contains all the nodes from Gi, that are neither in the mapping nor in Ti
     """
-    pass
+    # If the node we want to remove from the mapping has at least one covered neighbor, add it to T1.
+    G1, G2, _, _, _, _, _ = graph_params
+    (
+        mapping,
+        reverse_mapping,
+        T1,
+        T1_in,
+        T1_tilde,
+        T1_tilde_in,
+        T2,
+        T2_in,
+        T2_tilde,
+        T2_tilde_in,
+    ) = state_params
+
+    is_added = False
+    for neighbor in G1[popped_node1]:
+        if neighbor in mapping:
+            # if a neighbor of the excluded node1 is in the mapping, keep node1 in T1
+            is_added = True
+            T1.add(popped_node1)
+        else:
+            # check if its neighbor has another connection with a covered node. If not, only then exclude it from T1
+            if any(nbr in mapping for nbr in G1[neighbor]):
+                continue
+            T1.discard(neighbor)
+            T1_tilde.add(neighbor)
+
+    # Case where the node is present in neither the mapping nor T1. By definition, it should belong to T1_tilde.
+    if not is_added:
+        T1_tilde.add(popped_node1)
+
+    is_added = False
+    for neighbor in G2[popped_node2]:
+        if neighbor in reverse_mapping:
+            is_added = True
+            T2.add(popped_node2)
+        else:
+            if any(nbr in reverse_mapping for nbr in G2[neighbor]):
+                continue
+            T2.discard(neighbor)
+            T2_tilde.add(neighbor)
+
+    if not is_added:
+        T2_tilde.add(popped_node2)
+
+
+def _restore_Tinout_Di(popped_node1, popped_node2, graph_params, state_params):
+    # If the node we want to remove from the mapping has at least one covered neighbor, add it to T1.
+    G1, G2, _, _, _, _, _ = graph_params
+    (
+        mapping,
+        reverse_mapping,
+        T1,
+        T1_in,
+        T1_tilde,
+        T1_tilde_in,
+        T2,
+        T2_in,
+        T2_tilde,
+        T2_tilde_in,
+    ) = state_params
+
+    is_added = False
+    for successor in G1[popped_node1]:
+        if successor in mapping:
+            # if a successor of the excluded node1 is in the mapping, keep node1 in T1_in
+            is_added = True
+            T1_in.add(popped_node1)
+        else:
+            # check if its neighbor has another connection with a covered node. If not, only then exclude it from T1
+            if not any(pred in mapping for pred in G1.pred[successor]):
+                T1.discard(successor)
+
+            if not any(succ in mapping for succ in G1[successor]):
+                T1_in.discard(successor)
+
+            if successor not in T1:
+                if successor not in T1_in:
+                    T1_tilde.add(successor)
+
+    for predecessor in G1.pred[popped_node1]:
+        if predecessor in mapping:
+            # if a predecessor of the excluded node1 is in the mapping, keep node1 in T1
+            is_added = True
+            T1.add(popped_node1)
+        else:
+            # check if its neighbor has another connection with a covered node. If not, only then exclude it from T1
+            if not any(pred in mapping for pred in G1.pred[predecessor]):
+                T1.discard(predecessor)
+
+            if not any(succ in mapping for succ in G1[predecessor]):
+                T1_in.discard(predecessor)
+
+            if not (predecessor in T1 or predecessor in T1_in):
+                T1_tilde.add(predecessor)
+
+    # Case where the node is present in neither the mapping nor T1. By definition, it should belong to T1_tilde.
+    if not is_added:
+        T1_tilde.add(popped_node1)
+
+    is_added = False
+    for successor in G2[popped_node2]:
+        if successor in reverse_mapping:
+            is_added = True
+            T2_in.add(popped_node2)
+        else:
+            if not any(pred in reverse_mapping for pred in G2.pred[successor]):
+                T2.discard(successor)
+
+            if not any(succ in reverse_mapping for succ in G2[successor]):
+                T2_in.discard(successor)
+
+            if successor not in T2:
+                if successor not in T2_in:
+                    T2_tilde.add(successor)
+
+    for predecessor in G2.pred[popped_node2]:
+        if predecessor in reverse_mapping:
+            # if a predecessor of the excluded node2 is in the reverse mapping, keep node2 in T2
+            is_added = True
+            T2.add(popped_node2)
+        else:
+            # check if the predecessor has another connection with a covered node. If not, only then exclude it from T2
+            if not any(pred in reverse_mapping for pred in G2.pred[predecessor]):
+                T2.discard(predecessor)
+
+            if not any(succ in reverse_mapping for succ in G2[predecessor]):
+                T2_in.discard(predecessor)
+
+            if not (predecessor in T2 or predecessor in T2_in):
+                T2_tilde.add(predecessor)
+
+    if not is_added:
+        T2_tilde.add(popped_node2)
diff --git a/networkx/algorithms/isomorphism/vf2userfunc.py b/networkx/algorithms/isomorphism/vf2userfunc.py
index 0a09206b2..9484edc04 100644
--- a/networkx/algorithms/isomorphism/vf2userfunc.py
+++ b/networkx/algorithms/isomorphism/vf2userfunc.py
@@ -30,14 +30,44 @@
     So, all of the edge attribute dictionaries are passed to edge_match, and
     it must determine if there is an isomorphism between the two sets of edges.
 """
+
 from . import isomorphvf2 as vf2
-__all__ = ['GraphMatcher', 'DiGraphMatcher', 'MultiGraphMatcher',
-    'MultiDiGraphMatcher']
+
+__all__ = ["GraphMatcher", "DiGraphMatcher", "MultiGraphMatcher", "MultiDiGraphMatcher"]


 def _semantic_feasibility(self, G1_node, G2_node):
     """Returns True if mapping G1_node to G2_node is semantically feasible."""
-    pass
+    # Make sure the nodes match
+    if self.node_match is not None:
+        nm = self.node_match(self.G1.nodes[G1_node], self.G2.nodes[G2_node])
+        if not nm:
+            return False
+
+    # Make sure the edges match
+    if self.edge_match is not None:
+        # Cached lookups
+        G1nbrs = self.G1_adj[G1_node]
+        G2nbrs = self.G2_adj[G2_node]
+        core_1 = self.core_1
+        edge_match = self.edge_match
+
+        for neighbor in G1nbrs:
+            # G1_node is not in core_1, so we must handle R_self separately
+            if neighbor == G1_node:
+                if G2_node in G2nbrs and not edge_match(
+                    G1nbrs[G1_node], G2nbrs[G2_node]
+                ):
+                    return False
+            elif neighbor in core_1:
+                G2_nbr = core_1[neighbor]
+                if G2_nbr in G2nbrs and not edge_match(
+                    G1nbrs[neighbor], G2nbrs[G2_nbr]
+                ):
+                    return False
+        # syntactic check has already verified that neighbors are symmetric
+
+    return True
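
For context, a minimal usage sketch of the matcher with semantic checks enabled; `categorical_node_match` is the stock helper for building a `node_match` callable from a node attribute:

    import networkx as nx
    from networkx.algorithms import isomorphism as iso

    G1 = nx.path_graph(3)
    G2 = nx.path_graph(3)
    nx.set_node_attributes(G1, "red", "color")
    nx.set_node_attributes(G2, "red", "color")

    # node_match is consulted by _semantic_feasibility for every candidate pair
    nm = iso.categorical_node_match("color", None)
    gm = iso.GraphMatcher(G1, G2, node_match=nm)
    assert gm.is_isomorphic()
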


 class GraphMatcher(vf2.GraphMatcher):
@@ -76,10 +106,14 @@ class GraphMatcher(vf2.GraphMatcher):

         """
         vf2.GraphMatcher.__init__(self, G1, G2)
+
         self.node_match = node_match
         self.edge_match = edge_match
+
+        # These will be modified during checks to minimize code repeat.
         self.G1_adj = self.G1.adj
         self.G2_adj = self.G2.adj
+
     semantic_feasibility = _semantic_feasibility


@@ -119,14 +153,35 @@ class DiGraphMatcher(vf2.DiGraphMatcher):

         """
         vf2.DiGraphMatcher.__init__(self, G1, G2)
+
         self.node_match = node_match
         self.edge_match = edge_match
+
+        # These will be modified during checks to minimize code repeat.
         self.G1_adj = self.G1.adj
         self.G2_adj = self.G2.adj

     def semantic_feasibility(self, G1_node, G2_node):
         """Returns True if mapping G1_node to G2_node is semantically feasible."""
-        pass
+
+        # Test node_match and also test edge_match on successors
+        feasible = _semantic_feasibility(self, G1_node, G2_node)
+        if not feasible:
+            return False
+
+        # Test edge_match on predecessors
+        self.G1_adj = self.G1.pred
+        self.G2_adj = self.G2.pred
+        feasible = _semantic_feasibility(self, G1_node, G2_node)
+        self.G1_adj = self.G1.adj
+        self.G2_adj = self.G2.adj
+
+        return feasible
+
+
+# The "semantics" of edge_match are different for multi(di)graphs, but
+# the implementation is the same.  So, technically we do not need to
+# provide "multi" versions, but we do so to match NetworkX's base classes.


 class MultiGraphMatcher(GraphMatcher):
diff --git a/networkx/algorithms/link_analysis/hits_alg.py b/networkx/algorithms/link_analysis/hits_alg.py
index ec8d0cf04..e7b5141aa 100644
--- a/networkx/algorithms/link_analysis/hits_alg.py
+++ b/networkx/algorithms/link_analysis/hits_alg.py
@@ -1,11 +1,12 @@
 """Hubs and authorities analysis of graph structure.
 """
 import networkx as nx
-__all__ = ['hits']

+__all__ = ["hits"]

-@nx._dispatchable(preserve_edge_attrs={'G': {'weight': 1}})
-def hits(G, max_iter=100, tol=1e-08, nstart=None, normalized=True):
+
+@nx._dispatchable(preserve_edge_attrs={"G": {"weight": 1}})
+def hits(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True):
     """Returns HITS hubs and authorities values for nodes.

     The HITS algorithm computes two numbers for a node.
@@ -69,7 +70,81 @@ def hits(G, max_iter=100, tol=1e-08, nstart=None, normalized=True):
        doi:10.1145/324133.324140.
        http://www.cs.cornell.edu/home/kleinber/auth.pdf.
     """
-    pass
+    import numpy as np
+    import scipy as sp
+
+    if len(G) == 0:
+        return {}, {}
+    A = nx.adjacency_matrix(G, nodelist=list(G), dtype=float)
+
+    if nstart is not None:
+        nstart = np.array(list(nstart.values()))
+    if max_iter <= 0:
+        raise nx.PowerIterationFailedConvergence(max_iter)
+    try:
+        _, _, vt = sp.sparse.linalg.svds(A, k=1, v0=nstart, maxiter=max_iter, tol=tol)
+    except sp.sparse.linalg.ArpackNoConvergence as exc:
+        raise nx.PowerIterationFailedConvergence(max_iter) from exc
+
+    a = vt.flatten().real
+    h = A @ a
+    if normalized:
+        h /= h.sum()
+        a /= a.sum()
+    hubs = dict(zip(G, map(float, h)))
+    authorities = dict(zip(G, map(float, a)))
+    return hubs, authorities
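
A quick usage sketch of the SVD-based implementation above, on an arbitrary toy digraph:

    import networkx as nx

    G = nx.DiGraph([(0, 1), (1, 2), (2, 0), (1, 3)])
    hubs, authorities = nx.hits(G)
    # with normalized=True (the default) both score vectors sum to 1
    assert abs(sum(hubs.values()) - 1.0) < 1e-9
    assert abs(sum(authorities.values()) - 1.0) < 1e-9
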
+
+
+def _hits_python(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True):
+    if isinstance(G, nx.MultiGraph | nx.MultiDiGraph):
+        raise Exception("hits() not defined for graphs with multiedges.")
+    if len(G) == 0:
+        return {}, {}
+    # choose fixed starting vector if not given
+    if nstart is None:
+        h = dict.fromkeys(G, 1.0 / G.number_of_nodes())
+    else:
+        h = nstart
+        # normalize starting vector
+        s = 1.0 / sum(h.values())
+        for k in h:
+            h[k] *= s
+    for _ in range(max_iter):  # power iteration: make up to max_iter iterations
+        hlast = h
+        h = dict.fromkeys(hlast.keys(), 0)
+        a = dict.fromkeys(hlast.keys(), 0)
+        # this "matrix multiply" looks odd because it is
+        # doing a left multiply a^T=hlast^T*G
+        for n in h:
+            for nbr in G[n]:
+                a[nbr] += hlast[n] * G[n][nbr].get("weight", 1)
+        # now multiply h=Ga
+        for n in h:
+            for nbr in G[n]:
+                h[n] += a[nbr] * G[n][nbr].get("weight", 1)
+        # normalize vector
+        s = 1.0 / max(h.values())
+        for n in h:
+            h[n] *= s
+        # normalize vector
+        s = 1.0 / max(a.values())
+        for n in a:
+            a[n] *= s
+        # check convergence, l1 norm
+        err = sum(abs(h[n] - hlast[n]) for n in h)
+        if err < tol:
+            break
+    else:
+        raise nx.PowerIterationFailedConvergence(max_iter)
+    if normalized:
+        s = 1.0 / sum(a.values())
+        for n in a:
+            a[n] *= s
+        s = 1.0 / sum(h.values())
+        for n in h:
+            h[n] *= s
+    return h, a


 def _hits_numpy(G, normalized=True):
@@ -132,10 +207,31 @@ def _hits_numpy(G, normalized=True):
        doi:10.1145/324133.324140.
        http://www.cs.cornell.edu/home/kleinber/auth.pdf.
     """
-    pass
-
-
-def _hits_scipy(G, max_iter=100, tol=1e-06, nstart=None, normalized=True):
+    import numpy as np
+
+    if len(G) == 0:
+        return {}, {}
+    adj_ary = nx.to_numpy_array(G)
+    # Hub matrix
+    H = adj_ary @ adj_ary.T
+    e, ev = np.linalg.eig(H)
+    h = ev[:, np.argmax(e)]  # eigenvector corresponding to the maximum eigenvalue
+    # Authority matrix
+    A = adj_ary.T @ adj_ary
+    e, ev = np.linalg.eig(A)
+    a = ev[:, np.argmax(e)]  # eigenvector corresponding to the maximum eigenvalue
+    if normalized:
+        h /= h.sum()
+        a /= a.sum()
+    else:
+        h /= h.max()
+        a /= a.max()
+    hubs = dict(zip(G, map(float, h)))
+    authorities = dict(zip(G, map(float, a)))
+    return hubs, authorities
+
+
+def _hits_scipy(G, max_iter=100, tol=1.0e-6, nstart=None, normalized=True):
     """Returns HITS hubs and authorities values for nodes.


@@ -203,4 +299,39 @@ def _hits_scipy(G, max_iter=100, tol=1e-06, nstart=None, normalized=True):
        doi:10.1145/324133.324140.
        http://www.cs.cornell.edu/home/kleinber/auth.pdf.
     """
-    pass
+    import numpy as np
+
+    if len(G) == 0:
+        return {}, {}
+    A = nx.to_scipy_sparse_array(G, nodelist=list(G))
+    (n, _) = A.shape  # should be square
+    ATA = A.T @ A  # authority matrix
+    # choose fixed starting vector if not given
+    if nstart is None:
+        x = np.ones((n, 1)) / n
+    else:
+        x = np.array([nstart.get(n, 0) for n in list(G)], dtype=float)
+        x /= x.sum()
+
+    # power iteration on authority matrix
+    i = 0
+    while True:
+        xlast = x
+        x = ATA @ x
+        x /= x.max()
+        # check convergence, l1 norm
+        err = np.absolute(x - xlast).sum()
+        if err < tol:
+            break
+        if i > max_iter:
+            raise nx.PowerIterationFailedConvergence(max_iter)
+        i += 1
+
+    a = x.flatten()
+    h = A @ a
+    if normalized:
+        h /= h.sum()
+        a /= a.sum()
+    hubs = dict(zip(G, map(float, h)))
+    authorities = dict(zip(G, map(float, a)))
+    return hubs, authorities
diff --git a/networkx/algorithms/link_analysis/pagerank_alg.py b/networkx/algorithms/link_analysis/pagerank_alg.py
index e4b75265e..2a4af6e5f 100644
--- a/networkx/algorithms/link_analysis/pagerank_alg.py
+++ b/networkx/algorithms/link_analysis/pagerank_alg.py
@@ -1,12 +1,22 @@
 """PageRank analysis of graph structure. """
 from warnings import warn
+
 import networkx as nx
-__all__ = ['pagerank', 'google_matrix']
+
+__all__ = ["pagerank", "google_matrix"]


-@nx._dispatchable(edge_attrs='weight')
-def pagerank(G, alpha=0.85, personalization=None, max_iter=100, tol=1e-06,
-    nstart=None, weight='weight', dangling=None):
+@nx._dispatchable(edge_attrs="weight")
+def pagerank(
+    G,
+    alpha=0.85,
+    personalization=None,
+    max_iter=100,
+    tol=1.0e-6,
+    nstart=None,
+    weight="weight",
+    dangling=None,
+):
     """Returns the PageRank of the nodes in the graph.

     PageRank computes a ranking of the nodes in the graph G based on
@@ -97,12 +107,75 @@ def pagerank(G, alpha=0.85, personalization=None, max_iter=100, tol=1e-06,
        http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf

     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
-def google_matrix(G, alpha=0.85, personalization=None, nodelist=None,
-    weight='weight', dangling=None):
+    return _pagerank_scipy(
+        G, alpha, personalization, max_iter, tol, nstart, weight, dangling
+    )
+
+
+def _pagerank_python(
+    G,
+    alpha=0.85,
+    personalization=None,
+    max_iter=100,
+    tol=1.0e-6,
+    nstart=None,
+    weight="weight",
+    dangling=None,
+):
+    if len(G) == 0:
+        return {}
+
+    D = G.to_directed()
+
+    # Create a copy in (right) stochastic form
+    W = nx.stochastic_graph(D, weight=weight)
+    N = W.number_of_nodes()
+
+    # Choose fixed starting vector if not given
+    if nstart is None:
+        x = dict.fromkeys(W, 1.0 / N)
+    else:
+        # Normalized nstart vector
+        s = sum(nstart.values())
+        x = {k: v / s for k, v in nstart.items()}
+
+    if personalization is None:
+        # Assign uniform personalization vector if not given
+        p = dict.fromkeys(W, 1.0 / N)
+    else:
+        s = sum(personalization.values())
+        p = {k: v / s for k, v in personalization.items()}
+
+    if dangling is None:
+        # Use personalization vector if dangling vector not specified
+        dangling_weights = p
+    else:
+        s = sum(dangling.values())
+        dangling_weights = {k: v / s for k, v in dangling.items()}
+    dangling_nodes = [n for n in W if W.out_degree(n, weight=weight) == 0.0]
+
+    # power iteration: make up to max_iter iterations
+    for _ in range(max_iter):
+        xlast = x
+        x = dict.fromkeys(xlast.keys(), 0)
+        danglesum = alpha * sum(xlast[n] for n in dangling_nodes)
+        for n in x:
+            # this matrix multiply looks odd because it is
+            # doing a left multiply x^T=xlast^T*W
+            for _, nbr, wt in W.edges(n, data=weight):
+                x[nbr] += alpha * xlast[n] * wt
+            x[n] += danglesum * dangling_weights.get(n, 0) + (1.0 - alpha) * p.get(n, 0)
+        # check convergence, l1 norm
+        err = sum(abs(x[n] - xlast[n]) for n in x)
+        if err < N * tol:
+            return x
+    raise nx.PowerIterationFailedConvergence(max_iter)
+
+
+@nx._dispatchable(edge_attrs="weight")
+def google_matrix(
+    G, alpha=0.85, personalization=None, nodelist=None, weight="weight", dangling=None
+):
     """Returns the Google matrix of the graph.

     Parameters
@@ -159,11 +232,45 @@ def google_matrix(G, alpha=0.85, personalization=None, nodelist=None,
     --------
     pagerank
     """
-    pass
+    import numpy as np
+
+    if nodelist is None:
+        nodelist = list(G)
+
+    A = nx.to_numpy_array(G, nodelist=nodelist, weight=weight)
+    N = len(G)
+    if N == 0:
+        return A
+
+    # Personalization vector
+    if personalization is None:
+        p = np.repeat(1.0 / N, N)
+    else:
+        p = np.array([personalization.get(n, 0) for n in nodelist], dtype=float)
+        if p.sum() == 0:
+            raise ZeroDivisionError
+        p /= p.sum()

+    # Dangling nodes
+    if dangling is None:
+        dangling_weights = p
+    else:
+        # Convert the dangling dictionary into an array in nodelist order
+        dangling_weights = np.array([dangling.get(n, 0) for n in nodelist], dtype=float)
+        dangling_weights /= dangling_weights.sum()
+    dangling_nodes = np.where(A.sum(axis=1) == 0)[0]

-def _pagerank_numpy(G, alpha=0.85, personalization=None, weight='weight',
-    dangling=None):
+    # Assign dangling_weights to any dangling nodes (nodes with no out links)
+    A[dangling_nodes] = dangling_weights
+
+    A /= A.sum(axis=1)[:, np.newaxis]  # Normalize rows to sum to 1
+
+    return alpha * A + (1 - alpha) * p
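
Because each row of the returned matrix mixes a stochastic row of A with the personalization vector, every row is itself a probability distribution; a quick check on a toy graph with a dangling node:

    import networkx as nx
    import numpy as np

    G = nx.DiGraph([(0, 1), (0, 2), (1, 2)])  # node 2 has no out-links
    M = nx.google_matrix(G, alpha=0.85)
    assert np.allclose(M.sum(axis=1), 1.0)
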
+
+
+def _pagerank_numpy(
+    G, alpha=0.85, personalization=None, weight="weight", dangling=None
+):
     """Returns the PageRank of the nodes in the graph.

     PageRank computes a ranking of the nodes in the graph G based on
@@ -232,11 +339,32 @@ def _pagerank_numpy(G, alpha=0.85, personalization=None, weight='weight',
        The PageRank citation ranking: Bringing order to the Web. 1999
        http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
     """
-    pass
-
-
-def _pagerank_scipy(G, alpha=0.85, personalization=None, max_iter=100, tol=
-    1e-06, nstart=None, weight='weight', dangling=None):
+    import numpy as np
+
+    if len(G) == 0:
+        return {}
+    M = google_matrix(
+        G, alpha, personalization=personalization, weight=weight, dangling=dangling
+    )
+    # use numpy LAPACK solver
+    eigenvalues, eigenvectors = np.linalg.eig(M.T)
+    ind = np.argmax(eigenvalues)
+    # eigenvector of largest eigenvalue is at ind, normalized
+    largest = np.array(eigenvectors[:, ind]).flatten().real
+    norm = largest.sum()
+    return dict(zip(G, map(float, largest / norm)))
+
+
+def _pagerank_scipy(
+    G,
+    alpha=0.85,
+    personalization=None,
+    max_iter=100,
+    tol=1.0e-6,
+    nstart=None,
+    weight="weight",
+    dangling=None,
+):
     """Returns the PageRank of the nodes in the graph.

     PageRank computes a ranking of the nodes in the graph G based on
@@ -321,4 +449,51 @@ def _pagerank_scipy(G, alpha=0.85, personalization=None, max_iter=100, tol=
        The PageRank citation ranking: Bringing order to the Web. 1999
        http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
     """
-    pass
+    import numpy as np
+    import scipy as sp
+
+    N = len(G)
+    if N == 0:
+        return {}
+
+    nodelist = list(G)
+    A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, dtype=float)
+    S = A.sum(axis=1)
+    S[S != 0] = 1.0 / S[S != 0]
+    # TODO: csr_array
+    Q = sp.sparse.csr_array(sp.sparse.spdiags(S.T, 0, *A.shape))
+    A = Q @ A
+
+    # initial vector
+    if nstart is None:
+        x = np.repeat(1.0 / N, N)
+    else:
+        x = np.array([nstart.get(n, 0) for n in nodelist], dtype=float)
+        x /= x.sum()
+
+    # Personalization vector
+    if personalization is None:
+        p = np.repeat(1.0 / N, N)
+    else:
+        p = np.array([personalization.get(n, 0) for n in nodelist], dtype=float)
+        if p.sum() == 0:
+            raise ZeroDivisionError
+        p /= p.sum()
+    # Dangling nodes
+    if dangling is None:
+        dangling_weights = p
+    else:
+        # Convert the dangling dictionary into an array in nodelist order
+        dangling_weights = np.array([dangling.get(n, 0) for n in nodelist], dtype=float)
+        dangling_weights /= dangling_weights.sum()
+    is_dangling = np.where(S == 0)[0]
+
+    # power iteration: make up to max_iter iterations
+    for _ in range(max_iter):
+        xlast = x
+        x = alpha * (x @ A + sum(x[is_dangling]) * dangling_weights) + (1 - alpha) * p
+        # check convergence, l1 norm
+        err = np.absolute(x - xlast).sum()
+        if err < N * tol:
+            return dict(zip(nodelist, map(float, x)))
+    raise nx.PowerIterationFailedConvergence(max_iter)
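
A short usage sketch of the scipy-backed solver that `pagerank` now delegates to (toy graph chosen arbitrarily):

    import networkx as nx

    G = nx.DiGraph([(0, 1), (1, 2), (2, 0)])
    pr = nx.pagerank(G, alpha=0.85)
    assert abs(sum(pr.values()) - 1.0) < 1e-9

    # personalization biases the restart distribution toward chosen nodes
    pr_biased = nx.pagerank(G, personalization={0: 1.0})
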
diff --git a/networkx/algorithms/link_prediction.py b/networkx/algorithms/link_prediction.py
index daa47abeb..1fb24243a 100644
--- a/networkx/algorithms/link_prediction.py
+++ b/networkx/algorithms/link_prediction.py
@@ -1,13 +1,23 @@
 """
 Link prediction algorithms.
 """
+
+
 from math import log
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['resource_allocation_index', 'jaccard_coefficient',
-    'adamic_adar_index', 'preferential_attachment',
-    'cn_soundarajan_hopcroft', 'ra_index_soundarajan_hopcroft',
-    'within_inter_cluster', 'common_neighbor_centrality']
+
+__all__ = [
+    "resource_allocation_index",
+    "jaccard_coefficient",
+    "adamic_adar_index",
+    "preferential_attachment",
+    "cn_soundarajan_hopcroft",
+    "ra_index_soundarajan_hopcroft",
+    "within_inter_cluster",
+    "common_neighbor_centrality",
+]


 def _apply_prediction(G, func, ebunch=None):
@@ -25,22 +35,30 @@ def _apply_prediction(G, func, ebunch=None):
     non-edges in the graph `G` will be used.

     """
-    pass
-
-
-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+    if ebunch is None:
+        ebunch = nx.non_edges(G)
+    else:
+        for u, v in ebunch:
+            if u not in G:
+                raise nx.NodeNotFound(f"Node {u} not in G.")
+            if v not in G:
+                raise nx.NodeNotFound(f"Node {v} not in G.")
+    return ((u, v, func(u, v)) for u, v in ebunch)
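
Note that `_apply_prediction` validates a user-supplied ebunch eagerly but returns a lazy generator, so scores are only computed on iteration. A usage sketch through one of the public predictors defined below:

    import networkx as nx

    G = nx.complete_graph(5)
    preds = nx.jaccard_coefficient(G, [(0, 1), (2, 3)])
    for u, v, p in preds:
        print(f"({u}, {v}) -> {p:.8f}")  # 3 common neighbors / 5 in the union
    # (0, 1) -> 0.60000000
    # (2, 3) -> 0.60000000
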
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def resource_allocation_index(G, ebunch=None):
-    """Compute the resource allocation index of all node pairs in ebunch.
+    r"""Compute the resource allocation index of all node pairs in ebunch.

     Resource allocation index of `u` and `v` is defined as

     .. math::

-        \\sum_{w \\in \\Gamma(u) \\cap \\Gamma(v)} \\frac{1}{|\\Gamma(w)|}
+        \sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{1}{|\Gamma(w)|}

-    where $\\Gamma(u)$ denotes the set of neighbors of $u$.
+    where $\Gamma(u)$ denotes the set of neighbors of $u$.

     Parameters
     ----------
@@ -84,22 +102,26 @@ def resource_allocation_index(G, ebunch=None):
        Eur. Phys. J. B 71 (2009) 623.
        https://arxiv.org/pdf/0901.0553.pdf
     """
-    pass

+    def predict(u, v):
+        return sum(1 / G.degree(w) for w in nx.common_neighbors(G, u, v))
+
+    return _apply_prediction(G, predict, ebunch)

-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def jaccard_coefficient(G, ebunch=None):
-    """Compute the Jaccard coefficient of all node pairs in ebunch.
+    r"""Compute the Jaccard coefficient of all node pairs in ebunch.

     Jaccard coefficient of nodes `u` and `v` is defined as

     .. math::

-        \\frac{|\\Gamma(u) \\cap \\Gamma(v)|}{|\\Gamma(u) \\cup \\Gamma(v)|}
+        \frac{|\Gamma(u) \cap \Gamma(v)|}{|\Gamma(u) \cup \Gamma(v)|}

-    where $\\Gamma(u)$ denotes the set of neighbors of $u$.
+    where $\Gamma(u)$ denotes the set of neighbors of $u$.

     Parameters
     ----------
@@ -142,22 +164,29 @@ def jaccard_coefficient(G, ebunch=None):
            The Link Prediction Problem for Social Networks (2004).
            http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
     """
-    pass

+    def predict(u, v):
+        union_size = len(set(G[u]) | set(G[v]))
+        if union_size == 0:
+            return 0
+        return len(nx.common_neighbors(G, u, v)) / union_size
+
+    return _apply_prediction(G, predict, ebunch)

-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def adamic_adar_index(G, ebunch=None):
-    """Compute the Adamic-Adar index of all node pairs in ebunch.
+    r"""Compute the Adamic-Adar index of all node pairs in ebunch.

     Adamic-Adar index of `u` and `v` is defined as

     .. math::

-        \\sum_{w \\in \\Gamma(u) \\cap \\Gamma(v)} \\frac{1}{\\log |\\Gamma(w)|}
+        \sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{1}{\log |\Gamma(w)|}

-    where $\\Gamma(u)$ denotes the set of neighbors of $u$.
+    where $\Gamma(u)$ denotes the set of neighbors of $u$.
     This index leads to zero-division for nodes only connected via self-loops.
     It is intended to be used when no self-loops are present.

@@ -202,14 +231,18 @@ def adamic_adar_index(G, ebunch=None):
            The Link Prediction Problem for Social Networks (2004).
            http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
     """
-    pass

+    def predict(u, v):
+        return sum(1 / log(G.degree(w)) for w in nx.common_neighbors(G, u, v))
+
+    return _apply_prediction(G, predict, ebunch)

-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def common_neighbor_centrality(G, ebunch=None, alpha=0.8):
-    """Return the CCPA score for each pair of nodes.
+    r"""Return the CCPA score for each pair of nodes.

     Compute the Common Neighbor and Centrality based Parameterized Algorithm(CCPA)
     score of all node pairs in ebunch.
@@ -218,10 +251,10 @@ def common_neighbor_centrality(G, ebunch=None, alpha=0.8):

     .. math::

-        \\alpha \\cdot (|\\Gamma (u){\\cap }^{}\\Gamma (v)|)+(1-\\alpha )\\cdot \\frac{N}{{d}_{uv}}
+        \alpha \cdot (|\Gamma (u){\cap }^{}\Gamma (v)|)+(1-\alpha )\cdot \frac{N}{{d}_{uv}}

-    where $\\Gamma(u)$ denotes the set of neighbors of $u$, $\\Gamma(v)$ denotes the
-    set of neighbors of $v$, $\\alpha$ is  parameter varies between [0,1], $N$ denotes
+    where $\Gamma(u)$ denotes the set of neighbors of $u$, $\Gamma(v)$ denotes the
+    set of neighbors of $v$, $\alpha$ is a parameter that varies in $[0,1]$, $N$ denotes
     total number of nodes in the Graph and ${d}_{uv}$ denotes shortest distance
     between $u$ and $v$.

@@ -288,22 +321,44 @@ def common_neighbor_centrality(G, ebunch=None, alpha=0.8):
            Sci Rep 10, 364 (2020).
            https://doi.org/10.1038/s41598-019-57304-y
     """
-    pass

+    # When alpha == 1, the CCPA score simplifies to the number of common neighbors.
+    if alpha == 1:
+
+        def predict(u, v):
+            if u == v:
+                raise nx.NetworkXAlgorithmError("Self loops are not supported")
+
+            return len(nx.common_neighbors(G, u, v))

-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+    else:
+        spl = dict(nx.shortest_path_length(G))
+        inf = float("inf")
+
+        def predict(u, v):
+            if u == v:
+                raise nx.NetworkXAlgorithmError("Self loops are not supported")
+            path_len = spl[u].get(v, inf)
+
+            n_nbrs = len(nx.common_neighbors(G, u, v))
+            return alpha * n_nbrs + (1 - alpha) * len(G) / path_len
+
+    return _apply_prediction(G, predict, ebunch)
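
A usage sketch of the CCPA score with the default alpha = 0.8:

    import networkx as nx

    G = nx.complete_graph(5)
    for u, v, p in nx.common_neighbor_centrality(G, [(0, 1), (2, 3)]):
        print(f"({u}, {v}) -> {p}")
    # each pair scores 0.8 * 3 + 0.2 * 5 / 1 = 3.4
    # (3 common neighbors, N = 5 nodes, shortest distance 1)
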
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def preferential_attachment(G, ebunch=None):
-    """Compute the preferential attachment score of all node pairs in ebunch.
+    r"""Compute the preferential attachment score of all node pairs in ebunch.

     Preferential attachment score of `u` and `v` is defined as

     .. math::

-        |\\Gamma(u)| |\\Gamma(v)|
+        |\Gamma(u)| |\Gamma(v)|

-    where $\\Gamma(u)$ denotes the set of neighbors of $u$.
+    where $\Gamma(u)$ denotes the set of neighbors of $u$.

     Parameters
     ----------
@@ -346,14 +401,18 @@ def preferential_attachment(G, ebunch=None):
            The Link Prediction Problem for Social Networks (2004).
            http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
     """
-    pass
+
+    def predict(u, v):
+        return G.degree(u) * G.degree(v)
+
+    return _apply_prediction(G, predict, ebunch)


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
-@nx._dispatchable(node_attrs='community')
-def cn_soundarajan_hopcroft(G, ebunch=None, community='community'):
-    """Count the number of common neighbors of all node pairs in ebunch
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable(node_attrs="community")
+def cn_soundarajan_hopcroft(G, ebunch=None, community="community"):
+    r"""Count the number of common neighbors of all node pairs in ebunch
         using community information.

     For two nodes $u$ and $v$, this function computes the number of
@@ -362,10 +421,10 @@ def cn_soundarajan_hopcroft(G, ebunch=None, community='community'):

     .. math::

-        |\\Gamma(u) \\cap \\Gamma(v)| + \\sum_{w \\in \\Gamma(u) \\cap \\Gamma(v)} f(w)
+        |\Gamma(u) \cap \Gamma(v)| + \sum_{w \in \Gamma(u) \cap \Gamma(v)} f(w)

     where $f(w)$ equals 1 if $w$ belongs to the same community as $u$
-    and $v$ or 0 otherwise and $\\Gamma(u)$ denotes the set of
+    and $v$ or 0 otherwise and $\Gamma(u)$ denotes the set of
     neighbors of $u$.

     Parameters
@@ -422,14 +481,24 @@ def cn_soundarajan_hopcroft(G, ebunch=None, community='community'):
        World Wide Web (WWW '12 Companion). ACM, New York, NY, USA, 607-608.
        http://doi.acm.org/10.1145/2187980.2188150
     """
-    pass

+    def predict(u, v):
+        Cu = _community(G, u, community)
+        Cv = _community(G, v, community)
+        cnbors = nx.common_neighbors(G, u, v)
+        neighbors = (
+            sum(_community(G, w, community) == Cu for w in cnbors) if Cu == Cv else 0
+        )
+        return len(cnbors) + neighbors

-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
-@nx._dispatchable(node_attrs='community')
-def ra_index_soundarajan_hopcroft(G, ebunch=None, community='community'):
-    """Compute the resource allocation index of all node pairs in
+    return _apply_prediction(G, predict, ebunch)
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable(node_attrs="community")
+def ra_index_soundarajan_hopcroft(G, ebunch=None, community="community"):
+    r"""Compute the resource allocation index of all node pairs in
     ebunch using community information.

     For two nodes $u$ and $v$, this function computes the resource
@@ -438,10 +507,10 @@ def ra_index_soundarajan_hopcroft(G, ebunch=None, community='community'):

     .. math::

-        \\sum_{w \\in \\Gamma(u) \\cap \\Gamma(v)} \\frac{f(w)}{|\\Gamma(w)|}
+        \sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{f(w)}{|\Gamma(w)|}

     where $f(w)$ equals 1 if $w$ belongs to the same community as $u$
-    and $v$ or 0 otherwise and $\\Gamma(u)$ denotes the set of
+    and $v$ or 0 otherwise and $\Gamma(u)$ denotes the set of
     neighbors of $u$.

     Parameters
@@ -500,13 +569,22 @@ def ra_index_soundarajan_hopcroft(G, ebunch=None, community='community'):
        World Wide Web (WWW '12 Companion). ACM, New York, NY, USA, 607-608.
        http://doi.acm.org/10.1145/2187980.2188150
     """
-    pass

+    def predict(u, v):
+        Cu = _community(G, u, community)
+        Cv = _community(G, v, community)
+        if Cu != Cv:
+            return 0
+        cnbors = nx.common_neighbors(G, u, v)
+        return sum(1 / G.degree(w) for w in cnbors if _community(G, w, community) == Cu)
+
+    return _apply_prediction(G, predict, ebunch)

-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
-@nx._dispatchable(node_attrs='community')
-def within_inter_cluster(G, ebunch=None, delta=0.001, community='community'):
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable(node_attrs="community")
+def within_inter_cluster(G, ebunch=None, delta=0.001, community="community"):
     """Compute the ratio of within- and inter-cluster common neighbors
     of all node pairs in ebunch.

@@ -583,9 +661,28 @@ def within_inter_cluster(G, ebunch=None, delta=0.001, community='community'):
        Artificial Intelligence (SBIA'12)
        https://doi.org/10.1007/978-3-642-34459-6_10
     """
-    pass
+    if delta <= 0:
+        raise nx.NetworkXAlgorithmError("Delta must be greater than zero")
+
+    def predict(u, v):
+        Cu = _community(G, u, community)
+        Cv = _community(G, v, community)
+        if Cu != Cv:
+            return 0
+        cnbors = nx.common_neighbors(G, u, v)
+        within = {w for w in cnbors if _community(G, w, community) == Cu}
+        inter = cnbors - within
+        return len(within) / (len(inter) + delta)
+
+    return _apply_prediction(G, predict, ebunch)
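
A usage sketch with explicit community labels; nodes 2 and 3 share the community of 0 and 4, while node 1 does not:

    import networkx as nx

    G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 4), (2, 4), (3, 4)])
    nx.set_node_attributes(G, {0: 0, 1: 1, 2: 0, 3: 0, 4: 0}, "community")
    for u, v, p in nx.within_inter_cluster(G, [(0, 4)]):
        print(f"({u}, {v}) -> {p:.8f}")
    # within = {2, 3}, inter = {1}  =>  2 / (1 + 0.001) = 1.99800200
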


 def _community(G, u, community):
     """Get the community of the given node."""
-    pass
+    node_u = G.nodes[u]
+    try:
+        return node_u[community]
+    except KeyError as err:
+        raise nx.NetworkXAlgorithmError(
+            f"No community information available for Node {u}"
+        ) from err
diff --git a/networkx/algorithms/lowest_common_ancestors.py b/networkx/algorithms/lowest_common_ancestors.py
index eada700b2..f695ec208 100644
--- a/networkx/algorithms/lowest_common_ancestors.py
+++ b/networkx/algorithms/lowest_common_ancestors.py
@@ -2,13 +2,18 @@
 from collections import defaultdict
 from collections.abc import Mapping, Set
 from itertools import combinations_with_replacement
+
 import networkx as nx
 from networkx.utils import UnionFind, arbitrary_element, not_implemented_for
-__all__ = ['all_pairs_lowest_common_ancestor',
-    'tree_all_pairs_lowest_common_ancestor', 'lowest_common_ancestor']
+
+__all__ = [
+    "all_pairs_lowest_common_ancestor",
+    "tree_all_pairs_lowest_common_ancestor",
+    "lowest_common_ancestor",
+]


-@not_implemented_for('undirected')
+@not_implemented_for("undirected")
 @nx._dispatchable
 def all_pairs_lowest_common_ancestor(G, pairs=None):
     """Return the lowest common ancestor of all pairs or the provided pairs
@@ -58,10 +63,55 @@ def all_pairs_lowest_common_ancestor(G, pairs=None):
     --------
     lowest_common_ancestor
     """
-    pass
-
-
-@not_implemented_for('undirected')
+    if not nx.is_directed_acyclic_graph(G):
+        raise nx.NetworkXError("LCA only defined on directed acyclic graphs.")
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept("LCA meaningless on null graphs.")
+
+    if pairs is None:
+        pairs = combinations_with_replacement(G, 2)
+    else:
+        # Convert iterator to iterable, if necessary. Trim duplicates.
+        pairs = dict.fromkeys(pairs)
+        # Verify that each of the nodes in the provided pairs is in G
+        nodeset = set(G)
+        for pair in pairs:
+            if set(pair) - nodeset:
+                raise nx.NodeNotFound(
+                    f"Node(s) {set(pair) - nodeset} from pair {pair} not in G."
+                )
+
+    # Once input validation is done, construct the generator
+    def generate_lca_from_pairs(G, pairs):
+        ancestor_cache = {}
+
+        for v, w in pairs:
+            if v not in ancestor_cache:
+                ancestor_cache[v] = nx.ancestors(G, v)
+                ancestor_cache[v].add(v)
+            if w not in ancestor_cache:
+                ancestor_cache[w] = nx.ancestors(G, w)
+                ancestor_cache[w].add(w)
+
+            common_ancestors = ancestor_cache[v] & ancestor_cache[w]
+
+            if common_ancestors:
+                common_ancestor = next(iter(common_ancestors))
+                while True:
+                    successor = None
+                    for lower_ancestor in G.successors(common_ancestor):
+                        if lower_ancestor in common_ancestors:
+                            successor = lower_ancestor
+                            break
+                    if successor is None:
+                        break
+                    common_ancestor = successor
+                yield ((v, w), common_ancestor)
+
+    return generate_lca_from_pairs(G, pairs)
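
A usage sketch on a small DAG (a diamond), showing the ancestor-intersection walk above:

    import networkx as nx

    G = nx.DiGraph([(0, 1), (0, 2), (1, 3), (2, 3)])
    print(dict(nx.all_pairs_lowest_common_ancestor(G, pairs=[(1, 2), (1, 3)])))
    # {(1, 2): 0, (1, 3): 1}
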
+
+
+@not_implemented_for("undirected")
 @nx._dispatchable
 def lowest_common_ancestor(G, node1, node2, default=None):
     """Compute the lowest common ancestor of the given pair of nodes.
@@ -91,13 +141,18 @@ def lowest_common_ancestor(G, node1, node2, default=None):
     See Also
     --------
     all_pairs_lowest_common_ancestor"""
-    pass
+
+    ans = list(all_pairs_lowest_common_ancestor(G, pairs=[(node1, node2)]))
+    if ans:
+        assert len(ans) == 1
+        return ans[0][1]
+    return default


-@not_implemented_for('undirected')
+@not_implemented_for("undirected")
 @nx._dispatchable
 def tree_all_pairs_lowest_common_ancestor(G, root=None, pairs=None):
-    """Yield the lowest common ancestor for sets of pairs in a tree.
+    r"""Yield the lowest common ancestor for sets of pairs in a tree.

     Parameters
     ----------
@@ -142,7 +197,7 @@ def tree_all_pairs_lowest_common_ancestor(G, root=None, pairs=None):
     -----
     Only defined on non-null trees represented with directed edges from
     parents to children. Uses Tarjan's off-line lowest-common-ancestors
-    algorithm. Runs in time $O(4 \\times (V + E + P))$ time, where 4 is the largest
+    algorithm. Runs in $O(4 \times (V + E + P))$ time, where 4 is the largest
     value of the inverse Ackermann function likely to ever come up in actual
     use, and $P$ is the number of pairs requested (or $V^2$ if all are needed).

@@ -154,4 +209,60 @@ def tree_all_pairs_lowest_common_ancestor(G, root=None, pairs=None):
     all_pairs_lowest_common_ancestor: similar routine for general DAGs
     lowest_common_ancestor: just a single pair for general DAGs
     """
-    pass
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept("LCA meaningless on null graphs.")
+
+    # Index pairs of interest for efficient lookup from either side.
+    if pairs is not None:
+        pair_dict = defaultdict(set)
+        # See note on all_pairs_lowest_common_ancestor.
+        if not isinstance(pairs, Mapping | Set):
+            pairs = set(pairs)
+        for u, v in pairs:
+            for n in (u, v):
+                if n not in G:
+                    msg = f"The node {str(n)} is not in the digraph."
+                    raise nx.NodeNotFound(msg)
+            pair_dict[u].add(v)
+            pair_dict[v].add(u)
+
+    # If root is not specified, find the unique node with in-degree 0 and
+    # use it. Raise an error if none is found or more than one is. Also check
+    # for any nodes with in-degree larger than 1, which would imply G is not a
+    # tree.
+    if root is None:
+        for n, deg in G.in_degree:
+            if deg == 0:
+                if root is not None:
+                    msg = "No root specified and tree has multiple sources."
+                    raise nx.NetworkXError(msg)
+                root = n
+            # checking deg>1 is not sufficient for MultiDiGraphs
+            elif deg > 1 and len(G.pred[n]) > 1:
+                msg = "Tree LCA only defined on trees; use DAG routine."
+                raise nx.NetworkXError(msg)
+    if root is None:
+        raise nx.NetworkXError("Graph contains a cycle.")
+
+    # Iterative implementation of Tarjan's offline lca algorithm
+    # as described in CLRS on page 521 (2nd edition)/page 584 (3rd edition)
+    uf = UnionFind()
+    ancestors = {}
+    for node in G:
+        ancestors[node] = uf[node]
+
+    colors = defaultdict(bool)
+    for node in nx.dfs_postorder_nodes(G, root):
+        colors[node] = True
+        for v in pair_dict[node] if pairs is not None else G:
+            if colors[v]:
+                # If the user requested both directions of a pair, give it.
+                # Otherwise, just give one.
+                if pairs is not None and (node, v) in pairs:
+                    yield (node, v), ancestors[uf[v]]
+                if pairs is None or (v, node) in pairs:
+                    yield (v, node), ancestors[uf[v]]
+        if node != root:
+            parent = arbitrary_element(G.pred[node])
+            uf.union(parent, node)
+            ancestors[uf[parent]] = parent
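
A usage sketch of Tarjan's off-line routine on a directed balanced binary tree, assuming the usual breadth-first node labeling with 0 as the root:

    import networkx as nx

    # 0 -> {1, 2}, 1 -> {3, 4}, 2 -> {5, 6}
    G = nx.balanced_tree(2, 2, create_using=nx.DiGraph)
    pairs = [(3, 4), (3, 6)]
    print(dict(nx.tree_all_pairs_lowest_common_ancestor(G, root=0, pairs=pairs)))
    # {(3, 4): 1, (3, 6): 0}
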
diff --git a/networkx/algorithms/matching.py b/networkx/algorithms/matching.py
index 5ab7a21ea..f346c2e05 100644
--- a/networkx/algorithms/matching.py
+++ b/networkx/algorithms/matching.py
@@ -1,17 +1,25 @@
 """Functions for computing and verifying matchings in a graph."""
 from collections import Counter
 from itertools import combinations, repeat
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['is_matching', 'is_maximal_matching', 'is_perfect_matching',
-    'max_weight_matching', 'min_weight_matching', 'maximal_matching']
+
+__all__ = [
+    "is_matching",
+    "is_maximal_matching",
+    "is_perfect_matching",
+    "max_weight_matching",
+    "min_weight_matching",
+    "maximal_matching",
+]


-@not_implemented_for('multigraph')
-@not_implemented_for('directed')
+@not_implemented_for("multigraph")
+@not_implemented_for("directed")
 @nx._dispatchable
 def maximal_matching(G):
-    """Find a maximal matching in the graph.
+    r"""Find a maximal matching in the graph.

     A matching is a subset of edges in which no node occurs more than once.
     A maximal matching cannot add more edges and still be a matching.
@@ -37,7 +45,16 @@ def maximal_matching(G):
     The algorithm greedily selects a maximal matching M of the graph G
     (i.e. no superset of M exists). It runs in $O(|E|)$ time.
     """
-    pass
+    matching = set()
+    nodes = set()
+    for edge in G.edges():
+        # If the edge isn't covered, add it to the matching
+        # then remove neighborhood of u and v from consideration.
+        u, v = edge
+        if u not in nodes and v not in nodes and u != v:
+            matching.add(edge)
+            nodes.update(edge)
+    return matching
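
The greedy routine above is order-dependent; any result is maximal but not necessarily maximum. A quick sketch:

    import networkx as nx

    G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4)])
    M = nx.maximal_matching(G)  # e.g. {(1, 2), (3, 4)} with this edge order
    assert nx.is_maximal_matching(G, M)
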


 def matching_dict_to_set(matching):
@@ -54,7 +71,15 @@ def matching_dict_to_set(matching):
     example, key ``u`` with value ``v`` and key ``v`` with value ``u``.

     """
-    pass
+    edges = set()
+    for edge in matching.items():
+        u, v = edge
+        if (v, u) in edges or edge in edges:
+            continue
+        if u == v:
+            raise nx.NetworkXError(f"Selfloops cannot appear in matchings {edge}")
+        edges.add(edge)
+    return edges


 @nx._dispatchable
@@ -98,7 +123,24 @@ def is_matching(G, matching):
     True

     """
-    pass
+    if isinstance(matching, dict):
+        matching = matching_dict_to_set(matching)
+
+    nodes = set()
+    for edge in matching:
+        if len(edge) != 2:
+            raise nx.NetworkXError(f"matching has non-2-tuple edge {edge}")
+        u, v = edge
+        if u not in G or v not in G:
+            raise nx.NetworkXError(f"matching contains edge {edge} with node not in G")
+        if u == v:
+            return False
+        if not G.has_edge(u, v):
+            return False
+        if u in nodes or v in nodes:
+            return False
+        nodes.update(edge)
+    return True


 @nx._dispatchable
@@ -132,7 +174,35 @@ def is_maximal_matching(G, matching):
     True

     """
-    pass
+    if isinstance(matching, dict):
+        matching = matching_dict_to_set(matching)
+    # If the given set is not a matching, then it is not a maximal matching.
+    edges = set()
+    nodes = set()
+    for edge in matching:
+        if len(edge) != 2:
+            raise nx.NetworkXError(f"matching has non-2-tuple edge {edge}")
+        u, v = edge
+        if u not in G or v not in G:
+            raise nx.NetworkXError(f"matching contains edge {edge} with node not in G")
+        if u == v:
+            return False
+        if not G.has_edge(u, v):
+            return False
+        if u in nodes or v in nodes:
+            return False
+        nodes.update(edge)
+        edges.add(edge)
+        edges.add((v, u))
+    # A matching is maximal if adding any new edge from G to it
+    # causes the resulting set to match some node twice.
+    # Be careful to check for adding selfloops
+    for u, v in G.edges:
+        if (u, v) not in edges:
+            # could add edge (u, v) to edges and have a bigger matching
+            if u not in nodes and v not in nodes and u != v:
+                return False
+    return True


 @nx._dispatchable
@@ -167,13 +237,30 @@ def is_perfect_matching(G, matching):
     True

     """
-    pass
-
-
-@not_implemented_for('multigraph')
-@not_implemented_for('directed')
-@nx._dispatchable(edge_attrs='weight')
-def min_weight_matching(G, weight='weight'):
+    if isinstance(matching, dict):
+        matching = matching_dict_to_set(matching)
+
+    nodes = set()
+    for edge in matching:
+        if len(edge) != 2:
+            raise nx.NetworkXError(f"matching has non-2-tuple edge {edge}")
+        u, v = edge
+        if u not in G or v not in G:
+            raise nx.NetworkXError(f"matching contains edge {edge} with node not in G")
+        if u == v:
+            return False
+        if not G.has_edge(u, v):
+            return False
+        if u in nodes or v in nodes:
+            return False
+        nodes.update(edge)
+    return len(nodes) == len(G)
+
+
+@not_implemented_for("multigraph")
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
+def min_weight_matching(G, weight="weight"):
     """Computing a minimum-weight maximal matching of G.

     Use the maximum-weight algorithm with edge weights subtracted
@@ -221,13 +308,20 @@ def min_weight_matching(G, weight='weight'):
     --------
     max_weight_matching
     """
-    pass
-
-
-@not_implemented_for('multigraph')
-@not_implemented_for('directed')
-@nx._dispatchable(edge_attrs='weight')
-def max_weight_matching(G, maxcardinality=False, weight='weight'):
+    if len(G.edges) == 0:
+        return max_weight_matching(G, maxcardinality=True, weight=weight)
+    G_edges = G.edges(data=weight, default=1)
+    max_weight = 1 + max(w for _, _, w in G_edges)
+    InvG = nx.Graph()
+    edges = ((u, v, max_weight - w) for u, v, w in G_edges)
+    InvG.add_weighted_edges_from(edges, weight=weight)
+    return max_weight_matching(InvG, maxcardinality=True, weight=weight)
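
A sketch contrasting the inverted-weight reduction above with the maximum-weight routine it delegates to (edge orientations in the output may vary):

    import networkx as nx

    G = nx.Graph()
    G.add_weighted_edges_from([(1, 2, 3), (2, 3, 1), (3, 4, 3), (1, 4, 1)])
    # min_weight_matching inverts weights to (1 + max weight - w) and runs the
    # max-cardinality maximum-weight matcher, so it prefers the weight-1 edges
    print(nx.min_weight_matching(G))  # e.g. {(2, 3), (1, 4)}
    print(nx.max_weight_matching(G))  # e.g. {(1, 2), (3, 4)}
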
+
+
+@not_implemented_for("multigraph")
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
+def max_weight_matching(G, maxcardinality=False, weight="weight"):
     """Compute a maximum-weighted matching of G.

     A matching is a subset of edges in which no node occurs more than once.
@@ -286,4 +380,772 @@ def max_weight_matching(G, maxcardinality=False, weight='weight'):
     .. [1] "Efficient Algorithms for Finding Maximum Matching in Graphs",
        Zvi Galil, ACM Computing Surveys, 1986.
     """
-    pass
+    #
+    # The algorithm is taken from "Efficient Algorithms for Finding Maximum
+    # Matching in Graphs" by Zvi Galil, ACM Computing Surveys, 1986.
+    # It is based on the "blossom" method for finding augmenting paths and
+    # the "primal-dual" method for finding a matching of maximum weight, both
+    # methods invented by Jack Edmonds.
+    #
+    # A C program for maximum weight matching by Ed Rothberg was used
+    # extensively to validate this new code.
+    #
+    # Many terms used in the code comments are explained in the paper
+    # by Galil. You will probably need the paper to make sense of this code.
+    #
+
+    class NoNode:
+        """Dummy value which is different from any node."""
+
+    class Blossom:
+        """Representation of a non-trivial blossom or sub-blossom."""
+
+        __slots__ = ["childs", "edges", "mybestedges"]
+
+        # b.childs is an ordered list of b's sub-blossoms, starting with
+        # the base and going round the blossom.
+
+        # b.edges is the list of b's connecting edges, such that
+        # b.edges[i] = (v, w) where v is a vertex in b.childs[i]
+        # and w is a vertex in b.childs[wrap(i+1)].
+
+        # If b is a top-level S-blossom,
+        # b.mybestedges is a list of least-slack edges to neighboring
+        # S-blossoms, or None if no such list has been computed yet.
+        # This is used for efficient computation of delta3.
+
+        # Generate the blossom's leaf vertices.
+        def leaves(self):
+            stack = [*self.childs]
+            while stack:
+                t = stack.pop()
+                if isinstance(t, Blossom):
+                    stack.extend(t.childs)
+                else:
+                    yield t
+
+    # Get a list of vertices.
+    gnodes = list(G)
+    if not gnodes:
+        return set()  # don't bother with empty graphs
+
+    # Find the maximum edge weight.
+    maxweight = 0
+    allinteger = True
+    for i, j, d in G.edges(data=True):
+        wt = d.get(weight, 1)
+        if i != j and wt > maxweight:
+            maxweight = wt
+        allinteger = allinteger and (str(type(wt)).split("'")[1] in ("int", "long"))
+
+    # If v is a matched vertex, mate[v] is its partner vertex.
+    # If v is a single vertex, v does not occur as a key in mate.
+    # Initially all vertices are single; updated during augmentation.
+    mate = {}
+
+    # If b is a top-level blossom,
+    # label.get(b) is None if b is unlabeled (free),
+    #                 1 if b is an S-blossom,
+    #                 2 if b is a T-blossom.
+    # The label of a vertex is found by looking at the label of its top-level
+    # containing blossom.
+    # If v is a vertex inside a T-blossom, label[v] is 2 iff v is reachable
+    # from an S-vertex outside the blossom.
+    # Labels are assigned during a stage and reset after each augmentation.
+    label = {}
+
+    # If b is a labeled top-level blossom,
+    # labeledge[b] = (v, w) is the edge through which b obtained its label
+    # such that w is a vertex in b, or None if b's base vertex is single.
+    # If w is a vertex inside a T-blossom and label[w] == 2,
+    # labeledge[w] = (v, w) is an edge through which w is reachable from
+    # outside the blossom.
+    labeledge = {}
+
+    # If v is a vertex, inblossom[v] is the top-level blossom to which v
+    # belongs.
+    # If v is a top-level vertex, inblossom[v] == v since v is itself
+    # a (trivial) top-level blossom.
+    # Initially all vertices are top-level trivial blossoms.
+    inblossom = dict(zip(gnodes, gnodes))
+
+    # If b is a sub-blossom,
+    # blossomparent[b] is its immediate parent (sub-)blossom.
+    # If b is a top-level blossom, blossomparent[b] is None.
+    blossomparent = dict(zip(gnodes, repeat(None)))
+
+    # If b is a (sub-)blossom,
+    # blossombase[b] is its base VERTEX (i.e. recursive sub-blossom).
+    blossombase = dict(zip(gnodes, gnodes))
+
+    # If w is a free vertex (or an unreached vertex inside a T-blossom),
+    # bestedge[w] = (v, w) is the least-slack edge from an S-vertex,
+    # or None if there is no such edge.
+    # If b is a (possibly trivial) top-level S-blossom,
+    # bestedge[b] = (v, w) is the least-slack edge to a different S-blossom
+    # (v inside b), or None if there is no such edge.
+    # This is used for efficient computation of delta2 and delta3.
+    bestedge = {}
+
+    # If v is a vertex,
+    # dualvar[v] = 2 * u(v) where u(v) is the v's variable in the dual
+    # optimization problem (if all edge weights are integers, multiplication
+    # by two ensures that all values remain integers throughout the algorithm).
+    # Initially, u(v) = maxweight / 2.
+    dualvar = dict(zip(gnodes, repeat(maxweight)))
+
+    # If b is a non-trivial blossom,
+    # blossomdual[b] = z(b) where z(b) is b's variable in the dual
+    # optimization problem.
+    blossomdual = {}
+
+    # If (v, w) in allowedge or (w, v) in allowedge, then the edge
+    # (v, w) is known to have zero slack in the optimization problem;
+    # otherwise the edge may or may not have zero slack.
+    allowedge = {}
+
+    # Queue of newly discovered S-vertices.
+    queue = []
+
+    # Return 2 * slack of edge (v, w) (does not work inside blossoms).
+    def slack(v, w):
+        return dualvar[v] + dualvar[w] - 2 * G[v][w].get(weight, 1)
+
+    # Assign label t to the top-level blossom containing vertex w,
+    # coming through an edge from vertex v.
+    def assignLabel(w, t, v):
+        b = inblossom[w]
+        assert label.get(w) is None and label.get(b) is None
+        label[w] = label[b] = t
+        if v is not None:
+            labeledge[w] = labeledge[b] = (v, w)
+        else:
+            labeledge[w] = labeledge[b] = None
+        bestedge[w] = bestedge[b] = None
+        if t == 1:
+            # b became an S-vertex/blossom; add it (or its leaf vertices) to the queue.
+            if isinstance(b, Blossom):
+                queue.extend(b.leaves())
+            else:
+                queue.append(b)
+        elif t == 2:
+            # b became a T-vertex/blossom; assign label S to its mate.
+            # (If b is a non-trivial blossom, its base is the only vertex
+            # with an external mate.)
+            base = blossombase[b]
+            assignLabel(mate[base], 1, base)
+
+    # Trace back from vertices v and w to discover either a new blossom
+    # or an augmenting path. Return the base vertex of the new blossom,
+    # or NoNode if an augmenting path was found.
+    def scanBlossom(v, w):
+        # Trace back from v and w, placing breadcrumbs as we go.
+        path = []
+        base = NoNode
+        while v is not NoNode:
+            # Look for a breadcrumb in v's blossom or put a new breadcrumb.
+            b = inblossom[v]
+            if label[b] & 4:
+                base = blossombase[b]
+                break
+            assert label[b] == 1
+            path.append(b)
+            label[b] = 5
+            # Trace one step back.
+            if labeledge[b] is None:
+                # The base of blossom b is single; stop tracing this path.
+                assert blossombase[b] not in mate
+                v = NoNode
+            else:
+                assert labeledge[b][0] == mate[blossombase[b]]
+                v = labeledge[b][0]
+                b = inblossom[v]
+                assert label[b] == 2
+                # b is a T-blossom; trace one more step back.
+                v = labeledge[b][0]
+            # Swap v and w so that we alternate between both paths.
+            if w is not NoNode:
+                v, w = w, v
+        # Remove breadcrumbs.
+        for b in path:
+            label[b] = 1
+        # Return base vertex, if we found one.
+        return base
+
+    # Construct a new blossom with given base, through S-vertices v and w.
+    # Label the new blossom as S; set its dual variable to zero;
+    # relabel its T-vertices to S and add them to the queue.
+    def addBlossom(base, v, w):
+        bb = inblossom[base]
+        bv = inblossom[v]
+        bw = inblossom[w]
+        # Create blossom.
+        b = Blossom()
+        blossombase[b] = base
+        blossomparent[b] = None
+        blossomparent[bb] = b
+        # Make list of sub-blossoms and their interconnecting edge endpoints.
+        b.childs = path = []
+        b.edges = edgs = [(v, w)]
+        # Trace back from v to base.
+        while bv != bb:
+            # Add bv to the new blossom.
+            blossomparent[bv] = b
+            path.append(bv)
+            edgs.append(labeledge[bv])
+            assert label[bv] == 2 or (
+                label[bv] == 1 and labeledge[bv][0] == mate[blossombase[bv]]
+            )
+            # Trace one step back.
+            v = labeledge[bv][0]
+            bv = inblossom[v]
+        # Add base sub-blossom; reverse lists.
+        path.append(bb)
+        path.reverse()
+        edgs.reverse()
+        # Trace back from w to base.
+        while bw != bb:
+            # Add bw to the new blossom.
+            blossomparent[bw] = b
+            path.append(bw)
+            edgs.append((labeledge[bw][1], labeledge[bw][0]))
+            assert label[bw] == 2 or (
+                label[bw] == 1 and labeledge[bw][0] == mate[blossombase[bw]]
+            )
+            # Trace one step back.
+            w = labeledge[bw][0]
+            bw = inblossom[w]
+        # Set label to S.
+        assert label[bb] == 1
+        label[b] = 1
+        labeledge[b] = labeledge[bb]
+        # Set dual variable to zero.
+        blossomdual[b] = 0
+        # Relabel vertices.
+        for v in b.leaves():
+            if label[inblossom[v]] == 2:
+                # This T-vertex now turns into an S-vertex because it becomes
+                # part of an S-blossom; add it to the queue.
+                queue.append(v)
+            inblossom[v] = b
+        # Compute b.mybestedges.
+        bestedgeto = {}
+        for bv in path:
+            if isinstance(bv, Blossom):
+                if bv.mybestedges is not None:
+                    # Walk this subblossom's least-slack edges.
+                    nblist = bv.mybestedges
+                    # The sub-blossom won't need this data again.
+                    bv.mybestedges = None
+                else:
+                    # This subblossom does not have a list of least-slack
+                    # edges; get the information from the vertices.
+                    nblist = [
+                        (v, w) for v in bv.leaves() for w in G.neighbors(v) if v != w
+                    ]
+            else:
+                nblist = [(bv, w) for w in G.neighbors(bv) if bv != w]
+            for k in nblist:
+                (i, j) = k
+                if inblossom[j] == b:
+                    i, j = j, i
+                bj = inblossom[j]
+                if (
+                    bj != b
+                    and label.get(bj) == 1
+                    and ((bj not in bestedgeto) or slack(i, j) < slack(*bestedgeto[bj]))
+                ):
+                    bestedgeto[bj] = k
+            # Forget about least-slack edge of the subblossom.
+            bestedge[bv] = None
+        b.mybestedges = list(bestedgeto.values())
+        # Select bestedge[b].
+        mybestedge = None
+        bestedge[b] = None
+        for k in b.mybestedges:
+            kslack = slack(*k)
+            if mybestedge is None or kslack < mybestslack:
+                mybestedge = k
+                mybestslack = kslack
+        bestedge[b] = mybestedge
+
+    # Expand the given top-level blossom.
+    def expandBlossom(b, endstage):
+        # This is an obnoxiously complicated recursive function for the sake of
+        # a stack-transformation.  So, we hack around the complexity by using
+        # a trampoline pattern.  By yielding the arguments to each recursive
+        # call, we keep the actual callstack flat.
+
+        def _recurse(b, endstage):
+            # Convert sub-blossoms into top-level blossoms.
+            for s in b.childs:
+                blossomparent[s] = None
+                if isinstance(s, Blossom):
+                    if endstage and blossomdual[s] == 0:
+                        # Recursively expand this sub-blossom.
+                        yield s
+                    else:
+                        for v in s.leaves():
+                            inblossom[v] = s
+                else:
+                    inblossom[s] = s
+            # If we expand a T-blossom during a stage, its sub-blossoms must be
+            # relabeled.
+            if (not endstage) and label.get(b) == 2:
+                # Start at the sub-blossom through which the expanding
+                # blossom obtained its label, and relabel sub-blossoms until
+                # we reach the base.
+                # Figure out through which sub-blossom the expanding blossom
+                # obtained its label initially.
+                entrychild = inblossom[labeledge[b][1]]
+                # Decide in which direction we will go round the blossom.
+                j = b.childs.index(entrychild)
+                if j & 1:
+                    # Start index is odd; go forward and wrap.
+                    j -= len(b.childs)
+                    jstep = 1
+                else:
+                    # Start index is even; go backward.
+                    jstep = -1
+                # Move along the blossom until we get to the base.
+                v, w = labeledge[b]
+                while j != 0:
+                    # Relabel the T-sub-blossom.
+                    if jstep == 1:
+                        p, q = b.edges[j]
+                    else:
+                        q, p = b.edges[j - 1]
+                    label[w] = None
+                    label[q] = None
+                    assignLabel(w, 2, v)
+                    # Step to the next S-sub-blossom and note its forward edge.
+                    allowedge[(p, q)] = allowedge[(q, p)] = True
+                    j += jstep
+                    if jstep == 1:
+                        v, w = b.edges[j]
+                    else:
+                        w, v = b.edges[j - 1]
+                    # Step to the next T-sub-blossom.
+                    allowedge[(v, w)] = allowedge[(w, v)] = True
+                    j += jstep
+                # Relabel the base T-sub-blossom WITHOUT stepping through to
+                # its mate (so don't call assignLabel).
+                bw = b.childs[j]
+                label[w] = label[bw] = 2
+                labeledge[w] = labeledge[bw] = (v, w)
+                bestedge[bw] = None
+                # Continue along the blossom until we get back to entrychild.
+                j += jstep
+                while b.childs[j] != entrychild:
+                    # Examine the vertices of the sub-blossom to see whether
+                    # it is reachable from a neighboring S-vertex outside the
+                    # expanding blossom.
+                    bv = b.childs[j]
+                    if label.get(bv) == 1:
+                        # This sub-blossom just got label S through one of its
+                        # neighbors; leave it be.
+                        j += jstep
+                        continue
+                    if isinstance(bv, Blossom):
+                        for v in bv.leaves():
+                            if label.get(v):
+                                break
+                    else:
+                        v = bv
+                    # If the sub-blossom contains a reachable vertex, assign
+                    # label T to the sub-blossom.
+                    if label.get(v):
+                        assert label[v] == 2
+                        assert inblossom[v] == bv
+                        label[v] = None
+                        label[mate[blossombase[bv]]] = None
+                        assignLabel(v, 2, labeledge[v][0])
+                    j += jstep
+            # Remove the expanded blossom entirely.
+            label.pop(b, None)
+            labeledge.pop(b, None)
+            bestedge.pop(b, None)
+            del blossomparent[b]
+            del blossombase[b]
+            del blossomdual[b]
+
+        # Now, we apply the trampoline pattern.  We simulate a recursive
+        # callstack by maintaining a stack of generators, each yielding a
+        # sequence of function arguments.  We grow the stack by appending a call
+        # to _recurse on each argument tuple, and shrink the stack whenever a
+        # generator is exhausted.
+        stack = [_recurse(b, endstage)]
+        while stack:
+            top = stack[-1]
+            for s in top:
+                stack.append(_recurse(s, endstage))
+                break
+            else:
+                stack.pop()
+
+    # Swap matched/unmatched edges over an alternating path through blossom b
+    # between vertex v and the base vertex. Keep blossom bookkeeping
+    # consistent.
+    def augmentBlossom(b, v):
+        # This is an obnoxiously complicated recursive function for the sake of
+        # a stack-transformation.  So, we hack around the complexity by using
+        # a trampoline pattern.  By yielding the arguments to each recursive
+        # call, we keep the actual callstack flat.
+
+        def _recurse(b, v):
+            # Bubble up through the blossom tree from vertex v to an immediate
+            # sub-blossom of b.
+            t = v
+            while blossomparent[t] != b:
+                t = blossomparent[t]
+            # Recursively deal with the first sub-blossom.
+            if isinstance(t, Blossom):
+                yield (t, v)
+            # Decide in which direction we will go round the blossom.
+            i = j = b.childs.index(t)
+            if i & 1:
+                # Start index is odd; go forward and wrap.
+                j -= len(b.childs)
+                jstep = 1
+            else:
+                # Start index is even; go backward.
+                jstep = -1
+            # Move along the blossom until we get to the base.
+            while j != 0:
+                # Step to the next sub-blossom and augment it recursively.
+                j += jstep
+                t = b.childs[j]
+                if jstep == 1:
+                    w, x = b.edges[j]
+                else:
+                    x, w = b.edges[j - 1]
+                if isinstance(t, Blossom):
+                    yield (t, w)
+                # Step to the next sub-blossom and augment it recursively.
+                j += jstep
+                t = b.childs[j]
+                if isinstance(t, Blossom):
+                    yield (t, x)
+                # Match the edge connecting those sub-blossoms.
+                mate[w] = x
+                mate[x] = w
+            # Rotate the list of sub-blossoms to put the new base at the front.
+            b.childs = b.childs[i:] + b.childs[:i]
+            b.edges = b.edges[i:] + b.edges[:i]
+            blossombase[b] = blossombase[b.childs[0]]
+            assert blossombase[b] == v
+
+        # Now, we apply the trampoline pattern.  We simulate a recursive
+        # callstack by maintaining a stack of generators, each yielding a
+        # sequence of function arguments.  We grow the stack by appending a call
+        # to _recurse on each argument tuple, and shrink the stack whenever a
+        # generator is exhausted.
+        stack = [_recurse(b, v)]
+        while stack:
+            top = stack[-1]
+            for args in top:
+                stack.append(_recurse(*args))
+                break
+            else:
+                stack.pop()
+
+    # Swap matched/unmatched edges over an alternating path between two
+    # single vertices. The augmenting path runs through S-vertices v and w.
+    def augmentMatching(v, w):
+        for s, j in ((v, w), (w, v)):
+            # Match vertex s to vertex j. Then trace back from s
+            # until we find a single vertex, swapping matched and unmatched
+            # edges as we go.
+            while 1:
+                bs = inblossom[s]
+                assert label[bs] == 1
+                assert (labeledge[bs] is None and blossombase[bs] not in mate) or (
+                    labeledge[bs][0] == mate[blossombase[bs]]
+                )
+                # Augment through the S-blossom from s to base.
+                if isinstance(bs, Blossom):
+                    augmentBlossom(bs, s)
+                # Update mate[s]
+                mate[s] = j
+                # Trace one step back.
+                if labeledge[bs] is None:
+                    # Reached single vertex; stop.
+                    break
+                t = labeledge[bs][0]
+                bt = inblossom[t]
+                assert label[bt] == 2
+                # Trace one more step back.
+                s, j = labeledge[bt]
+                # Augment through the T-blossom from j to base.
+                assert blossombase[bt] == t
+                if isinstance(bt, Blossom):
+                    augmentBlossom(bt, j)
+                # Update mate[j]
+                mate[j] = s
+
+    # Verify that the optimum solution has been reached.
+    def verifyOptimum():
+        if maxcardinality:
+            # Vertices may have negative dual;
+            # find a constant non-negative number to add to all vertex duals.
+            vdualoffset = max(0, -min(dualvar.values()))
+        else:
+            vdualoffset = 0
+        # 0. all dual variables are non-negative
+        assert min(dualvar.values()) + vdualoffset >= 0
+        assert len(blossomdual) == 0 or min(blossomdual.values()) >= 0
+        # 1. all edges have non-negative slack and
+        #    all matched edges have zero slack;
+        for i, j, d in G.edges(data=True):
+            wt = d.get(weight, 1)
+            if i == j:
+                continue  # ignore self-loops
+            s = dualvar[i] + dualvar[j] - 2 * wt
+            iblossoms = [i]
+            jblossoms = [j]
+            while blossomparent[iblossoms[-1]] is not None:
+                iblossoms.append(blossomparent[iblossoms[-1]])
+            while blossomparent[jblossoms[-1]] is not None:
+                jblossoms.append(blossomparent[jblossoms[-1]])
+            iblossoms.reverse()
+            jblossoms.reverse()
+            for bi, bj in zip(iblossoms, jblossoms):
+                if bi != bj:
+                    break
+                s += 2 * blossomdual[bi]
+            assert s >= 0
+            if mate.get(i) == j or mate.get(j) == i:
+                assert mate[i] == j and mate[j] == i
+                assert s == 0
+        # 2. all single vertices have zero dual value;
+        for v in gnodes:
+            assert (v in mate) or dualvar[v] + vdualoffset == 0
+        # 3. all blossoms with positive dual value are full.
+        for b in blossomdual:
+            if blossomdual[b] > 0:
+                assert len(b.edges) % 2 == 1
+                for i, j in b.edges[1::2]:
+                    assert mate[i] == j and mate[j] == i
+        # Ok.
+
+    # Main loop: continue until no further improvement is possible.
+    while 1:
+        # Each iteration of this loop is a "stage".
+        # A stage finds an augmenting path and uses that to improve
+        # the matching.
+
+        # Remove labels from top-level blossoms/vertices.
+        label.clear()
+        labeledge.clear()
+
+        # Forget all about least-slack edges.
+        bestedge.clear()
+        for b in blossomdual:
+            b.mybestedges = None
+
+        # Loss of labeling means that we cannot be sure that currently
+        # allowable edges remain allowable throughout this stage.
+        allowedge.clear()
+
+        # Make queue empty.
+        queue[:] = []
+
+        # Label single blossoms/vertices with S and put them in the queue.
+        for v in gnodes:
+            if (v not in mate) and label.get(inblossom[v]) is None:
+                assignLabel(v, 1, None)
+
+        # Loop until we succeed in augmenting the matching.
+        augmented = 0
+        while 1:
+            # Each iteration of this loop is a "substage".
+            # A substage tries to find an augmenting path;
+            # if found, the path is used to improve the matching and
+            # the stage ends. If there is no augmenting path, the
+            # primal-dual method is used to pump some slack out of
+            # the dual variables.
+
+            # Continue labeling until all vertices which are reachable
+            # through an alternating path have got a label.
+            while queue and not augmented:
+                # Take an S vertex from the queue.
+                v = queue.pop()
+                assert label[inblossom[v]] == 1
+
+                # Scan its neighbors:
+                for w in G.neighbors(v):
+                    if w == v:
+                        continue  # ignore self-loops
+                    # w is a neighbor to v
+                    bv = inblossom[v]
+                    bw = inblossom[w]
+                    if bv == bw:
+                        # this edge is internal to a blossom; ignore it
+                        continue
+                    if (v, w) not in allowedge:
+                        kslack = slack(v, w)
+                        if kslack <= 0:
+                            # edge k has zero slack => it is allowable
+                            allowedge[(v, w)] = allowedge[(w, v)] = True
+                    if (v, w) in allowedge:
+                        if label.get(bw) is None:
+                            # (C1) w is a free vertex;
+                            # label w with T and label its mate with S (R12).
+                            assignLabel(w, 2, v)
+                        elif label.get(bw) == 1:
+                            # (C2) w is an S-vertex (not in the same blossom);
+                            # follow back-links to discover either an
+                            # augmenting path or a new blossom.
+                            base = scanBlossom(v, w)
+                            if base is not NoNode:
+                                # Found a new blossom; add it to the blossom
+                                # bookkeeping and turn it into an S-blossom.
+                                addBlossom(base, v, w)
+                            else:
+                                # Found an augmenting path; augment the
+                                # matching and end this stage.
+                                augmentMatching(v, w)
+                                augmented = 1
+                                break
+                        elif label.get(w) is None:
+                            # w is inside a T-blossom, but w itself has not
+                            # yet been reached from outside the blossom;
+                            # mark it as reached (we need this to relabel
+                            # during T-blossom expansion).
+                            assert label[bw] == 2
+                            label[w] = 2
+                            labeledge[w] = (v, w)
+                    elif label.get(bw) == 1:
+                        # keep track of the least-slack non-allowable edge to
+                        # a different S-blossom.
+                        if bestedge.get(bv) is None or kslack < slack(*bestedge[bv]):
+                            bestedge[bv] = (v, w)
+                    elif label.get(w) is None:
+                        # w is a free vertex (or an unreached vertex inside
+                        # a T-blossom) but we can not reach it yet;
+                        # keep track of the least-slack edge that reaches w.
+                        if bestedge.get(w) is None or kslack < slack(*bestedge[w]):
+                            bestedge[w] = (v, w)
+
+            if augmented:
+                break
+
+            # There is no augmenting path under these constraints;
+            # compute delta and reduce slack in the optimization problem.
+            # (Note that our vertex dual variables, edge slacks and deltas
+            # are pre-multiplied by two.)
+            deltatype = -1
+            delta = deltaedge = deltablossom = None
+
+            # Compute delta1: the minimum value of any vertex dual.
+            if not maxcardinality:
+                deltatype = 1
+                delta = min(dualvar.values())
+
+            # Compute delta2: the minimum slack on any edge between
+            # an S-vertex and a free vertex.
+            for v in G.nodes():
+                if label.get(inblossom[v]) is None and bestedge.get(v) is not None:
+                    d = slack(*bestedge[v])
+                    if deltatype == -1 or d < delta:
+                        delta = d
+                        deltatype = 2
+                        deltaedge = bestedge[v]
+
+            # Compute delta3: half the minimum slack on any edge between
+            # a pair of S-blossoms.
+            for b in blossomparent:
+                if (
+                    blossomparent[b] is None
+                    and label.get(b) == 1
+                    and bestedge.get(b) is not None
+                ):
+                    kslack = slack(*bestedge[b])
+                    if allinteger:
+                        assert (kslack % 2) == 0
+                        d = kslack // 2
+                    else:
+                        d = kslack / 2.0
+                    if deltatype == -1 or d < delta:
+                        delta = d
+                        deltatype = 3
+                        deltaedge = bestedge[b]
+
+            # Compute delta4: minimum z variable of any T-blossom.
+            for b in blossomdual:
+                if (
+                    blossomparent[b] is None
+                    and label.get(b) == 2
+                    and (deltatype == -1 or blossomdual[b] < delta)
+                ):
+                    delta = blossomdual[b]
+                    deltatype = 4
+                    deltablossom = b
+
+            if deltatype == -1:
+                # No further improvement possible; max-cardinality optimum
+                # reached. Do a final delta update to make the optimum
+                # verifiable.
+                assert maxcardinality
+                deltatype = 1
+                delta = max(0, min(dualvar.values()))
+
+            # Update dual variables according to delta.
+            for v in gnodes:
+                if label.get(inblossom[v]) == 1:
+                    # S-vertex: 2*u = 2*u - 2*delta
+                    dualvar[v] -= delta
+                elif label.get(inblossom[v]) == 2:
+                    # T-vertex: 2*u = 2*u + 2*delta
+                    dualvar[v] += delta
+            for b in blossomdual:
+                if blossomparent[b] is None:
+                    if label.get(b) == 1:
+                        # top-level S-blossom: z = z + 2*delta
+                        blossomdual[b] += delta
+                    elif label.get(b) == 2:
+                        # top-level T-blossom: z = z - 2*delta
+                        blossomdual[b] -= delta
+
+            # Take action at the point where minimum delta occurred.
+            if deltatype == 1:
+                # No further improvement possible; optimum reached.
+                break
+            elif deltatype == 2:
+                # Use the least-slack edge to continue the search.
+                (v, w) = deltaedge
+                assert label[inblossom[v]] == 1
+                allowedge[(v, w)] = allowedge[(w, v)] = True
+                queue.append(v)
+            elif deltatype == 3:
+                # Use the least-slack edge to continue the search.
+                (v, w) = deltaedge
+                allowedge[(v, w)] = allowedge[(w, v)] = True
+                assert label[inblossom[v]] == 1
+                queue.append(v)
+            elif deltatype == 4:
+                # Expand the least-z blossom.
+                expandBlossom(deltablossom, False)
+
+            # End of this substage.
+
+        # Paranoia check that the matching is symmetric.
+        for v in mate:
+            assert mate[mate[v]] == v
+
+        # Stop when no more augmenting path can be found.
+        if not augmented:
+            break
+
+        # End of a stage; expand all S-blossoms which have zero dual.
+        for b in list(blossomdual.keys()):
+            if b not in blossomdual:
+                continue  # already expanded
+            if blossomparent[b] is None and label.get(b) == 1 and blossomdual[b] == 0:
+                expandBlossom(b, True)
+
+    # Verify that we reached the optimum solution (only for integer weights).
+    if allinteger:
+        verifyOptimum()
+
+    return matching_dict_to_set(mate)
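
A minimal usage sketch of the routine this hunk completes (the blossom
bookkeeping above belongs to what networkx exposes as
`nx.max_weight_matching`; the toy graph below is a made-up example).
With all-integer weights, the `verifyOptimum` self-check above also runs:

    import networkx as nx

    G = nx.Graph()
    G.add_weighted_edges_from([(1, 2, 6), (1, 3, 2), (2, 4, 7), (3, 5, 9), (4, 5, 3)])
    # Returns a set of (u, v) pairs; every node is matched at most once.
    matching = nx.max_weight_matching(G)
    assert nx.is_matching(G, matching)
    # maxcardinality=True first maximizes the number of matched nodes.
    nx.max_weight_matching(G, maxcardinality=True)
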
diff --git a/networkx/algorithms/minors/contraction.py b/networkx/algorithms/minors/contraction.py
index 0af897e59..9f4d89fa8 100644
--- a/networkx/algorithms/minors/contraction.py
+++ b/networkx/algorithms/minors/contraction.py
@@ -1,11 +1,19 @@
 """Provides functions for computing minors of a graph."""
 from itertools import chain, combinations, permutations, product
+
 import networkx as nx
 from networkx import density
 from networkx.exception import NetworkXException
 from networkx.utils import arbitrary_element
-__all__ = ['contracted_edge', 'contracted_nodes', 'equivalence_classes',
-    'identified_nodes', 'quotient_graph']
+
+__all__ = [
+    "contracted_edge",
+    "contracted_nodes",
+    "equivalence_classes",
+    "identified_nodes",
+    "quotient_graph",
+]
+
 chaini = chain.from_iterable


@@ -65,12 +73,39 @@ def equivalence_classes(iterable, relation):
     >>> equivalence_classes(X, mod3)  # doctest: +SKIP
     {frozenset({1, 4, 7}), frozenset({8, 2, 5}), frozenset({0, 9, 3, 6})}
     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight', returns_graph=True)
-def quotient_graph(G, partition, edge_relation=None, node_data=None,
-    edge_data=None, weight='weight', relabel=False, create_using=None):
+    # For simplicity of implementation, we initialize the return value as a
+    # list of lists, then convert it to a set of sets at the end of the
+    # function.
+    blocks = []
+    # Determine the equivalence class for each element of the iterable.
+    for y in iterable:
+        # Each element y must be in *exactly one* equivalence class.
+        #
+        # Each block is guaranteed to be non-empty
+        for block in blocks:
+            x = arbitrary_element(block)
+            if relation(x, y):
+                block.append(y)
+                break
+        else:
+            # If the element y is not part of any known equivalence class, it
+            # must be in its own, so we create a new singleton equivalence
+            # class for it.
+            blocks.append([y])
+    return {frozenset(block) for block in blocks}
+
+
+@nx._dispatchable(edge_attrs="weight", returns_graph=True)
+def quotient_graph(
+    G,
+    partition,
+    edge_relation=None,
+    node_data=None,
+    edge_data=None,
+    weight="weight",
+    relabel=False,
+    create_using=None,
+):
     """Returns the quotient graph of `G` under the specified equivalence
     relation on nodes.

@@ -261,17 +296,137 @@ def quotient_graph(G, partition, edge_relation=None, node_data=None,
            Cambridge University Press, 2004.

     """
-    pass
-
-
-def _quotient_graph(G, partition, edge_relation, node_data, edge_data,
-    weight, relabel, create_using):
+    # If the user provided an equivalence relation as a function, use it to
+    # compute the blocks of the partition on the nodes of G induced by that
+    # relation.
+    if callable(partition):
+        # equivalence_classes always returns a partition of all nodes of G.
+        partition = equivalence_classes(G, partition)
+        if not nx.community.is_partition(G, partition):
+            raise nx.NetworkXException(
+                "Input `partition` is not an equivalence relation for nodes of G"
+            )
+        return _quotient_graph(
+            G,
+            partition,
+            edge_relation,
+            node_data,
+            edge_data,
+            weight,
+            relabel,
+            create_using,
+        )
+
+    # If the partition is a dict, it is assumed to be one where the keys are
+    # user-defined block labels, and values are block lists, tuples or sets.
+    if isinstance(partition, dict):
+        partition = list(partition.values())
+
+    # If the user provided the partition as a collection of sets, we need
+    # to check whether it covers all nodes of G. If it does not, we
+    # restrict G to a suitable subgraph view.
+    partition_nodes = set().union(*partition)
+    if len(partition_nodes) != len(G):
+        G = G.subgraph(partition_nodes)
+    # Each node in the graph/subgraph must be in exactly one block.
+    if not nx.community.is_partition(G, partition):
+        raise NetworkXException("each node must be in exactly one part of `partition`")
+    return _quotient_graph(
+        G,
+        partition,
+        edge_relation,
+        node_data,
+        edge_data,
+        weight,
+        relabel,
+        create_using,
+    )
+
+
+def _quotient_graph(
+    G, partition, edge_relation, node_data, edge_data, weight, relabel, create_using
+):
     """Construct the quotient graph assuming input has been checked"""
-    pass
-
-
-@nx._dispatchable(preserve_all_attrs=True, mutates_input={'not copy': 4},
-    returns_graph=True)
+    if create_using is None:
+        H = G.__class__()
+    else:
+        H = nx.empty_graph(0, create_using)
+    # By default set some basic information about the subgraph that each block
+    # represents on the nodes in the quotient graph.
+    if node_data is None:
+
+        def node_data(b):
+            S = G.subgraph(b)
+            return {
+                "graph": S,
+                "nnodes": len(S),
+                "nedges": S.number_of_edges(),
+                "density": density(S),
+            }
+
+    # Each block of the partition becomes a node in the quotient graph.
+    partition = [frozenset(b) for b in partition]
+    H.add_nodes_from((b, node_data(b)) for b in partition)
+    # By default, the edge relation is the relation defined as follows. B is
+    # adjacent to C if a node in B is adjacent to a node in C, according to the
+    # edge set of G.
+    #
+    # This is not a particularly efficient implementation of this relation:
+    # there are O(n^2) pairs to check and each check may require O(log n) time
+    # (to check set membership). This can certainly be parallelized.
+    if edge_relation is None:
+
+        def edge_relation(b, c):
+            return any(v in G[u] for u, v in product(b, c))
+
+    # By default, sum the weights of the edges joining pairs of nodes across
+    # blocks to get the weight of the edge joining those two blocks.
+    if edge_data is None:
+
+        def edge_data(b, c):
+            edgedata = (
+                d
+                for u, v, d in G.edges(b | c, data=True)
+                if (u in b and v in c) or (u in c and v in b)
+            )
+            return {"weight": sum(d.get(weight, 1) for d in edgedata)}
+
+    block_pairs = permutations(H, 2) if H.is_directed() else combinations(H, 2)
+    # In a multigraph, add one edge in the quotient graph for each edge
+    # in the original graph.
+    if H.is_multigraph():
+        edges = chaini(
+            (
+                (b, c, G.get_edge_data(u, v, default={}))
+                for u, v in product(b, c)
+                if v in G[u]
+            )
+            for b, c in block_pairs
+            if edge_relation(b, c)
+        )
+    # In a simple graph, apply the edge data function to each pair of
+    # blocks to determine the edge data attributes to apply to each edge
+    # in the quotient graph.
+    else:
+        edges = (
+            (b, c, edge_data(b, c)) for (b, c) in block_pairs if edge_relation(b, c)
+        )
+    H.add_edges_from(edges)
+    # If requested by the user, relabel the nodes to be integers,
+    # numbered in increasing order from zero in the same order as the
+    # iteration order of `partition`.
+    if relabel:
+        # Can't use nx.convert_node_labels_to_integers() here since we
+        # want the order of iteration to be the same for backward
+        # compatibility with the nx.blockmodel() function.
+        labels = {b: i for i, b in enumerate(partition)}
+        H = nx.relabel_nodes(H, labels)
+    return H
+
+
+@nx._dispatchable(
+    preserve_all_attrs=True, mutates_input={"not copy": 4}, returns_graph=True
+)
 def contracted_nodes(G, u, v, self_loops=True, copy=True):
     """Returns the graph that results from contracting `u` and `v`.

@@ -361,14 +516,54 @@ def contracted_nodes(G, u, v, self_loops=True, copy=True):
     quotient_graph

     """
-    pass
+    # Copying has significant overhead and can be disabled if needed
+    if copy:
+        H = G.copy()
+    else:
+        H = G
+
+    # edge code uses G.edges(v) instead of G.adj[v] to handle multiedges
+    if H.is_directed():
+        edges_to_remap = chain(G.in_edges(v, data=True), G.out_edges(v, data=True))
+    else:
+        edges_to_remap = G.edges(v, data=True)
+
+    # If H is G (i.e. copy=False), the generators change as H changes
+    # This makes the edges_to_remap independent of H
+    if not copy:
+        edges_to_remap = list(edges_to_remap)
+
+    v_data = H.nodes[v]
+    H.remove_node(v)
+
+    for prev_w, prev_x, d in edges_to_remap:
+        w = prev_w if prev_w != v else u
+        x = prev_x if prev_x != v else u
+
+        if ({prev_w, prev_x} == {u, v}) and not self_loops:
+            continue
+
+        if not H.has_edge(w, x) or G.is_multigraph():
+            H.add_edge(w, x, **d)
+        else:
+            if "contraction" in H.edges[(w, x)]:
+                H.edges[(w, x)]["contraction"][(prev_w, prev_x)] = d
+            else:
+                H.edges[(w, x)]["contraction"] = {(prev_w, prev_x): d}
+
+    if "contraction" in H.nodes[u]:
+        H.nodes[u]["contraction"][v] = v_data
+    else:
+        H.nodes[u]["contraction"] = {v: v_data}
+    return H


 identified_nodes = contracted_nodes


-@nx._dispatchable(preserve_edge_attrs=True, mutates_input={'not copy': 3},
-    returns_graph=True)
+@nx._dispatchable(
+    preserve_edge_attrs=True, mutates_input={"not copy": 3}, returns_graph=True
+)
 def contracted_edge(G, edge, self_loops=True, copy=True):
     """Returns the graph that results from contracting the specified edge.

@@ -432,4 +627,7 @@ def contracted_edge(G, edge, self_loops=True, copy=True):
     quotient_graph

     """
-    pass
+    u, v = edge[:2]
+    if not G.has_edge(u, v):
+        raise ValueError(f"Edge {edge} does not exist in graph G; cannot contract it")
+    return contracted_nodes(G, u, v, self_loops=self_loops, copy=copy)
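
A quick sketch of the contraction behavior restored above, on a toy
cycle graph; as in the code, data of absorbed nodes and edges lands
under a "contraction" attribute:

    import networkx as nx

    G = nx.cycle_graph(4)  # edges (0, 1), (1, 2), (2, 3), (0, 3)
    H = nx.contracted_nodes(G, 0, 2, self_loops=False)
    print(sorted(H.edges()))          # [(0, 1), (0, 3)]
    print(H.nodes[0]["contraction"])  # {2: {}} -- data of the absorbed node
    # contracted_edge refuses to contract a non-existent edge:
    H2 = nx.contracted_edge(G, (1, 2), self_loops=False)
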
diff --git a/networkx/algorithms/mis.py b/networkx/algorithms/mis.py
index ad5d704ff..fc70514d9 100644
--- a/networkx/algorithms/mis.py
+++ b/networkx/algorithms/mis.py
@@ -4,10 +4,11 @@ Algorithm to find a maximal (not maximum) independent set.
 """
 import networkx as nx
 from networkx.utils import not_implemented_for, py_random_state
-__all__ = ['maximal_independent_set']

+__all__ = ["maximal_independent_set"]

-@not_implemented_for('directed')
+
+@not_implemented_for("directed")
 @py_random_state(2)
 @nx._dispatchable
 def maximal_independent_set(G, nodes=None, seed=None):
@@ -58,4 +59,19 @@ def maximal_independent_set(G, nodes=None, seed=None):
     This algorithm does not solve the maximum independent set problem.

     """
-    pass
+    if not nodes:
+        nodes = {seed.choice(list(G))}
+    else:
+        nodes = set(nodes)
+    if not nodes.issubset(G):
+        raise nx.NetworkXUnfeasible(f"{nodes} is not a subset of the nodes of G")
+    neighbors = set.union(*[set(G.adj[v]) for v in nodes])
+    if set.intersection(neighbors, nodes):
+        raise nx.NetworkXUnfeasible(f"{nodes} is not an independent set of G")
+    indep_nodes = list(nodes)
+    available_nodes = set(G.nodes()).difference(neighbors.union(nodes))
+    while available_nodes:
+        node = seed.choice(list(available_nodes))
+        indep_nodes.append(node)
+        available_nodes.difference_update(list(G.adj[node]) + [node])
+    return indep_nodes
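
The set returned by `maximal_independent_set` is randomized; a seed
makes it reproducible, and `nodes` forces given vertices into the
result (a small illustrative run):

    import networkx as nx

    G = nx.path_graph(5)  # 0-1-2-3-4
    indep = nx.maximal_independent_set(G, nodes=[1], seed=42)
    # No two chosen nodes are adjacent:
    assert all(v not in G[u] for u in indep for v in indep)
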
diff --git a/networkx/algorithms/moral.py b/networkx/algorithms/moral.py
index 12b42bcc6..e2acf80f6 100644
--- a/networkx/algorithms/moral.py
+++ b/networkx/algorithms/moral.py
@@ -1,14 +1,17 @@
-"""Function for computing the moral graph of a directed graph."""
+r"""Function for computing the moral graph of a directed graph."""
+
 import itertools
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['moral_graph']
+
+__all__ = ["moral_graph"]


-@not_implemented_for('undirected')
+@not_implemented_for("undirected")
 @nx._dispatchable(returns_graph=True)
 def moral_graph(G):
-    """Return the Moral Graph
+    r"""Return the Moral Graph

     Returns the moralized graph of a given directed graph.

@@ -49,4 +52,8 @@ def moral_graph(G):
            In Proceedings of the Eleventh conference on Uncertainty
            in artificial intelligence (UAI'95)
     """
-    pass
+    H = G.to_undirected()
+    for preds in G.pred.values():
+        predecessors_combinations = itertools.combinations(preds, r=2)
+        H.add_edges_from(predecessors_combinations)
+    return H
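
Reading the digraph as a Bayesian network, moralization "marries"
co-parents and drops edge directions; a small sketch:

    import networkx as nx

    G = nx.DiGraph([(1, 3), (2, 3), (3, 4)])  # 1 and 2 are parents of 3
    M = nx.moral_graph(G)
    print(sorted(M.edges()))  # [(1, 2), (1, 3), (2, 3), (3, 4)]
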
diff --git a/networkx/algorithms/node_classification.py b/networkx/algorithms/node_classification.py
index 2b44f241e..42e7e6ba2 100644
--- a/networkx/algorithms/node_classification.py
+++ b/networkx/algorithms/node_classification.py
@@ -23,12 +23,13 @@ Semi-supervised learning using gaussian fields and harmonic functions.
 In ICML (Vol. 3, pp. 912-919).
 """
 import networkx as nx
-__all__ = ['harmonic_function', 'local_and_global_consistency']

+__all__ = ["harmonic_function", "local_and_global_consistency"]

-@nx.utils.not_implemented_for('directed')
-@nx._dispatchable(node_attrs='label_name')
-def harmonic_function(G, max_iter=30, label_name='label'):
+
+@nx.utils.not_implemented_for("directed")
+@nx._dispatchable(node_attrs="label_name")
+def harmonic_function(G, max_iter=30, label_name="label"):
     """Node classification by Harmonic function

     Function for computing Harmonic function algorithm by Zhu et al.
@@ -71,13 +72,41 @@ def harmonic_function(G, max_iter=30, label_name='label'):
     Semi-supervised learning using gaussian fields and harmonic functions.
     In ICML (Vol. 3, pp. 912-919).
     """
-    pass
+    import numpy as np
+    import scipy as sp
+
+    X = nx.to_scipy_sparse_array(G)  # adjacency matrix
+    labels, label_dict = _get_label_info(G, label_name)
+
+    if labels.shape[0] == 0:
+        raise nx.NetworkXError(
+            f"No node on the input graph is labeled by '{label_name}'."
+        )
+
+    n_samples = X.shape[0]
+    n_classes = label_dict.shape[0]
+    F = np.zeros((n_samples, n_classes))
+
+    # Build propagation matrix
+    degrees = X.sum(axis=0)
+    degrees[degrees == 0] = 1  # Avoid division by 0
+    # TODO: csr_array
+    D = sp.sparse.csr_array(sp.sparse.diags((1.0 / degrees), offsets=0))
+    P = (D @ X).tolil()
+    P[labels[:, 0]] = 0  # labels[:, 0] indicates IDs of labeled nodes
+    # Build base matrix
+    B = np.zeros((n_samples, n_classes))
+    B[labels[:, 0], labels[:, 1]] = 1
+
+    for _ in range(max_iter):
+        F = (P @ F) + B
+
+    return label_dict[np.argmax(F, axis=1)].tolist()


-@nx.utils.not_implemented_for('directed')
-@nx._dispatchable(node_attrs='label_name')
-def local_and_global_consistency(G, alpha=0.99, max_iter=30, label_name='label'
-    ):
+@nx.utils.not_implemented_for("directed")
+@nx._dispatchable(node_attrs="label_name")
+def local_and_global_consistency(G, alpha=0.99, max_iter=30, label_name="label"):
     """Node classification by Local and Global Consistency

     Function for computing Local and global consistency algorithm by Zhou et al.
@@ -122,7 +151,35 @@ def local_and_global_consistency(G, alpha=0.99, max_iter=30, label_name='label'
     Learning with local and global consistency.
     Advances in neural information processing systems, 16(16), 321-328.
     """
-    pass
+    import numpy as np
+    import scipy as sp
+
+    X = nx.to_scipy_sparse_array(G)  # adjacency matrix
+    labels, label_dict = _get_label_info(G, label_name)
+
+    if labels.shape[0] == 0:
+        raise nx.NetworkXError(
+            f"No node on the input graph is labeled by '{label_name}'."
+        )
+
+    n_samples = X.shape[0]
+    n_classes = label_dict.shape[0]
+    F = np.zeros((n_samples, n_classes))
+
+    # Build propagation matrix
+    degrees = X.sum(axis=0)
+    degrees[degrees == 0] = 1  # Avoid division by 0
+    # TODO: csr_array
+    D2 = np.sqrt(sp.sparse.csr_array(sp.sparse.diags((1.0 / degrees), offsets=0)))
+    P = alpha * ((D2 @ X) @ D2)
+    # Build base matrix
+    B = np.zeros((n_samples, n_classes))
+    B[labels[:, 0], labels[:, 1]] = 1 - alpha
+
+    for _ in range(max_iter):
+        F = (P @ F) + B
+
+    return label_dict[np.argmax(F, axis=1)].tolist()


 def _get_label_info(G, label_name):
@@ -142,4 +199,20 @@ def _get_label_info(G, label_name):
         Array of labels
         i-th element contains the label corresponding label ID `i`
     """
-    pass
+    import numpy as np
+
+    labels = []
+    label_to_id = {}
+    lid = 0
+    for i, n in enumerate(G.nodes(data=True)):
+        if label_name in n[1]:
+            label = n[1][label_name]
+            if label not in label_to_id:
+                label_to_id[label] = lid
+                lid += 1
+            labels.append([i, label_to_id[label]])
+    labels = np.array(labels)
+    label_dict = np.array(
+        [label for label, _ in sorted(label_to_id.items(), key=lambda x: x[1])]
+    )
+    return (labels, label_dict)
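
Both classifiers read seed labels from a node attribute and return one
predicted label per node (numpy and scipy required); a sketch on a path
graph with labels at the two ends:

    import networkx as nx
    from networkx.algorithms import node_classification

    G = nx.path_graph(4)
    G.nodes[0]["label"] = "A"
    G.nodes[3]["label"] = "B"
    print(node_classification.harmonic_function(G))  # ['A', 'A', 'B', 'B']
    print(node_classification.local_and_global_consistency(G))
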
diff --git a/networkx/algorithms/non_randomness.py b/networkx/algorithms/non_randomness.py
index 5270ca312..85483d330 100644
--- a/networkx/algorithms/non_randomness.py
+++ b/networkx/algorithms/non_randomness.py
@@ -1,15 +1,18 @@
-""" Computation of graph non-randomness
+r""" Computation of graph non-randomness
 """
+
 import math
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['non_randomness']
+
+__all__ = ["non_randomness"]


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
-@nx._dispatchable(edge_attrs='weight')
-def non_randomness(G, k=None, weight='weight'):
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
+def non_randomness(G, k=None, weight="weight"):
     """Compute the non-randomness of graph G.

     The first returned value nr is the sum of non-randomness values of all
@@ -69,4 +72,25 @@ def non_randomness(G, k=None, weight='weight'):
            On Randomness Measures for Social Networks,
            SIAM International Conference on Data Mining. 2009
     """
-    pass
+    import numpy as np
+
+    if not nx.is_connected(G):
+        raise nx.NetworkXException("Non connected graph.")
+    if len(list(nx.selfloop_edges(G))) > 0:
+        raise nx.NetworkXError("Graph must not contain self-loops")
+
+    if k is None:
+        k = len(tuple(nx.community.label_propagation_communities(G)))
+
+    # eq. 4.4
+    eigenvalues = np.linalg.eigvals(nx.to_numpy_array(G, weight=weight))
+    nr = float(np.real(np.sum(eigenvalues[:k])))
+
+    n = G.number_of_nodes()
+    m = G.number_of_edges()
+    p = (2 * k * m) / (n * (n - k))
+
+    # eq. 4.5
+    nr_rd = (nr - ((n - 2 * k) * p + k)) / math.sqrt(2 * k * p * (1 - p))
+
+    return nr, nr_rd
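
`non_randomness` needs a connected graph without self-loops; when `k`
is omitted it is taken from label-propagation communities, as the code
shows. A usage sketch (numpy required):

    import networkx as nx

    G = nx.karate_club_graph()
    nr, nr_rd = nx.non_randomness(G, k=2, weight=None)
    print(nr, nr_rd)  # eq. 4.4 value and its normalized form (eq. 4.5)
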
diff --git a/networkx/algorithms/operators/all.py b/networkx/algorithms/operators/all.py
index f28c3f056..ba1304b6c 100644
--- a/networkx/algorithms/operators/all.py
+++ b/networkx/algorithms/operators/all.py
@@ -1,13 +1,13 @@
 """Operations on many graphs.
 """
 from itertools import chain, repeat
+
 import networkx as nx
-__all__ = ['union_all', 'compose_all', 'disjoint_union_all', 'intersection_all'
-    ]
+
+__all__ = ["union_all", "compose_all", "disjoint_union_all", "intersection_all"]


-@nx._dispatchable(graphs='[graphs]', preserve_all_attrs=True, returns_graph
-    =True)
+@nx._dispatchable(graphs="[graphs]", preserve_all_attrs=True, returns_graph=True)
 def union_all(graphs, rename=()):
     """Returns the union of all graphs.

@@ -65,11 +65,52 @@ def union_all(graphs, rename=()):
     union
     disjoint_union_all
     """
-    pass
-
-
-@nx._dispatchable(graphs='[graphs]', preserve_all_attrs=True, returns_graph
-    =True)
+    R = None
+    seen_nodes = set()
+
+    # rename graph to obtain disjoint node labels
+    def add_prefix(graph, prefix):
+        if prefix is None:
+            return graph
+
+        def label(x):
+            return f"{prefix}{x}"
+
+        return nx.relabel_nodes(graph, label)
+
+    rename = chain(rename, repeat(None))
+    graphs = (add_prefix(G, name) for G, name in zip(graphs, rename))
+
+    for i, G in enumerate(graphs):
+        G_nodes_set = set(G.nodes)
+        if i == 0:
+            # Union is the same type as first graph
+            R = G.__class__()
+        elif G.is_directed() != R.is_directed():
+            raise nx.NetworkXError("All graphs must be directed or undirected.")
+        elif G.is_multigraph() != R.is_multigraph():
+            raise nx.NetworkXError("All graphs must be graphs or multigraphs.")
+        elif not seen_nodes.isdisjoint(G_nodes_set):
+            raise nx.NetworkXError(
+                "The node sets of the graphs are not disjoint.\n"
+                "Use `rename` to specify prefixes for the graphs or use\n"
+                "disjoint_union(G1, G2, ..., GN)."
+            )
+
+        seen_nodes |= G_nodes_set
+        R.graph.update(G.graph)
+        R.add_nodes_from(G.nodes(data=True))
+        R.add_edges_from(
+            G.edges(keys=True, data=True) if G.is_multigraph() else G.edges(data=True)
+        )
+
+    if R is None:
+        raise ValueError("cannot apply union_all to an empty list")
+
+    return R
+
+
+@nx._dispatchable(graphs="[graphs]", preserve_all_attrs=True, returns_graph=True)
 def disjoint_union_all(graphs):
     """Returns the disjoint union of all graphs.

@@ -111,11 +152,19 @@ def disjoint_union_all(graphs):
     If a graph attribute is present in multiple graphs, then the value
     from the last graph in the list with that attribute is used.
     """
-    pass

+    def yield_relabeled(graphs):
+        first_label = 0
+        for G in graphs:
+            yield nx.convert_node_labels_to_integers(G, first_label=first_label)
+            first_label += len(G)
+
+    R = union_all(yield_relabeled(graphs))
+
+    return R

-@nx._dispatchable(graphs='[graphs]', preserve_all_attrs=True, returns_graph
-    =True)
+
+@nx._dispatchable(graphs="[graphs]", preserve_all_attrs=True, returns_graph=True)
 def compose_all(graphs):
     """Returns the composition of all graphs.

@@ -157,10 +206,31 @@ def compose_all(graphs):
     If a graph attribute is present in multiple graphs, then the value
     from the last graph in the list with that attribute is used.
     """
-    pass
+    R = None
+
+    # add graph attributes; attributes of later graphs take precedence
+    for i, G in enumerate(graphs):
+        if i == 0:
+            # create new graph
+            R = G.__class__()
+        elif G.is_directed() != R.is_directed():
+            raise nx.NetworkXError("All graphs must be directed or undirected.")
+        elif G.is_multigraph() != R.is_multigraph():
+            raise nx.NetworkXError("All graphs must be graphs or multigraphs.")
+
+        R.graph.update(G.graph)
+        R.add_nodes_from(G.nodes(data=True))
+        R.add_edges_from(
+            G.edges(keys=True, data=True) if G.is_multigraph() else G.edges(data=True)
+        )
+
+    if R is None:
+        raise ValueError("cannot apply compose_all to an empty list")
+
+    return R


-@nx._dispatchable(graphs='[graphs]', returns_graph=True)
+@nx._dispatchable(graphs="[graphs]", returns_graph=True)
 def intersection_all(graphs):
     """Returns a new graph that contains only the nodes and the edges that exist in
     all graphs.
@@ -219,4 +289,33 @@ def intersection_all(graphs):
     [(2, 3)]

     """
-    pass
+    R = None
+
+    for i, G in enumerate(graphs):
+        G_nodes_set = set(G.nodes)
+        G_edges_set = set(G.edges)
+        if not G.is_directed():
+            if G.is_multigraph():
+                G_edges_set.update((v, u, k) for u, v, k in list(G_edges_set))
+            else:
+                G_edges_set.update((v, u) for u, v in list(G_edges_set))
+        if i == 0:
+            # create new graph
+            R = G.__class__()
+            node_intersection = G_nodes_set
+            edge_intersection = G_edges_set
+        elif G.is_directed() != R.is_directed():
+            raise nx.NetworkXError("All graphs must be directed or undirected.")
+        elif G.is_multigraph() != R.is_multigraph():
+            raise nx.NetworkXError("All graphs must be graphs or multigraphs.")
+        else:
+            node_intersection &= G_nodes_set
+            edge_intersection &= G_edges_set
+
+    if R is None:
+        raise ValueError("cannot apply intersection_all to an empty list")
+
+    R.add_nodes_from(node_intersection)
+    R.add_edges_from(edge_intersection)
+
+    return R
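
A sketch of the n-ary operators above; `rename` prefixes keep node sets
disjoint for `union_all`, while `disjoint_union_all` relabels to
integers:

    import networkx as nx

    G1, G2 = nx.path_graph(2), nx.path_graph(2)
    print(sorted(nx.union_all([G1, G2], rename=("a-", "b-")).nodes()))
    # ['a-0', 'a-1', 'b-0', 'b-1']
    print(sorted(nx.disjoint_union_all([G1, G2]).nodes()))
    # [0, 1, 2, 3]
    # An empty iterable raises ValueError; mixing directed and undirected
    # inputs raises NetworkXError, matching the checks above.
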
diff --git a/networkx/algorithms/operators/binary.py b/networkx/algorithms/operators/binary.py
index 714600916..0ca3a7b6b 100644
--- a/networkx/algorithms/operators/binary.py
+++ b/networkx/algorithms/operators/binary.py
@@ -2,9 +2,17 @@
 Operations on graphs including union, intersection, difference.
 """
 import networkx as nx
-__all__ = ['union', 'compose', 'disjoint_union', 'intersection',
-    'difference', 'symmetric_difference', 'full_join']
-_G_H = {'G': 0, 'H': 1}
+
+__all__ = [
+    "union",
+    "compose",
+    "disjoint_union",
+    "intersection",
+    "difference",
+    "symmetric_difference",
+    "full_join",
+]
+_G_H = {"G": 0, "H": 1}


 @nx._dispatchable(graphs=_G_H, preserve_all_attrs=True, returns_graph=True)
@@ -60,7 +68,7 @@ def union(G, H, rename=()):


     """
-    pass
+    return nx.union_all([G, H], rename)


 @nx._dispatchable(graphs=_G_H, preserve_all_attrs=True, returns_graph=True)
@@ -114,7 +122,7 @@ def disjoint_union(G, H):
     >>> U.edges
     EdgeView([(0, 1), (0, 2), (1, 2), (3, 4), (4, 6), (5, 6)])
     """
-    pass
+    return nx.disjoint_union_all([G, H])


 @nx._dispatchable(graphs=_G_H, returns_graph=True)
@@ -159,7 +167,7 @@ def intersection(G, H):
     >>> R.edges
     EdgeView([(1, 2)])
     """
-    pass
+    return nx.intersection_all([G, H])


 @nx._dispatchable(graphs=_G_H, returns_graph=True)
@@ -199,7 +207,22 @@ def difference(G, H):
     >>> R.edges
     EdgeView([(0, 2), (1, 3)])
     """
-    pass
+    # create new graph
+    if not G.is_multigraph() == H.is_multigraph():
+        raise nx.NetworkXError("G and H must both be graphs or multigraphs.")
+    R = nx.create_empty_copy(G, with_data=False)
+
+    if set(G) != set(H):
+        raise nx.NetworkXError("Node sets of graphs not equal")
+
+    if G.is_multigraph():
+        edges = G.edges(keys=True)
+    else:
+        edges = G.edges()
+    for e in edges:
+        if not H.has_edge(*e):
+            R.add_edge(*e)
+    return R


 @nx._dispatchable(graphs=_G_H, returns_graph=True)
@@ -232,7 +255,37 @@ def symmetric_difference(G, H):
     >>> R.edges
     EdgeView([(0, 2), (0, 3), (1, 3)])
     """
-    pass
+    # create new graph
+    if not G.is_multigraph() == H.is_multigraph():
+        raise nx.NetworkXError("G and H must both be graphs or multigraphs.")
+    R = nx.create_empty_copy(G, with_data=False)
+
+    if set(G) != set(H):
+        raise nx.NetworkXError("Node sets of graphs not equal")
+
+    gnodes = set(G)  # set of nodes in G
+    hnodes = set(H)  # set of nodes in H
+    nodes = gnodes.symmetric_difference(hnodes)
+    R.add_nodes_from(nodes)
+
+    if G.is_multigraph():
+        edges = G.edges(keys=True)
+    else:
+        edges = G.edges()
+    # we could copy the data here but then this function doesn't
+    # match intersection and difference
+    for e in edges:
+        if not H.has_edge(*e):
+            R.add_edge(*e)
+
+    if H.is_multigraph():
+        edges = H.edges(keys=True)
+    else:
+        edges = H.edges()
+    for e in edges:
+        if not G.has_edge(*e):
+            R.add_edge(*e)
+    return R


 @nx._dispatchable(graphs=_G_H, preserve_all_attrs=True, returns_graph=True)
@@ -313,7 +366,7 @@ def compose(G, H):
     >>> print(GcomposeH.edges[(3, 0)]["weight"])
     100.0
     """
-    pass
+    return nx.compose_all([G, H])


 @nx._dispatchable(graphs=_G_H, preserve_all_attrs=True, returns_graph=True)
@@ -370,4 +423,26 @@ def full_join(G, H, rename=(None, None)):
     union
     disjoint_union
     """
-    pass
+    R = union(G, H, rename)
+
+    def add_prefix(graph, prefix):
+        if prefix is None:
+            return graph
+
+        def label(x):
+            return f"{prefix}{x}"
+
+        return nx.relabel_nodes(graph, label)
+
+    G = add_prefix(G, rename[0])
+    H = add_prefix(H, rename[1])
+
+    for i in G:
+        for j in H:
+            R.add_edge(i, j)
+    if R.is_directed():
+        for i in H:
+            for j in G:
+                R.add_edge(i, j)
+
+    return R
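
`difference` and `symmetric_difference` require identical node sets and
copy edges without data, per the comments above; a short sketch:

    import networkx as nx

    G = nx.Graph([(0, 1), (1, 2), (2, 3)])
    H = nx.Graph([(1, 2), (2, 3), (0, 3)])
    print(sorted(nx.difference(G, H).edges()))            # [(0, 1)]
    print(sorted(nx.symmetric_difference(G, H).edges()))  # [(0, 1), (0, 3)]
    # full_join additionally links every G-node to every H-node:
    FJ = nx.full_join(G, H, rename=("g", "h"))
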
diff --git a/networkx/algorithms/operators/product.py b/networkx/algorithms/operators/product.py
index b3ac7e77f..dc3427004 100644
--- a/networkx/algorithms/operators/product.py
+++ b/networkx/algorithms/operators/product.py
@@ -2,20 +2,134 @@
 Graph products.
 """
 from itertools import product
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['tensor_product', 'cartesian_product', 'lexicographic_product',
-    'strong_product', 'power', 'rooted_product', 'corona_product',
-    'modular_product']
-_G_H = {'G': 0, 'H': 1}
+
+__all__ = [
+    "tensor_product",
+    "cartesian_product",
+    "lexicographic_product",
+    "strong_product",
+    "power",
+    "rooted_product",
+    "corona_product",
+    "modular_product",
+]
+_G_H = {"G": 0, "H": 1}
+
+
+def _dict_product(d1, d2):
+    return {k: (d1.get(k), d2.get(k)) for k in set(d1) | set(d2)}
+
+
+# Generators for producing graph products
+def _node_product(G, H):
+    for u, v in product(G, H):
+        yield ((u, v), _dict_product(G.nodes[u], H.nodes[v]))
+
+
+def _directed_edges_cross_edges(G, H):
+    if not G.is_multigraph() and not H.is_multigraph():
+        for u, v, c in G.edges(data=True):
+            for x, y, d in H.edges(data=True):
+                yield (u, x), (v, y), _dict_product(c, d)
+    if not G.is_multigraph() and H.is_multigraph():
+        for u, v, c in G.edges(data=True):
+            for x, y, k, d in H.edges(data=True, keys=True):
+                yield (u, x), (v, y), k, _dict_product(c, d)
+    if G.is_multigraph() and not H.is_multigraph():
+        for u, v, k, c in G.edges(data=True, keys=True):
+            for x, y, d in H.edges(data=True):
+                yield (u, x), (v, y), k, _dict_product(c, d)
+    if G.is_multigraph() and H.is_multigraph():
+        for u, v, j, c in G.edges(data=True, keys=True):
+            for x, y, k, d in H.edges(data=True, keys=True):
+                yield (u, x), (v, y), (j, k), _dict_product(c, d)
+
+
+def _undirected_edges_cross_edges(G, H):
+    if not G.is_multigraph() and not H.is_multigraph():
+        for u, v, c in G.edges(data=True):
+            for x, y, d in H.edges(data=True):
+                yield (v, x), (u, y), _dict_product(c, d)
+    if not G.is_multigraph() and H.is_multigraph():
+        for u, v, c in G.edges(data=True):
+            for x, y, k, d in H.edges(data=True, keys=True):
+                yield (v, x), (u, y), k, _dict_product(c, d)
+    if G.is_multigraph() and not H.is_multigraph():
+        for u, v, k, c in G.edges(data=True, keys=True):
+            for x, y, d in H.edges(data=True):
+                yield (v, x), (u, y), k, _dict_product(c, d)
+    if G.is_multigraph() and H.is_multigraph():
+        for u, v, j, c in G.edges(data=True, keys=True):
+            for x, y, k, d in H.edges(data=True, keys=True):
+                yield (v, x), (u, y), (j, k), _dict_product(c, d)
+
+
+def _edges_cross_nodes(G, H):
+    if G.is_multigraph():
+        for u, v, k, d in G.edges(data=True, keys=True):
+            for x in H:
+                yield (u, x), (v, x), k, d
+    else:
+        for u, v, d in G.edges(data=True):
+            for x in H:
+                if H.is_multigraph():
+                    yield (u, x), (v, x), None, d
+                else:
+                    yield (u, x), (v, x), d
+
+
+def _nodes_cross_edges(G, H):
+    if H.is_multigraph():
+        for x in G:
+            for u, v, k, d in H.edges(data=True, keys=True):
+                yield (x, u), (x, v), k, d
+    else:
+        for x in G:
+            for u, v, d in H.edges(data=True):
+                if G.is_multigraph():
+                    yield (x, u), (x, v), None, d
+                else:
+                    yield (x, u), (x, v), d
+
+
+def _edges_cross_nodes_and_nodes(G, H):
+    if G.is_multigraph():
+        for u, v, k, d in G.edges(data=True, keys=True):
+            for x in H:
+                for y in H:
+                    yield (u, x), (v, y), k, d
+    else:
+        for u, v, d in G.edges(data=True):
+            for x in H:
+                for y in H:
+                    if H.is_multigraph():
+                        yield (u, x), (v, y), None, d
+                    else:
+                        yield (u, x), (v, y), d
+
+
+def _init_product_graph(G, H):
+    if G.is_directed() != H.is_directed():
+        msg = "G and H must be both directed or both undirected"
+        raise nx.NetworkXError(msg)
+    if G.is_multigraph() or H.is_multigraph():
+        GH = nx.MultiGraph()
+    else:
+        GH = nx.Graph()
+    if G.is_directed():
+        GH = GH.to_directed()
+    return GH


 @nx._dispatchable(graphs=_G_H, preserve_node_attrs=True, returns_graph=True)
 def tensor_product(G, H):
-    """Returns the tensor product of G and H.
+    r"""Returns the tensor product of G and H.

     The tensor product $P$ of the graphs $G$ and $H$ has a node set that
-    is the Cartesian product of the node sets, $V(P)=V(G) \\times V(H)$.
+    is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$.
     $P$ has an edge $((u,v), (x,y))$ if and only if $(u,x)$ is an edge in $G$
     and $(v,y)$ is an edge in $H$.

@@ -58,15 +172,20 @@ def tensor_product(G, H):
     Edge attributes and edge keys (for multigraphs) are also copied to the
     new product graph
     """
-    pass
+    GH = _init_product_graph(G, H)
+    GH.add_nodes_from(_node_product(G, H))
+    GH.add_edges_from(_directed_edges_cross_edges(G, H))
+    if not GH.is_directed():
+        GH.add_edges_from(_undirected_edges_cross_edges(G, H))
+    return GH


 @nx._dispatchable(graphs=_G_H, preserve_node_attrs=True, returns_graph=True)
 def cartesian_product(G, H):
-    """Returns the Cartesian product of G and H.
+    r"""Returns the Cartesian product of G and H.

     The Cartesian product $P$ of the graphs $G$ and $H$ has a node set that
-    is the Cartesian product of the node sets, $V(P)=V(G) \\times V(H)$.
+    is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$.
     $P$ has an edge $((u,v),(x,y))$ if and only if either $u$ is equal to $x$
     and both $v$ and $y$ are adjacent in $H$ or if $v$ is equal to $y$ and
     both $u$ and $x$ are adjacent in $G$.
@@ -106,15 +225,19 @@ def cartesian_product(G, H):
     Edge attributes and edge keys (for multigraphs) are also copied to the
     new product graph
     """
-    pass
+    GH = _init_product_graph(G, H)
+    GH.add_nodes_from(_node_product(G, H))
+    GH.add_edges_from(_edges_cross_nodes(G, H))
+    GH.add_edges_from(_nodes_cross_edges(G, H))
+    return GH


 @nx._dispatchable(graphs=_G_H, preserve_node_attrs=True, returns_graph=True)
 def lexicographic_product(G, H):
-    """Returns the lexicographic product of G and H.
+    r"""Returns the lexicographic product of G and H.

     The lexicographical product $P$ of the graphs $G$ and $H$ has a node set
-    that is the Cartesian product of the node sets, $V(P)=V(G) \\times V(H)$.
+    that is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$.
     $P$ has an edge $((u,v), (x,y))$ if and only if $(u,v)$ is an edge in $G$
     or $u==v$ and $(x,y)$ is an edge in $H$.

@@ -153,15 +276,21 @@ def lexicographic_product(G, H):
     Edge attributes and edge keys (for multigraphs) are also copied to the
     new product graph
     """
-    pass
+    GH = _init_product_graph(G, H)
+    GH.add_nodes_from(_node_product(G, H))
+    # Edges in G regardless of H designation
+    GH.add_edges_from(_edges_cross_nodes_and_nodes(G, H))
+    # For each x in G, only if there is an edge in H
+    GH.add_edges_from(_nodes_cross_edges(G, H))
+    return GH


 @nx._dispatchable(graphs=_G_H, preserve_node_attrs=True, returns_graph=True)
 def strong_product(G, H):
-    """Returns the strong product of G and H.
+    r"""Returns the strong product of G and H.

     The strong product $P$ of the graphs $G$ and $H$ has a node set that
-    is the Cartesian product of the node sets, $V(P)=V(G) \\times V(H)$.
+    is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$.
     $P$ has an edge $((u,v), (x,y))$ if and only if
     $u==v$ and $(x,y)$ is an edge in $H$, or
     $x==y$ and $(u,v)$ is an edge in $G$, or
@@ -202,11 +331,18 @@ def strong_product(G, H):
     Edge attributes and edge keys (for multigraphs) are also copied to the
     new product graph
     """
-    pass
-
-
-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+    GH = _init_product_graph(G, H)
+    GH.add_nodes_from(_node_product(G, H))
+    GH.add_edges_from(_nodes_cross_edges(G, H))
+    GH.add_edges_from(_edges_cross_nodes(G, H))
+    GH.add_edges_from(_directed_edges_cross_edges(G, H))
+    if not GH.is_directed():
+        GH.add_edges_from(_undirected_edges_cross_edges(G, H))
+    return GH
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable(returns_graph=True)
 def power(G, k):
     """Returns the specified power of a graph.
@@ -270,10 +406,32 @@ def power(G, k):
     *Graph Theory* by Bondy and Murty [1]_.

     """
-    pass
-
-
-@not_implemented_for('multigraph')
+    if k <= 0:
+        raise ValueError("k must be a positive integer")
+    H = nx.Graph()
+    H.add_nodes_from(G)
+    # BFS from each node, ignoring self-loops, collecting nodes within k hops.
+    for n in G:
+        seen = {}  # level (number of hops) when seen in BFS
+        level = 1  # the current level
+        nextlevel = G[n]
+        while nextlevel:
+            thislevel = nextlevel  # advance to next level
+            nextlevel = {}  # and start a new list (fringe)
+            for v in thislevel:
+                if v == n:  # avoid self loop
+                    continue
+                if v not in seen:
+                    seen[v] = level  # set the level of vertex v
+                    nextlevel.update(G[v])  # add neighbors of v
+            if k <= level:
+                break
+            level += 1
+        H.add_edges_from((n, nbr) for nbr in seen)
+    return H
+
+
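The truncated BFS above links each node to everything within distance `k`. An illustrative check:

>>> import networkx as nx
>>> sorted(nx.power(nx.path_graph(4), 2).edges())
[(0, 1), (0, 2), (1, 2), (1, 3), (2, 3)]
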
+@not_implemented_for("multigraph")
 @nx._dispatchable(graphs=_G_H, returns_graph=True)
 def rooted_product(G, H, root):
     """Return the rooted product of graphs G and H rooted at root in H.
@@ -300,16 +458,25 @@ def rooted_product(G, H, root):
     The nodes of R are the Cartesian product of the nodes of G and H.
     The nodes of G and H are not relabeled.
     """
-    pass
+    if root not in H:
+        raise nx.NetworkXError("root must be a vertex in H")
+
+    R = nx.Graph()
+    R.add_nodes_from(product(G, H))

+    R.add_edges_from(((e[0], root), (e[1], root)) for e in G.edges())
+    R.add_edges_from(((g, e[0]), (g, e[1])) for g in G for e in H.edges())

-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+    return R
+
+
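Each node of `G` receives its own copy of `H`, glued at `root`, so the edge count is |E(G)| + |V(G)|·|E(H)|. An illustrative check:

>>> import networkx as nx
>>> R = nx.rooted_product(nx.cycle_graph(4), nx.path_graph(2), 0)
>>> R.number_of_nodes(), R.number_of_edges()
(8, 8)
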
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable(graphs=_G_H, returns_graph=True)
 def corona_product(G, H):
-    """Returns the Corona product of G and H.
+    r"""Returns the Corona product of G and H.

-    The corona product of $G$ and $H$ is the graph $C = G \\circ H$ obtained by
+    The corona product of $G$ and $H$ is the graph $C = G \circ H$ obtained by
     taking one copy of $G$, called the center graph, $|V(G)|$ copies of $H$,
     called the outer graph, and making the $i$-th vertex of $G$ adjacent to
     every vertex of the $i$-th copy of $H$, where $1 \leq i \leq |V(G)|$.
@@ -349,16 +516,33 @@ def corona_product(G, H):
     [2] A. Faraji, "Corona Product in Graph Theory," Ali Faraji, May 11, 2021.
         https://blog.alifaraji.ir/math/graph-theory/corona-product.html (accessed Dec. 07, 2021).
     """
-    pass
+    GH = _init_product_graph(G, H)
+    GH.add_nodes_from(G)
+    GH.add_edges_from(G.edges)
+
+    for G_node in G:
+        # copy the nodes of H into GH; call this copy H_i
+        GH.add_nodes_from((G_node, v) for v in H)
+
+        # copy edges of H_i based on H
+        GH.add_edges_from(
+            ((G_node, e0), (G_node, e1), d) for e0, e1, d in H.edges.data()
+        )
+
+        # connect G_node to every node of its copy H_i
+        GH.add_edges_from((G_node, (G_node, H_node)) for H_node in H)
+
+    return GH


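With an edgeless outer graph, the corona simply pends |V(H)| leaves onto every center node. An illustrative check:

>>> import networkx as nx
>>> C = nx.corona_product(nx.cycle_graph(3), nx.empty_graph(2))
>>> C.number_of_nodes(), C.number_of_edges()
(9, 9)
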
-@nx._dispatchable(graphs=_G_H, preserve_edge_attrs=True,
-    preserve_node_attrs=True, returns_graph=True)
+@nx._dispatchable(
+    graphs=_G_H, preserve_edge_attrs=True, preserve_node_attrs=True, returns_graph=True
+)
 def modular_product(G, H):
-    """Returns the Modular product of G and H.
+    r"""Returns the Modular product of G and H.

-    The modular product of `G` and `H` is the graph $M = G \\nabla H$,
-    consisting of the node set $V(M) = V(G) \\times V(H)$ that is the Cartesian
+    The modular product of `G` and `H` is the graph $M = G \nabla H$,
+    consisting of the node set $V(M) = V(G) \times V(H)$ that is the Cartesian
     product of the node sets of `G` and `H`. Further, M contains an edge ((u, v), (x, y)):

     - if u is adjacent to x in `G` and v is adjacent to y in `H`, or
@@ -418,4 +602,29 @@ def modular_product(G, H):
         entrance to the task of finding the nondensity of a graph." Proc. Third
         All-Union Conference on Problems of Theoretical Cybernetics. 1974.
     """
-    pass
+    if G.is_directed() or H.is_directed():
+        raise nx.NetworkXNotImplemented(
+            "Modular product not implemented for directed graphs"
+        )
+    if G.is_multigraph() or H.is_multigraph():
+        raise nx.NetworkXNotImplemented(
+            "Modular product not implemented for multigraphs"
+        )
+
+    GH = _init_product_graph(G, H)
+    GH.add_nodes_from(_node_product(G, H))
+
+    for u, v, c in G.edges(data=True):
+        for x, y, d in H.edges(data=True):
+            GH.add_edge((u, x), (v, y), **_dict_product(c, d))
+            GH.add_edge((v, x), (u, y), **_dict_product(c, d))
+
+    G = nx.complement(G)
+    H = nx.complement(H)
+
+    for u, v, c in G.edges(data=True):
+        for x, y, d in H.edges(data=True):
+            GH.add_edge((u, x), (v, y), **_dict_product(c, d))
+            GH.add_edge((v, x), (u, y), **_dict_product(c, d))
+
+    return GH
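
Cliques in the modular product correspond to common induced subgraphs of `G` and `H`, which is why the complement pass above is needed: non-edges must be matched to non-edges. A minimal illustrative check:

>>> import networkx as nx
>>> M = nx.modular_product(nx.path_graph(2), nx.path_graph(2))
>>> sorted(M.edges())
[((0, 0), (1, 1)), ((0, 1), (1, 0))]
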
diff --git a/networkx/algorithms/operators/unary.py b/networkx/algorithms/operators/unary.py
index 9026b3bcb..64be249f6 100644
--- a/networkx/algorithms/operators/unary.py
+++ b/networkx/algorithms/operators/unary.py
@@ -1,6 +1,7 @@
 """Unary operations on graphs"""
 import networkx as nx
-__all__ = ['complement', 'reverse']
+
+__all__ = ["complement", "reverse"]


 @nx._dispatchable(returns_graph=True)
@@ -31,7 +32,12 @@ def complement(G):
     EdgeView([(1, 4), (1, 5), (2, 4), (2, 5), (4, 5)])

     """
-    pass
+    R = G.__class__()
+    R.add_nodes_from(G)
+    R.add_edges_from(
+        ((n, n2) for n, nbrs in G.adjacency() for n2 in G if n2 not in nbrs if n != n2)
+    )
+    return R


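An illustrative check: only the distinct, non-adjacent pairs survive.

>>> import networkx as nx
>>> list(nx.complement(nx.path_graph(3)).edges())
[(0, 2)]
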
 @nx._dispatchable(returns_graph=True)
@@ -64,4 +70,7 @@ def reverse(G, copy=True):
     OutEdgeView([(2, 1), (3, 1), (3, 2), (4, 3), (5, 3)])

     """
-    pass
+    if not G.is_directed():
+        raise nx.NetworkXError("Cannot reverse an undirected graph.")
+    else:
+        return G.reverse(copy=copy)
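
An illustrative check:

>>> import networkx as nx
>>> D = nx.DiGraph([(1, 2), (2, 3)])
>>> list(nx.reverse(D).edges())
[(2, 1), (3, 2)]
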
diff --git a/networkx/algorithms/planar_drawing.py b/networkx/algorithms/planar_drawing.py
index 2439880d2..ea25809b6 100644
--- a/networkx/algorithms/planar_drawing.py
+++ b/networkx/algorithms/planar_drawing.py
@@ -1,6 +1,8 @@
 from collections import defaultdict
+
 import networkx as nx
-__all__ = ['combinatorial_embedding_to_pos']
+
+__all__ = ["combinatorial_embedding_to_pos"]


 def combinatorial_embedding_to_pos(embedding, fully_triangulate=False):
@@ -33,12 +35,106 @@ def combinatorial_embedding_to_pos(embedding, fully_triangulate=False):
         http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.51.6677

     """
-    pass
+    if len(embedding.nodes()) < 4:
+        # Position the nodes in any triangle
+        default_positions = [(0, 0), (2, 0), (1, 1)]
+        pos = {}
+        for i, v in enumerate(embedding.nodes()):
+            pos[v] = default_positions[i]
+        return pos
+
+    embedding, outer_face = triangulate_embedding(embedding, fully_triangulate)
+
+    # The following dicts map a node to another node
+    # If a node is not in the key set it means that the node is not yet in G_k
+    # If a node maps to None then the corresponding subtree does not exist
+    left_t_child = {}
+    right_t_child = {}
+
+    # The following dicts map a node to an integer
+    delta_x = {}
+    y_coordinate = {}
+
+    node_list = get_canonical_ordering(embedding, outer_face)
+
+    # 1. Phase: Compute relative positions
+
+    # Initialization
+    v1, v2, v3 = node_list[0][0], node_list[1][0], node_list[2][0]
+
+    delta_x[v1] = 0
+    y_coordinate[v1] = 0
+    right_t_child[v1] = v3
+    left_t_child[v1] = None
+
+    delta_x[v2] = 1
+    y_coordinate[v2] = 0
+    right_t_child[v2] = None
+    left_t_child[v2] = None
+
+    delta_x[v3] = 1
+    y_coordinate[v3] = 1
+    right_t_child[v3] = v2
+    left_t_child[v3] = None
+
+    for k in range(3, len(node_list)):
+        vk, contour_nbrs = node_list[k]
+        wp = contour_nbrs[0]
+        wp1 = contour_nbrs[1]
+        wq = contour_nbrs[-1]
+        wq1 = contour_nbrs[-2]
+        adds_mult_tri = len(contour_nbrs) > 2
+
+        # Stretch gaps:
+        delta_x[wp1] += 1
+        delta_x[wq] += 1
+
+        delta_x_wp_wq = sum(delta_x[x] for x in contour_nbrs[1:])
+
+        # Adjust offsets
+        delta_x[vk] = (-y_coordinate[wp] + delta_x_wp_wq + y_coordinate[wq]) // 2
+        y_coordinate[vk] = (y_coordinate[wp] + delta_x_wp_wq + y_coordinate[wq]) // 2
+        delta_x[wq] = delta_x_wp_wq - delta_x[vk]
+        if adds_mult_tri:
+            delta_x[wp1] -= delta_x[vk]
+
+        # Install v_k:
+        right_t_child[wp] = vk
+        right_t_child[vk] = wq
+        if adds_mult_tri:
+            left_t_child[vk] = wp1
+            right_t_child[wq1] = None
+        else:
+            left_t_child[vk] = None
+
+    # 2. Phase: Set absolute positions
+    pos = {}
+    pos[v1] = (0, y_coordinate[v1])
+    remaining_nodes = [v1]
+    while remaining_nodes:
+        parent_node = remaining_nodes.pop()
+
+        # Calculate position for left child
+        set_position(
+            parent_node, left_t_child, remaining_nodes, delta_x, y_coordinate, pos
+        )
+        # Calculate position for right child
+        set_position(
+            parent_node, right_t_child, remaining_nodes, delta_x, y_coordinate, pos
+        )
+    return pos


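The two phases above yield integer grid coordinates for any planar embedding. A usage sketch (exact coordinates depend on the canonical ordering chosen):

>>> import networkx as nx
>>> from networkx.algorithms.planar_drawing import combinatorial_embedding_to_pos
>>> ok, embedding = nx.check_planarity(nx.complete_graph(4))
>>> pos = combinatorial_embedding_to_pos(embedding)
>>> len(pos), all(isinstance(xy, tuple) for xy in pos.values())
(4, True)
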
 def set_position(parent, tree, remaining_nodes, delta_x, y_coordinate, pos):
     """Helper method to calculate the absolute position of nodes."""
-    pass
+    child = tree[parent]
+    parent_node_x = pos[parent][0]
+    if child is not None:
+        # Calculate pos of child
+        child_x = parent_node_x + delta_x[child]
+        pos[child] = (child_x, y_coordinate[child])
+        # Remember to calculate pos of its children
+        remaining_nodes.append(child)


 def get_canonical_ordering(embedding, outer_face):
@@ -88,7 +184,124 @@ def get_canonical_ordering(embedding, outer_face):
         http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.51.6677

     """
-    pass
+    v1 = outer_face[0]
+    v2 = outer_face[1]
+    chords = defaultdict(int)  # Maps nodes to the number of their chords
+    marked_nodes = set()
+    ready_to_pick = set(outer_face)
+
+    # Initialize outer_face_ccw_nbr (do not include v1 -> v2)
+    outer_face_ccw_nbr = {}
+    prev_nbr = v2
+    for idx in range(2, len(outer_face)):
+        outer_face_ccw_nbr[prev_nbr] = outer_face[idx]
+        prev_nbr = outer_face[idx]
+    outer_face_ccw_nbr[prev_nbr] = v1
+
+    # Initialize outer_face_cw_nbr (do not include v2 -> v1)
+    outer_face_cw_nbr = {}
+    prev_nbr = v1
+    for idx in range(len(outer_face) - 1, 0, -1):
+        outer_face_cw_nbr[prev_nbr] = outer_face[idx]
+        prev_nbr = outer_face[idx]
+
+    def is_outer_face_nbr(x, y):
+        if x not in outer_face_ccw_nbr:
+            return outer_face_cw_nbr[x] == y
+        if x not in outer_face_cw_nbr:
+            return outer_face_ccw_nbr[x] == y
+        return outer_face_ccw_nbr[x] == y or outer_face_cw_nbr[x] == y
+
+    def is_on_outer_face(x):
+        return x not in marked_nodes and (x in outer_face_ccw_nbr or x == v1)
+
+    # Initialize number of chords
+    for v in outer_face:
+        for nbr in embedding.neighbors_cw_order(v):
+            if is_on_outer_face(nbr) and not is_outer_face_nbr(v, nbr):
+                chords[v] += 1
+                ready_to_pick.discard(v)
+
+    # Initialize canonical_ordering
+    canonical_ordering = [None] * len(embedding.nodes())
+    canonical_ordering[0] = (v1, [])
+    canonical_ordering[1] = (v2, [])
+    ready_to_pick.discard(v1)
+    ready_to_pick.discard(v2)
+
+    for k in range(len(embedding.nodes()) - 1, 1, -1):
+        # 1. Pick v from ready_to_pick
+        v = ready_to_pick.pop()
+        marked_nodes.add(v)
+
+        # v has exactly two neighbors on the outer face (wp and wq)
+        wp = None
+        wq = None
+        # Iterate over neighbors of v to find wp and wq
+        nbr_iterator = iter(embedding.neighbors_cw_order(v))
+        while True:
+            nbr = next(nbr_iterator)
+            if nbr in marked_nodes:
+                # Only consider nodes that are not yet removed
+                continue
+            if is_on_outer_face(nbr):
+                # nbr is either wp or wq
+                if nbr == v1:
+                    wp = v1
+                elif nbr == v2:
+                    wq = v2
+                else:
+                    if outer_face_cw_nbr[nbr] == v:
+                        # nbr is wp
+                        wp = nbr
+                    else:
+                        # nbr is wq
+                        wq = nbr
+            if wp is not None and wq is not None:
+                # We don't need to iterate any further
+                break
+
+        # Obtain new nodes on outer face (neighbors of v from wp to wq)
+        wp_wq = [wp]
+        nbr = wp
+        while nbr != wq:
+            # Get next neighbor (clockwise on the outer face)
+            next_nbr = embedding[v][nbr]["ccw"]
+            wp_wq.append(next_nbr)
+            # Update outer face
+            outer_face_cw_nbr[nbr] = next_nbr
+            outer_face_ccw_nbr[next_nbr] = nbr
+            # Move to next neighbor of v
+            nbr = next_nbr
+
+        if len(wp_wq) == 2:
+            # There was a chord between wp and wq, decrease number of chords
+            chords[wp] -= 1
+            if chords[wp] == 0:
+                ready_to_pick.add(wp)
+            chords[wq] -= 1
+            if chords[wq] == 0:
+                ready_to_pick.add(wq)
+        else:
+            # Update all chords involving w_(p+1) to w_(q-1)
+            new_face_nodes = set(wp_wq[1:-1])
+            for w in new_face_nodes:
+                # If we do not find a chord for w later we can pick it next
+                ready_to_pick.add(w)
+                for nbr in embedding.neighbors_cw_order(w):
+                    if is_on_outer_face(nbr) and not is_outer_face_nbr(w, nbr):
+                        # There is a chord involving w
+                        chords[w] += 1
+                        ready_to_pick.discard(w)
+                        if nbr not in new_face_nodes:
+                            # Also increase chord for the neighbor
+                            # We only iterate over new_face_nodes
+                            chords[nbr] += 1
+                            ready_to_pick.discard(nbr)
+        # Set the canonical ordering node and the list of contour neighbors
+        canonical_ordering[k] = (v, wp_wq)
+
+    return canonical_ordering


 def triangulate_face(embedding, v1, v2):
@@ -101,7 +314,23 @@ def triangulate_face(embedding, v1, v2):
         The half-edge (v1, v2) belongs to the face that gets triangulated
     v2 : node
     """
-    pass
+    _, v3 = embedding.next_face_half_edge(v1, v2)
+    _, v4 = embedding.next_face_half_edge(v2, v3)
+    if v1 in (v2, v3):
+        # The component has less than 3 nodes
+        return
+    while v1 != v4:
+        # Add edge if not already present on other side
+        if embedding.has_edge(v1, v3):
+            # Cannot triangulate at this position
+            v1, v2, v3 = v2, v3, v4
+        else:
+            # Add edge for triangulation
+            embedding.add_half_edge(v1, v3, ccw=v2)
+            embedding.add_half_edge(v3, v1, cw=v2)
+            v1, v2, v3 = v1, v3, v4
+        # Get next node
+        _, v4 = embedding.next_face_half_edge(v2, v3)


 def triangulate_embedding(embedding, fully_triangulate=True):
@@ -131,7 +360,46 @@ def triangulate_embedding(embedding, fully_triangulate=True):
         nodes.

     """
-    pass
+    if len(embedding.nodes) <= 1:
+        return embedding, list(embedding.nodes)
+    embedding = nx.PlanarEmbedding(embedding)
+
+    # Get a list with a node for each connected component
+    component_nodes = [next(iter(x)) for x in nx.connected_components(embedding)]
+
+    # 1. Make graph a single component (add edge between components)
+    for i in range(len(component_nodes) - 1):
+        v1 = component_nodes[i]
+        v2 = component_nodes[i + 1]
+        embedding.connect_components(v1, v2)
+
+    # 2. Calculate faces, ensure 2-connectedness and determine outer face
+    outer_face = []  # A face with the most number of nodes
+    face_list = []
+    edges_visited = set()  # Used to keep track of already visited faces
+    for v in embedding.nodes():
+        for w in embedding.neighbors_cw_order(v):
+            new_face = make_bi_connected(embedding, v, w, edges_visited)
+            if new_face:
+                # Found a new face
+                face_list.append(new_face)
+                if len(new_face) > len(outer_face):
+                    # The face is a candidate to be the outer face
+                    outer_face = new_face
+
+    # 3. Triangulate (internal) faces
+    for face in face_list:
+        if face is not outer_face or fully_triangulate:
+            # Triangulate this face
+            triangulate_face(embedding, face[0], face[1])
+
+    if fully_triangulate:
+        v1 = outer_face[0]
+        v2 = outer_face[1]
+        v3 = embedding[v2][v1]["ccw"]
+        outer_face = [v1, v2, v3]
+
+    return embedding, outer_face


 def make_bi_connected(embedding, starting_node, outgoing_node, edges_counted):
@@ -156,4 +424,41 @@ def make_bi_connected(embedding, starting_node, outgoing_node, edges_counted):
     face_nodes: list
         A list of all nodes at the border of this face
     """
-    pass
+
+    # Check if the face has already been calculated
+    if (starting_node, outgoing_node) in edges_counted:
+        # This face was already counted
+        return []
+    edges_counted.add((starting_node, outgoing_node))
+
+    # Add all edges to edges_counted which have this face to their left
+    v1 = starting_node
+    v2 = outgoing_node
+    face_list = [starting_node]  # List of nodes around the face
+    face_set = set(face_list)  # Set for faster queries
+    _, v3 = embedding.next_face_half_edge(v1, v2)
+
+    # Move the nodes v1, v2, v3 around the face:
+    while v2 != starting_node or v3 != outgoing_node:
+        if v1 == v2:
+            raise nx.NetworkXException("Invalid half-edge")
+        # cycle is not completed yet
+        if v2 in face_set:
+            # v2 encountered twice: Add edge to ensure 2-connectedness
+            embedding.add_half_edge(v1, v3, ccw=v2)
+            embedding.add_half_edge(v3, v1, cw=v2)
+            edges_counted.add((v2, v3))
+            edges_counted.add((v3, v1))
+            v2 = v1
+        else:
+            face_set.add(v2)
+            face_list.append(v2)
+
+        # set next edge
+        v1 = v2
+        v2, v3 = embedding.next_face_half_edge(v2, v3)
+
+        # remember that this edge has been counted
+        edges_counted.add((v1, v2))
+
+    return face_list
diff --git a/networkx/algorithms/planarity.py b/networkx/algorithms/planarity.py
index 5c1c4f969..17d0bec5a 100644
--- a/networkx/algorithms/planarity.py
+++ b/networkx/algorithms/planarity.py
@@ -1,6 +1,8 @@
 from collections import defaultdict
+
 import networkx as nx
-__all__ = ['check_planarity', 'is_planar', 'PlanarEmbedding']
+
+__all__ = ["check_planarity", "is_planar", "PlanarEmbedding"]


 @nx._dispatchable
@@ -32,7 +34,8 @@ def is_planar(G):
     check_planarity :
         Check if graph is planar *and* return a `PlanarEmbedding` instance if True.
     """
-    pass
+
+    return check_planarity(G, counterexample=False)[0]


 @nx._dispatchable(returns_graph=True)
@@ -97,13 +100,34 @@ def check_planarity(G, counterexample=False):
         Lecture Notes Series on Computing: Volume 12
         2004
     """
-    pass
+
+    planarity_state = LRPlanarity(G)
+    embedding = planarity_state.lr_planarity()
+    if embedding is None:
+        # graph is not planar
+        if counterexample:
+            return False, get_counterexample(G)
+        else:
+            return False, None
+    else:
+        # graph is planar
+        return True, embedding


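Illustrative usage: $K_4$ is planar, $K_5$ is not, and with `counterexample=True` the Kuratowski subgraph is returned.

>>> import networkx as nx
>>> nx.is_planar(nx.complete_graph(4))
True
>>> ok, cert = nx.check_planarity(nx.complete_graph(5), counterexample=True)
>>> ok, cert.number_of_nodes()
(False, 5)
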
 @nx._dispatchable(returns_graph=True)
 def check_planarity_recursive(G, counterexample=False):
     """Recursive version of :meth:`check_planarity`."""
-    pass
+    planarity_state = LRPlanarity(G)
+    embedding = planarity_state.lr_planarity_recursive()
+    if embedding is None:
+        # graph is not planar
+        if counterexample:
+            return False, get_counterexample_recursive(G)
+        else:
+            return False, None
+    else:
+        # graph is planar
+        return True, embedding


 @nx._dispatchable(returns_graph=True)
@@ -126,13 +150,46 @@ def get_counterexample(G):
         A Kuratowski subgraph that proves that G is not planar.

     """
-    pass
+    # copy graph
+    G = nx.Graph(G)
+
+    if check_planarity(G)[0]:
+        raise nx.NetworkXException("G is planar - no counter example.")
+
+    # find Kuratowski subgraph
+    subgraph = nx.Graph()
+    for u in G:
+        nbrs = list(G[u])
+        for v in nbrs:
+            G.remove_edge(u, v)
+            if check_planarity(G)[0]:
+                G.add_edge(u, v)
+                subgraph.add_edge(u, v)
+
+    return subgraph


 @nx._dispatchable(returns_graph=True)
 def get_counterexample_recursive(G):
     """Recursive version of :meth:`get_counterexample`."""
-    pass
+
+    # copy graph
+    G = nx.Graph(G)
+
+    if check_planarity_recursive(G)[0]:
+        raise nx.NetworkXException("G is planar - no counter example.")
+
+    # find Kuratowski subgraph
+    subgraph = nx.Graph()
+    for u in G:
+        nbrs = list(G[u])
+        for v in nbrs:
+            G.remove_edge(u, v)
+            if check_planarity_recursive(G)[0]:
+                G.add_edge(u, v)
+                subgraph.add_edge(u, v)
+
+    return subgraph


 class Interval:
@@ -149,15 +206,18 @@ class Interval:

     def empty(self):
         """Check if the interval is empty"""
-        pass
+        return self.low is None and self.high is None

     def copy(self):
         """Returns a copy of this interval"""
-        pass
+        return Interval(self.low, self.high)

     def conflicting(self, b, planarity_state):
         """Returns True if interval I conflicts with edge b"""
-        pass
+        return (
+            not self.empty()
+            and planarity_state.lowpt[self.high] > planarity_state.lowpt[b]
+        )


 class ConflictPair:
@@ -173,47 +233,90 @@ class ConflictPair:

     def swap(self):
         """Swap left and right intervals"""
-        pass
+        temp = self.left
+        self.left = self.right
+        self.right = temp

     def lowest(self, planarity_state):
         """Returns the lowest lowpoint of a conflict pair"""
-        pass
+        if self.left.empty():
+            return planarity_state.lowpt[self.right.low]
+        if self.right.empty():
+            return planarity_state.lowpt[self.left.low]
+        return min(
+            planarity_state.lowpt[self.left.low], planarity_state.lowpt[self.right.low]
+        )


 def top_of_stack(l):
     """Returns the element on top of the stack."""
-    pass
+    if not l:
+        return None
+    return l[-1]


 class LRPlanarity:
     """A class to maintain the state during planarity check."""
-    __slots__ = ['G', 'roots', 'height', 'lowpt', 'lowpt2', 'nesting_depth',
-        'parent_edge', 'DG', 'adjs', 'ordered_adjs', 'ref', 'side', 'S',
-        'stack_bottom', 'lowpt_edge', 'left_ref', 'right_ref', 'embedding']
+
+    __slots__ = [
+        "G",
+        "roots",
+        "height",
+        "lowpt",
+        "lowpt2",
+        "nesting_depth",
+        "parent_edge",
+        "DG",
+        "adjs",
+        "ordered_adjs",
+        "ref",
+        "side",
+        "S",
+        "stack_bottom",
+        "lowpt_edge",
+        "left_ref",
+        "right_ref",
+        "embedding",
+    ]

     def __init__(self, G):
+        # copy G without adding self-loops
         self.G = nx.Graph()
         self.G.add_nodes_from(G.nodes)
         for e in G.edges:
             if e[0] != e[1]:
                 self.G.add_edge(e[0], e[1])
+
         self.roots = []
-        self.height = defaultdict(lambda : None)
-        self.lowpt = {}
-        self.lowpt2 = {}
-        self.nesting_depth = {}
-        self.parent_edge = defaultdict(lambda : None)
+
+        # distance from tree root
+        self.height = defaultdict(lambda: None)
+
+        self.lowpt = {}  # height of lowest return point of an edge
+        self.lowpt2 = {}  # height of second lowest return point
+        self.nesting_depth = {}  # for nesting order
+
+        # None -> missing edge
+        self.parent_edge = defaultdict(lambda: None)
+
+        # oriented DFS graph
         self.DG = nx.DiGraph()
         self.DG.add_nodes_from(G.nodes)
+
         self.adjs = {}
         self.ordered_adjs = {}
-        self.ref = defaultdict(lambda : None)
-        self.side = defaultdict(lambda : 1)
+
+        self.ref = defaultdict(lambda: None)
+        self.side = defaultdict(lambda: 1)
+
+        # stack of conflict pairs
         self.S = []
         self.stack_bottom = {}
         self.lowpt_edge = {}
+
         self.left_ref = {}
         self.right_ref = {}
+
         self.embedding = PlanarEmbedding()

     def lr_planarity(self):
@@ -224,43 +327,442 @@ class LRPlanarity:
         embedding : dict
             If the graph is planar an embedding is returned. Otherwise None.
         """
-        pass
+        if self.G.order() > 2 and self.G.size() > 3 * self.G.order() - 6:
+            # graph is not planar
+            return None
+
+        # make adjacency lists for dfs
+        for v in self.G:
+            self.adjs[v] = list(self.G[v])
+
+        # orientation of the graph by depth first search traversal
+        for v in self.G:
+            if self.height[v] is None:
+                self.height[v] = 0
+                self.roots.append(v)
+                self.dfs_orientation(v)
+
+        # Free no longer used variables
+        self.G = None
+        self.lowpt2 = None
+        self.adjs = None
+
+        # testing
+        for v in self.DG:  # sort the adjacency lists by nesting depth
+            # note: this sorting leads to non linear time
+            self.ordered_adjs[v] = sorted(
+                self.DG[v], key=lambda x: self.nesting_depth[(v, x)]
+            )
+        for v in self.roots:
+            if not self.dfs_testing(v):
+                return None
+
+        # Free no longer used variables
+        self.height = None
+        self.lowpt = None
+        self.S = None
+        self.stack_bottom = None
+        self.lowpt_edge = None
+
+        for e in self.DG.edges:
+            self.nesting_depth[e] = self.sign(e) * self.nesting_depth[e]
+
+        self.embedding.add_nodes_from(self.DG.nodes)
+        for v in self.DG:
+            # sort the adjacency lists again
+            self.ordered_adjs[v] = sorted(
+                self.DG[v], key=lambda x: self.nesting_depth[(v, x)]
+            )
+            # initialize the embedding
+            previous_node = None
+            for w in self.ordered_adjs[v]:
+                self.embedding.add_half_edge(v, w, ccw=previous_node)
+                previous_node = w
+
+        # Free no longer used variables
+        self.DG = None
+        self.nesting_depth = None
+        self.ref = None
+
+        # compute the complete embedding
+        for v in self.roots:
+            self.dfs_embedding(v)
+
+        # Free no longer used variables
+        self.roots = None
+        self.parent_edge = None
+        self.ordered_adjs = None
+        self.left_ref = None
+        self.right_ref = None
+        self.side = None
+
+        return self.embedding

     def lr_planarity_recursive(self):
         """Recursive version of :meth:`lr_planarity`."""
-        pass
+        if self.G.order() > 2 and self.G.size() > 3 * self.G.order() - 6:
+            # graph is not planar
+            return None
+
+        # orientation of the graph by depth first search traversal
+        for v in self.G:
+            if self.height[v] is None:
+                self.height[v] = 0
+                self.roots.append(v)
+                self.dfs_orientation_recursive(v)
+
+        # Free no longer used variable
+        self.G = None
+
+        # testing
+        for v in self.DG:  # sort the adjacency lists by nesting depth
+            # note: this sorting leads to non linear time
+            self.ordered_adjs[v] = sorted(
+                self.DG[v], key=lambda x: self.nesting_depth[(v, x)]
+            )
+        for v in self.roots:
+            if not self.dfs_testing_recursive(v):
+                return None
+
+        for e in self.DG.edges:
+            self.nesting_depth[e] = self.sign_recursive(e) * self.nesting_depth[e]
+
+        self.embedding.add_nodes_from(self.DG.nodes)
+        for v in self.DG:
+            # sort the adjacency lists again
+            self.ordered_adjs[v] = sorted(
+                self.DG[v], key=lambda x: self.nesting_depth[(v, x)]
+            )
+            # initialize the embedding
+            previous_node = None
+            for w in self.ordered_adjs[v]:
+                self.embedding.add_half_edge(v, w, ccw=previous_node)
+                previous_node = w
+
+        # compute the complete embedding
+        for v in self.roots:
+            self.dfs_embedding_recursive(v)
+
+        return self.embedding

     def dfs_orientation(self, v):
         """Orient the graph by DFS, compute lowpoints and nesting order."""
-        pass
+        # the recursion stack
+        dfs_stack = [v]
+        # index of next edge to handle in adjacency list of each node
+        ind = defaultdict(lambda: 0)
+        # boolean to indicate whether to skip the initial work for an edge
+        skip_init = defaultdict(lambda: False)
+
+        while dfs_stack:
+            v = dfs_stack.pop()
+            e = self.parent_edge[v]
+
+            for w in self.adjs[v][ind[v] :]:
+                vw = (v, w)
+
+                if not skip_init[vw]:
+                    if (v, w) in self.DG.edges or (w, v) in self.DG.edges:
+                        ind[v] += 1
+                        continue  # the edge was already oriented
+
+                    self.DG.add_edge(v, w)  # orient the edge
+
+                    self.lowpt[vw] = self.height[v]
+                    self.lowpt2[vw] = self.height[v]
+                    if self.height[w] is None:  # (v, w) is a tree edge
+                        self.parent_edge[w] = vw
+                        self.height[w] = self.height[v] + 1
+
+                        dfs_stack.append(v)  # revisit v after finishing w
+                        dfs_stack.append(w)  # visit w next
+                        skip_init[vw] = True  # don't redo this block
+                        break  # handle next node in dfs_stack (i.e. w)
+                    else:  # (v, w) is a back edge
+                        self.lowpt[vw] = self.height[w]
+
+                # determine nesting graph
+                self.nesting_depth[vw] = 2 * self.lowpt[vw]
+                if self.lowpt2[vw] < self.height[v]:  # chordal
+                    self.nesting_depth[vw] += 1
+
+                # update lowpoints of parent edge e
+                if e is not None:
+                    if self.lowpt[vw] < self.lowpt[e]:
+                        self.lowpt2[e] = min(self.lowpt[e], self.lowpt2[vw])
+                        self.lowpt[e] = self.lowpt[vw]
+                    elif self.lowpt[vw] > self.lowpt[e]:
+                        self.lowpt2[e] = min(self.lowpt2[e], self.lowpt[vw])
+                    else:
+                        self.lowpt2[e] = min(self.lowpt2[e], self.lowpt2[vw])
+
+                ind[v] += 1

     def dfs_orientation_recursive(self, v):
         """Recursive version of :meth:`dfs_orientation`."""
-        pass
+        e = self.parent_edge[v]
+        for w in self.G[v]:
+            if (v, w) in self.DG.edges or (w, v) in self.DG.edges:
+                continue  # the edge was already oriented
+            vw = (v, w)
+            self.DG.add_edge(v, w)  # orient the edge
+
+            self.lowpt[vw] = self.height[v]
+            self.lowpt2[vw] = self.height[v]
+            if self.height[w] is None:  # (v, w) is a tree edge
+                self.parent_edge[w] = vw
+                self.height[w] = self.height[v] + 1
+                self.dfs_orientation_recursive(w)
+            else:  # (v, w) is a back edge
+                self.lowpt[vw] = self.height[w]
+
+            # determine nesting graph
+            self.nesting_depth[vw] = 2 * self.lowpt[vw]
+            if self.lowpt2[vw] < self.height[v]:  # chordal
+                self.nesting_depth[vw] += 1
+
+            # update lowpoints of parent edge e
+            if e is not None:
+                if self.lowpt[vw] < self.lowpt[e]:
+                    self.lowpt2[e] = min(self.lowpt[e], self.lowpt2[vw])
+                    self.lowpt[e] = self.lowpt[vw]
+                elif self.lowpt[vw] > self.lowpt[e]:
+                    self.lowpt2[e] = min(self.lowpt2[e], self.lowpt[vw])
+                else:
+                    self.lowpt2[e] = min(self.lowpt2[e], self.lowpt2[vw])

     def dfs_testing(self, v):
         """Test for LR partition."""
-        pass
+        # the recursion stack
+        dfs_stack = [v]
+        # index of next edge to handle in adjacency list of each node
+        ind = defaultdict(lambda: 0)
+        # boolean to indicate whether to skip the initial work for an edge
+        skip_init = defaultdict(lambda: False)
+
+        while dfs_stack:
+            v = dfs_stack.pop()
+            e = self.parent_edge[v]
+            # to indicate whether to skip the final block after the for loop
+            skip_final = False
+
+            for w in self.ordered_adjs[v][ind[v] :]:
+                ei = (v, w)
+
+                if not skip_init[ei]:
+                    self.stack_bottom[ei] = top_of_stack(self.S)
+
+                    if ei == self.parent_edge[w]:  # tree edge
+                        dfs_stack.append(v)  # revisit v after finishing w
+                        dfs_stack.append(w)  # visit w next
+                        skip_init[ei] = True  # don't redo this block
+                        skip_final = True  # skip final work after breaking
+                        break  # handle next node in dfs_stack (i.e. w)
+                    else:  # back edge
+                        self.lowpt_edge[ei] = ei
+                        self.S.append(ConflictPair(right=Interval(ei, ei)))
+
+                # integrate new return edges
+                if self.lowpt[ei] < self.height[v]:
+                    if w == self.ordered_adjs[v][0]:  # e_i has return edge
+                        self.lowpt_edge[e] = self.lowpt_edge[ei]
+                    else:  # add constraints of e_i
+                        if not self.add_constraints(ei, e):
+                            # graph is not planar
+                            return False
+
+                ind[v] += 1
+
+            if not skip_final:
+                # remove back edges returning to parent
+                if e is not None:  # v isn't root
+                    self.remove_back_edges(e)
+
+        return True

     def dfs_testing_recursive(self, v):
         """Recursive version of :meth:`dfs_testing`."""
-        pass
+        e = self.parent_edge[v]
+        for w in self.ordered_adjs[v]:
+            ei = (v, w)
+            self.stack_bottom[ei] = top_of_stack(self.S)
+            if ei == self.parent_edge[w]:  # tree edge
+                if not self.dfs_testing_recursive(w):
+                    return False
+            else:  # back edge
+                self.lowpt_edge[ei] = ei
+                self.S.append(ConflictPair(right=Interval(ei, ei)))
+
+            # integrate new return edges
+            if self.lowpt[ei] < self.height[v]:
+                if w == self.ordered_adjs[v][0]:  # e_i has return edge
+                    self.lowpt_edge[e] = self.lowpt_edge[ei]
+                else:  # add constraints of e_i
+                    if not self.add_constraints(ei, e):
+                        # graph is not planar
+                        return False
+
+        # remove back edges returning to parent
+        if e is not None:  # v isn't root
+            self.remove_back_edges(e)
+        return True
+
+    def add_constraints(self, ei, e):
+        P = ConflictPair()
+        # merge return edges of e_i into P.right
+        while True:
+            Q = self.S.pop()
+            if not Q.left.empty():
+                Q.swap()
+            if not Q.left.empty():  # not planar
+                return False
+            if self.lowpt[Q.right.low] > self.lowpt[e]:
+                # merge intervals
+                if P.right.empty():  # topmost interval
+                    P.right = Q.right.copy()
+                else:
+                    self.ref[P.right.low] = Q.right.high
+                P.right.low = Q.right.low
+            else:  # align
+                self.ref[Q.right.low] = self.lowpt_edge[e]
+            if top_of_stack(self.S) == self.stack_bottom[ei]:
+                break
+        # merge conflicting return edges of e_1,...,e_i-1 into P.left
+        while top_of_stack(self.S).left.conflicting(ei, self) or top_of_stack(
+            self.S
+        ).right.conflicting(ei, self):
+            Q = self.S.pop()
+            if Q.right.conflicting(ei, self):
+                Q.swap()
+            if Q.right.conflicting(ei, self):  # not planar
+                return False
+            # merge interval below lowpt(e_i) into P.right
+            self.ref[P.right.low] = Q.right.high
+            if Q.right.low is not None:
+                P.right.low = Q.right.low
+
+            if P.left.empty():  # topmost interval
+                P.left = Q.left.copy()
+            else:
+                self.ref[P.left.low] = Q.left.high
+            P.left.low = Q.left.low
+
+        if not (P.left.empty() and P.right.empty()):
+            self.S.append(P)
+        return True
+
+    def remove_back_edges(self, e):
+        u = e[0]
+        # trim back edges ending at parent u
+        # drop entire conflict pairs
+        while self.S and top_of_stack(self.S).lowest(self) == self.height[u]:
+            P = self.S.pop()
+            if P.left.low is not None:
+                self.side[P.left.low] = -1
+
+        if self.S:  # one more conflict pair to consider
+            P = self.S.pop()
+            # trim left interval
+            while P.left.high is not None and P.left.high[1] == u:
+                P.left.high = self.ref[P.left.high]
+            if P.left.high is None and P.left.low is not None:
+                # just emptied
+                self.ref[P.left.low] = P.right.low
+                self.side[P.left.low] = -1
+                P.left.low = None
+            # trim right interval
+            while P.right.high is not None and P.right.high[1] == u:
+                P.right.high = self.ref[P.right.high]
+            if P.right.high is None and P.right.low is not None:
+                # just emptied
+                self.ref[P.right.low] = P.left.low
+                self.side[P.right.low] = -1
+                P.right.low = None
+            self.S.append(P)
+
+        # side of e is side of a highest return edge
+        if self.lowpt[e] < self.height[u]:  # e has return edge
+            hl = top_of_stack(self.S).left.high
+            hr = top_of_stack(self.S).right.high
+
+            if hl is not None and (hr is None or self.lowpt[hl] > self.lowpt[hr]):
+                self.ref[e] = hl
+            else:
+                self.ref[e] = hr

     def dfs_embedding(self, v):
         """Completes the embedding."""
-        pass
+        # the recursion stack
+        dfs_stack = [v]
+        # index of next edge to handle in adjacency list of each node
+        ind = defaultdict(lambda: 0)
+
+        while dfs_stack:
+            v = dfs_stack.pop()
+
+            for w in self.ordered_adjs[v][ind[v] :]:
+                ind[v] += 1
+                ei = (v, w)
+
+                if ei == self.parent_edge[w]:  # tree edge
+                    self.embedding.add_half_edge_first(w, v)
+                    self.left_ref[v] = w
+                    self.right_ref[v] = w
+
+                    dfs_stack.append(v)  # revisit v after finishing w
+                    dfs_stack.append(w)  # visit w next
+                    break  # handle next node in dfs_stack (i.e. w)
+                else:  # back edge
+                    if self.side[ei] == 1:
+                        self.embedding.add_half_edge(w, v, ccw=self.right_ref[w])
+                    else:
+                        self.embedding.add_half_edge(w, v, cw=self.left_ref[w])
+                        self.left_ref[w] = v

     def dfs_embedding_recursive(self, v):
         """Recursive version of :meth:`dfs_embedding`."""
-        pass
+        for w in self.ordered_adjs[v]:
+            ei = (v, w)
+            if ei == self.parent_edge[w]:  # tree edge
+                self.embedding.add_half_edge_first(w, v)
+                self.left_ref[v] = w
+                self.right_ref[v] = w
+                self.dfs_embedding_recursive(w)
+            else:  # back edge
+                if self.side[ei] == 1:
+                    # place v directly after right_ref[w] in embedding list of w
+                    self.embedding.add_half_edge(w, v, ccw=self.right_ref[w])
+                else:
+                    # place v directly before left_ref[w] in embedding list of w
+                    self.embedding.add_half_edge(w, v, cw=self.left_ref[w])
+                    self.left_ref[w] = v

     def sign(self, e):
         """Resolve the relative side of an edge to the absolute side."""
-        pass
+        # the recursion stack
+        dfs_stack = [e]
+        # dict to remember reference edges
+        old_ref = defaultdict(lambda: None)
+
+        while dfs_stack:
+            e = dfs_stack.pop()
+
+            if self.ref[e] is not None:
+                dfs_stack.append(e)  # revisit e after finishing self.ref[e]
+                dfs_stack.append(self.ref[e])  # visit self.ref[e] next
+                old_ref[e] = self.ref[e]  # remember value of self.ref[e]
+                self.ref[e] = None
+            else:
+                self.side[e] *= self.side[old_ref[e]]
+
+        return self.side[e]

     def sign_recursive(self, e):
         """Recursive version of :meth:`sign`."""
-        pass
+        if self.ref[e] is not None:
+            self.side[e] = self.side[e] * self.sign_recursive(self.ref[e])
+            self.ref[e] = None
+        return self.side[e]


 class PlanarEmbedding(nx.DiGraph):
@@ -365,7 +867,9 @@ class PlanarEmbedding(nx.DiGraph):
         Any edge additions to a PlanarEmbedding should be done using
         method `add_half_edge`.
         """
-        pass
+        raise NotImplementedError(
+            "Use `add_half_edge` method to add edges to a PlanarEmbedding."
+        )

     def get_data(self):
         """Converts the adjacency structure into a better readable structure.
@@ -381,7 +885,10 @@ class PlanarEmbedding(nx.DiGraph):
         set_data

         """
-        pass
+        embedding = {}
+        for v in self:
+            embedding[v] = list(self.neighbors_cw_order(v))
+        return embedding

     def set_data(self, data):
         """Inserts edges according to given sorted neighbor list.
@@ -399,7 +906,11 @@ class PlanarEmbedding(nx.DiGraph):
         get_data

         """
-        pass
+        for v in data:
+            ref = None
+            for w in reversed(data[v]):
+                self.add_half_edge(v, w, cw=ref)
+                ref = w

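An illustrative round trip: `set_data` takes clockwise neighbor lists, and `neighbors_cw_order` recovers them.

>>> import networkx as nx
>>> emb = nx.PlanarEmbedding()
>>> emb.set_data({0: [1, 2], 1: [2, 0], 2: [0, 1]})
>>> list(emb.neighbors_cw_order(0))
[1, 2]
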
     def remove_node(self, n):
         """Remove node n.
@@ -423,7 +934,24 @@ class PlanarEmbedding(nx.DiGraph):
         remove_nodes_from

         """
-        pass
+        try:
+            for u in self._pred[n]:
+                succs_u = self._succ[u]
+                un_cw = succs_u[n]["cw"]
+                un_ccw = succs_u[n]["ccw"]
+                del succs_u[n]
+                del self._pred[u][n]
+                if n != un_cw:
+                    succs_u[un_cw]["ccw"] = un_ccw
+                    succs_u[un_ccw]["cw"] = un_cw
+            del self._node[n]
+            del self._succ[n]
+            del self._pred[n]
+        except KeyError as err:  # NetworkXError if n not in self
+            raise nx.NetworkXError(
+                f"The node {n} is not in the planar embedding."
+            ) from err
+        nx._clear_cache(self)

     def remove_nodes_from(self, nodes):
         """Remove multiple nodes.
@@ -449,7 +977,10 @@ class PlanarEmbedding(nx.DiGraph):
         object to `G.remove_nodes_from`.

         """
-        pass
+        for n in nodes:
+            if n in self._node:
+                self.remove_node(n)
+            # silently skip non-existing nodes

     def neighbors_cw_order(self, v):
         """Generator for the neighbors of v in clockwise order.
@@ -463,7 +994,16 @@ class PlanarEmbedding(nx.DiGraph):
         node

         """
-        pass
+        succs = self._succ[v]
+        if not succs:
+            # v has no neighbors
+            return
+        start_node = next(reversed(succs))
+        yield start_node
+        current_node = succs[start_node]["cw"]
+        while start_node != current_node:
+            yield current_node
+            current_node = succs[current_node]["cw"]

     def add_half_edge(self, start_node, end_node, *, cw=None, ccw=None):
         """Adds a half-edge from `start_node` to `end_node`.
@@ -498,7 +1038,47 @@ class PlanarEmbedding(nx.DiGraph):
         --------
         connect_components
         """
-        pass
+
+        succs = self._succ.get(start_node)
+        if succs:
+            # there is already some edge out of start_node
+            leftmost_nbr = next(reversed(self._succ[start_node]))
+            if cw is not None:
+                if cw not in succs:
+                    raise nx.NetworkXError("Invalid clockwise reference node.")
+                if ccw is not None:
+                    raise nx.NetworkXError("Only one of cw/ccw can be specified.")
+                ref_ccw = succs[cw]["ccw"]
+                super().add_edge(start_node, end_node, cw=cw, ccw=ref_ccw)
+                succs[ref_ccw]["cw"] = end_node
+                succs[cw]["ccw"] = end_node
+                # when (cw == leftmost_nbr), the newly added neighbor is
+                # already at the end of dict self._succ[start_node] and
+                # takes the place of the former leftmost_nbr
+                move_leftmost_nbr_to_end = cw != leftmost_nbr
+            elif ccw is not None:
+                if ccw not in succs:
+                    raise nx.NetworkXError("Invalid counterclockwise reference node.")
+                ref_cw = succs[ccw]["cw"]
+                super().add_edge(start_node, end_node, cw=ref_cw, ccw=ccw)
+                succs[ref_cw]["ccw"] = end_node
+                succs[ccw]["cw"] = end_node
+                move_leftmost_nbr_to_end = True
+            else:
+                raise nx.NetworkXError(
+                    "Node already has out-half-edge(s), either cw or ccw reference node required."
+                )
+            if move_leftmost_nbr_to_end:
+                # LRPlanarity (via self.add_half_edge_first()) requires that
+                # we keep track of the leftmost neighbor, which we accomplish
+                # by keeping it as the last key in dict self._succ[start_node]
+                succs[leftmost_nbr] = succs.pop(leftmost_nbr)
+
+        else:
+            if cw is not None or ccw is not None:
+                raise nx.NetworkXError("Invalid reference node.")
+            # adding the first edge out of start_node
+            super().add_edge(start_node, end_node, ccw=end_node, cw=end_node)

     def check_structure(self):
         """Runs without exceptions if this object is valid.
@@ -517,7 +1097,46 @@ class PlanarEmbedding(nx.DiGraph):
             This exception is raised with a short explanation if the
             PlanarEmbedding is invalid.
         """
-        pass
+        # Check fundamental structure
+        for v in self:
+            try:
+                sorted_nbrs = set(self.neighbors_cw_order(v))
+            except KeyError as err:
+                msg = f"Bad embedding. Missing orientation for a neighbor of {v}"
+                raise nx.NetworkXException(msg) from err
+
+            unsorted_nbrs = set(self[v])
+            if sorted_nbrs != unsorted_nbrs:
+                msg = "Bad embedding. Edge orientations not set correctly."
+                raise nx.NetworkXException(msg)
+            for w in self[v]:
+                # Check if opposite half-edge exists
+                if not self.has_edge(w, v):
+                    msg = "Bad embedding. Opposite half-edge is missing."
+                    raise nx.NetworkXException(msg)
+
+        # Check planarity
+        counted_half_edges = set()
+        for component in nx.connected_components(self):
+            if len(component) == 1:
+                # Don't need to check single node component
+                continue
+            num_nodes = len(component)
+            num_half_edges = 0
+            num_faces = 0
+            for v in component:
+                for w in self.neighbors_cw_order(v):
+                    num_half_edges += 1
+                    if (v, w) not in counted_half_edges:
+                        # We encountered a new face
+                        num_faces += 1
+                        # Mark all half-edges belonging to this face
+                        self.traverse_face(v, w, counted_half_edges)
+            num_edges = num_half_edges // 2  # num_half_edges is even
+            if num_nodes - num_edges + num_faces != 2:
+                # The result does not match Euler's formula
+                msg = "Bad embedding. The graph does not match Euler's formula"
+                raise nx.NetworkXException(msg)

     def add_half_edge_ccw(self, start_node, end_node, reference_neighbor):
         """Adds a half-edge from start_node to end_node.
@@ -546,7 +1165,7 @@ class PlanarEmbedding(nx.DiGraph):
         connect_components

         """
-        pass
+        self.add_half_edge(start_node, end_node, cw=reference_neighbor)

     def add_half_edge_cw(self, start_node, end_node, reference_neighbor):
         """Adds a half-edge from start_node to end_node.
@@ -574,7 +1193,7 @@ class PlanarEmbedding(nx.DiGraph):
         add_half_edge_ccw
         connect_components
         """
-        pass
+        self.add_half_edge(start_node, end_node, ccw=reference_neighbor)

     def remove_edge(self, u, v):
         """Remove the edge between u and v.
@@ -594,7 +1213,28 @@ class PlanarEmbedding(nx.DiGraph):
         --------
         remove_edges_from : remove a collection of edges
         """
-        pass
+        try:
+            succs_u = self._succ[u]
+            succs_v = self._succ[v]
+            uv_cw = succs_u[v]["cw"]
+            uv_ccw = succs_u[v]["ccw"]
+            vu_cw = succs_v[u]["cw"]
+            vu_ccw = succs_v[u]["ccw"]
+            del succs_u[v]
+            del self._pred[v][u]
+            del succs_v[u]
+            del self._pred[u][v]
+            if v != uv_cw:
+                succs_u[uv_cw]["ccw"] = uv_ccw
+                succs_u[uv_ccw]["cw"] = uv_cw
+            if u != vu_cw:
+                succs_v[vu_cw]["ccw"] = vu_ccw
+                succs_v[vu_ccw]["cw"] = vu_cw
+        except KeyError as err:
+            raise nx.NetworkXError(
+                f"The edge {u}-{v} is not in the planar embedding."
+            ) from err
+        nx._clear_cache(self)

     def remove_edges_from(self, ebunch):
         """Remove all edges specified in ebunch.
@@ -622,7 +1262,12 @@ class PlanarEmbedding(nx.DiGraph):
         >>> ebunch = [(1, 2), (2, 3)]
         >>> G.remove_edges_from(ebunch)
         """
-        pass
+        for e in ebunch:
+            u, v = e[:2]  # ignore edge data
+            # assuming that the PlanarEmbedding is valid, if the half_edge
+            # (u, v) is in the graph, then so is half_edge (v, u)
+            if u in self._succ and v in self._succ[u]:
+                self.remove_edge(u, v)

     def connect_components(self, v, w):
         """Adds half-edges for (v, w) and (w, v) at some position.
@@ -643,7 +1288,16 @@ class PlanarEmbedding(nx.DiGraph):
         --------
         add_half_edge
         """
-        pass
+        if v in self._succ and self._succ[v]:
+            ref = next(reversed(self._succ[v]))
+        else:
+            ref = None
+        self.add_half_edge(v, w, cw=ref)
+        if w in self._succ and self._succ[w]:
+            ref = next(reversed(self._succ[w]))
+        else:
+            ref = None
+        self.add_half_edge(w, v, cw=ref)

     def add_half_edge_first(self, start_node, end_node):
         """Add a half-edge and set end_node as start_node's leftmost neighbor.
@@ -661,7 +1315,11 @@ class PlanarEmbedding(nx.DiGraph):
         add_half_edge
         connect_components
         """
-        pass
+        succs = self._succ.get(start_node)
+        # the leftmost neighbor is the last entry in the
+        # self._succ[start_node] dict
+        leftmost_nbr = next(reversed(succs)) if succs else None
+        self.add_half_edge(start_node, end_node, cw=leftmost_nbr)

     def next_face_half_edge(self, v, w):
         """Returns the following half-edge left of a face.
@@ -675,7 +1333,8 @@ class PlanarEmbedding(nx.DiGraph):
         -------
         half-edge : tuple
         """
-        pass
+        new_node = self[w][v]["ccw"]
+        return w, new_node

     def traverse_face(self, v, w, mark_half_edges=None):
         """Returns nodes on the face that belong to the half-edge (v, w).
@@ -701,7 +1360,24 @@ class PlanarEmbedding(nx.DiGraph):
         face : list
             A list of nodes that lie on this face.
         """
-        pass
+        if mark_half_edges is None:
+            mark_half_edges = set()
+
+        face_nodes = [v]
+        mark_half_edges.add((v, w))
+        prev_node = v
+        cur_node = w
+        # Last half-edge is (incoming_node, v)
+        incoming_node = self[v][w]["cw"]
+
+        while cur_node != v or prev_node != incoming_node:
+            face_nodes.append(cur_node)
+            prev_node, cur_node = self.next_face_half_edge(prev_node, cur_node)
+            if (prev_node, cur_node) in mark_half_edges:
+                raise nx.NetworkXException("Bad planar embedding. Impossible face.")
+            mark_half_edges.add((prev_node, cur_node))
+
+        return face_nodes

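An illustrative check: walking a face of a triangle embedding visits each node once.

>>> import networkx as nx
>>> emb = nx.PlanarEmbedding()
>>> emb.set_data({0: [1, 2], 1: [2, 0], 2: [0, 1]})
>>> emb.traverse_face(0, 1)
[0, 1, 2]
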
     def is_directed(self):
         """A valid PlanarEmbedding is undirected.
@@ -710,4 +1386,17 @@ class PlanarEmbedding(nx.DiGraph):
         half-edge (v, w) the half-edge in the opposite direction (w, v) is also
         contained.
         """
-        pass
+        return False
+
+    def copy(self, as_view=False):
+        if as_view is True:
+            return nx.graphviews.generic_graph_view(self)
+        G = self.__class__()
+        G.graph.update(self.graph)
+        G.add_nodes_from((n, d.copy()) for n, d in self._node.items())
+        super(self.__class__, G).add_edges_from(
+            (u, v, datadict.copy())
+            for u, nbrs in self._adj.items()
+            for v, datadict in nbrs.items()
+        )
+        return G
diff --git a/networkx/algorithms/polynomials.py b/networkx/algorithms/polynomials.py
index 29cd24353..217c7dbe3 100644
--- a/networkx/algorithms/polynomials.py
+++ b/networkx/algorithms/polynomials.py
@@ -19,18 +19,20 @@ x**4 - 6*x**2 - 8*x - 3


 .. [1] Y. Shi, M. Dehmer, X. Li, I. Gutman,
-   "Graph Polynomials\"
+   "Graph Polynomials"
 """
 from collections import deque
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['tutte_polynomial', 'chromatic_polynomial']
+
+__all__ = ["tutte_polynomial", "chromatic_polynomial"]


-@not_implemented_for('directed')
+@not_implemented_for("directed")
 @nx._dispatchable
 def tutte_polynomial(G):
-    """Returns the Tutte polynomial of `G`
+    r"""Returns the Tutte polynomial of `G`

     This function computes the Tutte polynomial via an iterative version of
     the deletion-contraction algorithm.
@@ -51,25 +53,25 @@ def tutte_polynomial(G):

     .. math::

-        T_G(x, y) = \\sum_{A \\in E} (x-1)^{c(A) - c(E)} (y-1)^{c(A) + |A| - n(G)}
+        T_G(x, y) = \sum_{A \in E} (x-1)^{c(A) - c(E)} (y-1)^{c(A) + |A| - n(G)}

     Def 2 (spanning tree expansion): Let `G` be an undirected graph, `T` a spanning
     tree of `G`, and `E` the edge set of `G`. Let `E` have an arbitrary strict
     linear order `L`. Let `B_e` be the unique minimal nonempty edge cut of
-    $E \\setminus T \\cup {e}$. An edge `e` is internally active with respect to
+    $E \setminus T \cup \{e\}$. An edge `e` is internally active with respect to
     `T` and `L` if `e` is the least edge in `B_e` according to the linear order
     `L`. The internal activity of `T` (denoted `i(T)`) is the number of edges
-    in $E \\setminus T$ that are internally active with respect to `T` and `L`.
-    Let `P_e` be the unique path in $T \\cup {e}$ whose source and target vertex
+    in $E \setminus T$ that are internally active with respect to `T` and `L`.
+    Let `P_e` be the unique path in $T \cup \{e\}$ whose source and target vertex
     are the same. An edge `e` is externally active with respect to `T` and `L`
     if `e` is the least edge in `P_e` according to the linear order `L`. The
     external activity of `T` (denoted `e(T)`) is the number of edges in
-    $E \\setminus T$ that are externally active with respect to `T` and `L`.
+    $E \setminus T$ that are externally active with respect to `T` and `L`.
     Then [4]_ [5]_:

     .. math::

-        T_G(x, y) = \\sum_{T \\text{ a spanning tree of } G} x^{i(T)} y^{e(T)}
+        T_G(x, y) = \sum_{T \text{ a spanning tree of } G} x^{i(T)} y^{e(T)}

     Def 3 (deletion-contraction recurrence): For `G` an undirected graph, `G-e`
     the graph obtained from `G` by deleting edge `e`, `G/e` the graph obtained
@@ -77,10 +79,10 @@ def tutte_polynomial(G):
     and `l(G)` the number of self-loops of `G`:

     .. math::
-        T_G(x, y) = \\begin{cases}
-          x^{k(G)} y^{l(G)}, & \\text{if all edges are cut-edges or self-loops} \\\\
-           T_{G-e}(x, y) + T_{G/e}(x, y), & \\text{otherwise, for an arbitrary edge $e$ not a cut-edge or loop}
-        \\end{cases}
+        T_G(x, y) = \begin{cases}
+          x^{k(G)} y^{l(G)}, & \text{if all edges are cut-edges or self-loops} \\
+           T_{G-e}(x, y) + T_{G/e}(x, y), & \text{otherwise, for an arbitrary edge $e$ not a cut-edge or loop}
+        \end{cases}

     Parameters
     ----------
@@ -147,13 +149,40 @@ def tutte_polynomial(G):
        Structural Analysis of Complex Networks, 2011
        https://arxiv.org/pdf/0803.3079.pdf
     """
-    pass
-
-
-@not_implemented_for('directed')
+    import sympy
+
+    x = sympy.Symbol("x")
+    y = sympy.Symbol("y")
+    stack = deque()
+    stack.append(nx.MultiGraph(G))
+
+    polynomial = 0
+    while stack:
+        G = stack.pop()
+        bridges = set(nx.bridges(G))
+
+        e = None
+        for i in G.edges:
+            if (i[0], i[1]) not in bridges and i[0] != i[1]:
+                e = i
+                break
+        if not e:
+            loops = list(nx.selfloop_edges(G, keys=True))
+            polynomial += x ** len(bridges) * y ** len(loops)
+        else:
+            # deletion-contraction
+            C = nx.contracted_edge(G, e, self_loops=True)
+            C.remove_edge(e[0], e[0])
+            G.remove_edge(*e)
+            stack.append(G)
+            stack.append(C)
+    return sympy.simplify(polynomial)
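For reference, a usage sketch of the deletion-contraction loop above (not part
of the patch): the triangle has three spanning trees, so its Tutte polynomial
has three terms.

    import networkx as nx

    # Each spanning tree of C_3 contributes one monomial x**i(T) * y**e(T).
    print(nx.tutte_polynomial(nx.cycle_graph(3)))  # x**2 + x + y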
+
+
+@not_implemented_for("directed")
 @nx._dispatchable
 def chromatic_polynomial(G):
-    """Returns the chromatic polynomial of `G`
+    r"""Returns the chromatic polynomial of `G`

     This function computes the chromatic polynomial via an iterative version of
     the deletion-contraction algorithm.
@@ -171,7 +200,7 @@ def chromatic_polynomial(G):

     .. math::

-        X_G(x) = \\sum_{S \\subseteq E} (-1)^{|S|} x^{c(G(S))}
+        X_G(x) = \sum_{S \subseteq E} (-1)^{|S|} x^{c(G(S))}


     Def 2 (interpolating polynomial):
@@ -179,7 +208,7 @@ def chromatic_polynomial(G):
     and `k_i` the number of distinct ways to color the vertices of `G` with `i`
     unique colors (for `i` a natural number at most `n(G)`), `X_G(x)` is the
     unique Lagrange interpolating polynomial of degree `n(G)` through the points
-    `(0, k_0), (1, k_1), \\dots, (n(G), k_{n(G)})` [2]_.
+    `(0, k_0), (1, k_1), \dots, (n(G), k_{n(G)})` [2]_.


     Def 3 (chromatic recurrence):
@@ -188,10 +217,10 @@ def chromatic_polynomial(G):
     the number of vertices of `G`, and `e(G)` the number of edges of `G` [3]_:

     .. math::
-        X_G(x) = \\begin{cases}
-          x^{n(G)}, & \\text{if $e(G)=0$} \\\\
-           X_{G-e}(x) - X_{G/e}(x), & \\text{otherwise, for an arbitrary edge $e$}
-        \\end{cases}
+        X_G(x) = \begin{cases}
+          x^{n(G)}, & \text{if $e(G)=0$} \\
+           X_{G-e}(x) - X_{G/e}(x), & \text{otherwise, for an arbitrary edge $e$}
+        \end{cases}

     This formulation is also known as the Fundamental Reduction Theorem [4]_.

@@ -253,4 +282,24 @@ def chromatic_polynomial(G):
        Discrete Mathematics, 2006
        https://math.mit.edu/~rstan/pubs/pubfiles/18.pdf
     """
-    pass
+    import sympy
+
+    x = sympy.Symbol("x")
+    stack = deque()
+    stack.append(nx.MultiGraph(G, contraction_idx=0))
+
+    polynomial = 0
+    while stack:
+        G = stack.pop()
+        edges = list(G.edges)
+        if not edges:
+            polynomial += (-1) ** G.graph["contraction_idx"] * x ** len(G)
+        else:
+            e = edges[0]
+            C = nx.contracted_edge(G, e, self_loops=True)
+            C.graph["contraction_idx"] = G.graph["contraction_idx"] + 1
+            C.remove_edge(e[0], e[0])
+            G.remove_edge(*e)
+            stack.append(G)
+            stack.append(C)
+    return polynomial
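Likewise a usage sketch for the chromatic recurrence (not part of the patch):
the triangle admits x*(x-1)*(x-2) proper colorings with x available colors.

    import networkx as nx

    print(nx.chromatic_polynomial(nx.cycle_graph(3)))  # x**3 - 3*x**2 + 2*x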
diff --git a/networkx/algorithms/reciprocity.py b/networkx/algorithms/reciprocity.py
index e288fcf22..25b0fa1ba 100644
--- a/networkx/algorithms/reciprocity.py
+++ b/networkx/algorithms/reciprocity.py
@@ -1,19 +1,21 @@
 """Algorithms to calculate reciprocity in a directed graph."""
 import networkx as nx
 from networkx import NetworkXError
+
 from ..utils import not_implemented_for
-__all__ = ['reciprocity', 'overall_reciprocity']
+
+__all__ = ["reciprocity", "overall_reciprocity"]


-@not_implemented_for('undirected', 'multigraph')
+@not_implemented_for("undirected", "multigraph")
 @nx._dispatchable
 def reciprocity(G, nodes=None):
-    """Compute the reciprocity in a directed graph.
+    r"""Compute the reciprocity in a directed graph.

     The reciprocity of a directed graph is defined as the ratio
     of the number of edges pointing in both directions to the total
     number of edges in the graph.
-    Formally, $r = |{(u,v) \\in G|(v,u) \\in G}| / |{(u,v) \\in G}|$.
+    Formally, $r = |\{(u,v) \in G \mid (v,u) \in G\}| / |\{(u,v) \in G\}|$.

     The reciprocity of a single node u is defined similarly,
     it is the ratio of the number of edges in both directions to
@@ -37,15 +39,43 @@ def reciprocity(G, nodes=None):
     In such cases this function will return None.

     """
-    pass
+    # If `nodes` is not specified, calculate the reciprocity of the graph.
+    if nodes is None:
+        return overall_reciprocity(G)
+
+    # If `nodes` represents a single node in the graph, return only its
+    # reciprocity.
+    if nodes in G:
+        reciprocity = next(_reciprocity_iter(G, nodes))[1]
+        if reciprocity is None:
+            raise NetworkXError("Not defined for isolated nodes.")
+        else:
+            return reciprocity
+
+    # Otherwise, `nodes` represents an iterable of nodes, so return a
+    # dictionary mapping node to its reciprocity.
+    return dict(_reciprocity_iter(G, nodes))


 def _reciprocity_iter(G, nodes):
     """Return an iterator of (node, reciprocity)."""
-    pass
+    n = G.nbunch_iter(nodes)
+    for node in n:
+        pred = set(G.predecessors(node))
+        succ = set(G.successors(node))
+        overlap = pred & succ
+        n_total = len(pred) + len(succ)

+        # Reciprocity is not defined for isolated nodes.
+        # Return None.
+        if n_total == 0:
+            yield (node, None)
+        else:
+            reciprocity = 2 * len(overlap) / n_total
+            yield (node, reciprocity)

-@not_implemented_for('undirected', 'multigraph')
+
+@not_implemented_for("undirected", "multigraph")
 @nx._dispatchable
 def overall_reciprocity(G):
     """Compute the reciprocity for the whole graph.
@@ -58,4 +88,10 @@ def overall_reciprocity(G):
        A networkx graph

     """
-    pass
+    n_all_edge = G.number_of_edges()
+    if n_all_edge == 0:
+        raise NetworkXError("Not defined for empty graphs")
+
+    n_overlap_edge = (n_all_edge - G.to_undirected().number_of_edges()) * 2
+
+    return n_overlap_edge / n_all_edge
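A small check of both restored functions (usage sketch, not part of the
patch): with edges 0->1, 1->0 and 1->2, two of the three edges are
reciprocated.

    import networkx as nx

    G = nx.DiGraph([(0, 1), (1, 0), (1, 2)])
    print(nx.overall_reciprocity(G))   # 0.666... (2 of the 3 edges)
    print(nx.reciprocity(G, nodes=0))  # 1.0 (both edges at node 0 reciprocated)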
diff --git a/networkx/algorithms/regular.py b/networkx/algorithms/regular.py
index 42826bc6a..058ad3654 100644
--- a/networkx/algorithms/regular.py
+++ b/networkx/algorithms/regular.py
@@ -1,7 +1,8 @@
 """Functions for computing and verifying regular graphs."""
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['is_regular', 'is_k_regular', 'k_factor']
+
+__all__ = ["is_regular", "is_k_regular", "k_factor"]


 @nx._dispatchable
@@ -28,10 +29,21 @@ def is_regular(G):
     True

     """
-    pass
-
-
-@not_implemented_for('directed')
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept("Graph has no nodes.")
+    n1 = nx.utils.arbitrary_element(G)
+    if not G.is_directed():
+        d1 = G.degree(n1)
+        return all(d1 == d for _, d in G.degree)
+    else:
+        d_in = G.in_degree(n1)
+        in_regular = all(d_in == d for _, d in G.in_degree)
+        d_out = G.out_degree(n1)
+        out_regular = all(d_out == d for _, d in G.out_degree)
+        return in_regular and out_regular
+
+
+@not_implemented_for("directed")
 @nx._dispatchable
 def is_k_regular(G, k):
     """Determines whether the graph ``G`` is a k-regular graph.
@@ -54,13 +66,13 @@ def is_k_regular(G, k):
     False

     """
-    pass
+    return all(d == k for n, d in G.degree)


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable(preserve_edge_attrs=True, returns_graph=True)
-def k_factor(G, k, matching_weight='weight'):
+def k_factor(G, k, matching_weight="weight"):
     """Compute a k-factor of G

     A k-factor of a graph is a spanning k-regular subgraph.
@@ -96,4 +108,107 @@ def k_factor(G, k, matching_weight='weight'):
        Meijer, Henk, Yurai Núñez-Rodríguez, and David Rappaport,
        Information processing letters, 2009.
     """
-    pass
+
+    from networkx.algorithms.matching import is_perfect_matching, max_weight_matching
+
+    class LargeKGadget:
+        def __init__(self, k, degree, node, g):
+            self.original = node
+            self.g = g
+            self.k = k
+            self.degree = degree
+
+            self.outer_vertices = [(node, x) for x in range(degree)]
+            self.core_vertices = [(node, x + degree) for x in range(degree - k)]
+
+        def replace_node(self):
+            adj_view = self.g[self.original]
+            neighbors = list(adj_view.keys())
+            all_edge_attrs = list(adj_view.values())
+            for outer, neighbor, edge_attrs in zip(
+                self.outer_vertices, neighbors, all_edge_attrs
+            ):
+                self.g.add_edge(outer, neighbor, **edge_attrs)
+            for core in self.core_vertices:
+                for outer in self.outer_vertices:
+                    self.g.add_edge(core, outer)
+            self.g.remove_node(self.original)
+
+        def restore_node(self):
+            self.g.add_node(self.original)
+            for outer in self.outer_vertices:
+                adj_view = self.g[outer]
+                for neighbor, edge_attrs in list(adj_view.items()):
+                    if neighbor not in self.core_vertices:
+                        self.g.add_edge(self.original, neighbor, **edge_attrs)
+                        break
+            self.g.remove_nodes_from(self.outer_vertices)
+            self.g.remove_nodes_from(self.core_vertices)
+
+    class SmallKGadget:
+        def __init__(self, k, degree, node, g):
+            self.original = node
+            self.k = k
+            self.degree = degree
+            self.g = g
+
+            self.outer_vertices = [(node, x) for x in range(degree)]
+            self.inner_vertices = [(node, x + degree) for x in range(degree)]
+            self.core_vertices = [(node, x + 2 * degree) for x in range(k)]
+
+        def replace_node(self):
+            adj_view = self.g[self.original]
+            for outer, inner, (neighbor, edge_attrs) in zip(
+                self.outer_vertices, self.inner_vertices, list(adj_view.items())
+            ):
+                self.g.add_edge(outer, inner)
+                self.g.add_edge(outer, neighbor, **edge_attrs)
+            for core in self.core_vertices:
+                for inner in self.inner_vertices:
+                    self.g.add_edge(core, inner)
+            self.g.remove_node(self.original)
+
+        def restore_node(self):
+            self.g.add_node(self.original)
+            for outer in self.outer_vertices:
+                adj_view = self.g[outer]
+                for neighbor, edge_attrs in adj_view.items():
+                    if neighbor not in self.core_vertices:
+                        self.g.add_edge(self.original, neighbor, **edge_attrs)
+                        break
+            self.g.remove_nodes_from(self.outer_vertices)
+            self.g.remove_nodes_from(self.inner_vertices)
+            self.g.remove_nodes_from(self.core_vertices)
+
+    # Step 1
+    if any(d < k for _, d in G.degree):
+        raise nx.NetworkXUnfeasible("Graph contains a vertex with degree less than k")
+    g = G.copy()
+
+    # Step 2
+    gadgets = []
+    for node, degree in list(g.degree):
+        if k < degree / 2.0:
+            gadget = SmallKGadget(k, degree, node, g)
+        else:
+            gadget = LargeKGadget(k, degree, node, g)
+        gadget.replace_node()
+        gadgets.append(gadget)
+
+    # Step 3
+    matching = max_weight_matching(g, maxcardinality=True, weight=matching_weight)
+
+    # Step 4
+    if not is_perfect_matching(g, matching):
+        raise nx.NetworkXUnfeasible(
+            "Cannot find k-factor because no perfect matching exists"
+        )
+
+    for edge in g.edges():
+        if edge not in matching and (edge[1], edge[0]) not in matching:
+            g.remove_edge(edge[0], edge[1])
+
+    for gadget in gadgets:
+        gadget.restore_node()
+
+    return g
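The gadgets above reduce the k-factor problem to perfect matching; a usage
sketch (not part of the patch): a 1-factor of K_4 is simply a perfect
matching.

    import networkx as nx

    F = nx.k_factor(nx.complete_graph(4), 1)
    print(sorted(d for _, d in F.degree()))  # [1, 1, 1, 1] -- spanning 1-regular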
diff --git a/networkx/algorithms/richclub.py b/networkx/algorithms/richclub.py
index 5a1d6688f..445b27d14 100644
--- a/networkx/algorithms/richclub.py
+++ b/networkx/algorithms/richclub.py
@@ -1,15 +1,18 @@
 """Functions for computing rich-club coefficients."""
+
 from itertools import accumulate
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['rich_club_coefficient']
+
+__all__ = ["rich_club_coefficient"]


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def rich_club_coefficient(G, normalized=True, Q=100, seed=None):
-    """Returns the rich-club coefficient of the graph `G`.
+    r"""Returns the rich-club coefficient of the graph `G`.

     For each degree *k*, the *rich-club coefficient* is the ratio of the
     number of actual to the number of potential edges for nodes with
@@ -17,7 +20,7 @@ def rich_club_coefficient(G, normalized=True, Q=100, seed=None):

     .. math::

-        \\phi(k) = \\frac{2 E_k}{N_k (N_k - 1)}
+        \phi(k) = \frac{2 E_k}{N_k (N_k - 1)}

     where `N_k` is the number of nodes with degree larger than *k*, and
     `E_k` is the number of edges among those nodes.
@@ -81,7 +84,20 @@ def rich_club_coefficient(G, normalized=True, Q=100, seed=None):
        "Uniform generation of random graphs with arbitrary degree
        sequences", 2006. https://arxiv.org/abs/cond-mat/0312028
     """
-    pass
+    if nx.number_of_selfloops(G) > 0:
+        raise Exception(
+            "rich_club_coefficient is not implemented for graphs with self loops."
+        )
+    rc = _compute_rc(G)
+    if normalized:
+        # make R a copy of G, randomize with Q*|E| double edge swaps
+        # and use rich_club coefficient of R to normalize
+        R = G.copy()
+        E = R.number_of_edges()
+        nx.double_edge_swap(R, Q * E, max_tries=Q * E * 10, seed=seed)
+        rcran = _compute_rc(R)
+        rc = {k: v / rcran[k] for k, v in rc.items()}
+    return rc


 def _compute_rc(G):
@@ -94,4 +110,29 @@ def _compute_rc(G):
     that degree.

     """
-    pass
+    deghist = nx.degree_histogram(G)
+    total = sum(deghist)
+    # Compute the number of nodes with degree greater than `k`, for each
+    # degree `k`, keeping only degrees shared by at least two such nodes
+    # (the coefficient's denominator is zero otherwise).
+    nks = (total - cs for cs in accumulate(deghist) if total - cs > 1)
+    # Create a sorted list of pairs of edge endpoint degrees.
+    #
+    # The list is sorted in reverse order so that we can pop from the
+    # right side of the list later, instead of popping from the left
+    # side of the list, which would have a linear time cost.
+    edge_degrees = sorted((sorted(map(G.degree, e)) for e in G.edges()), reverse=True)
+    ek = G.number_of_edges()
+    if ek == 0:
+        return {}
+
+    k1, k2 = edge_degrees.pop()
+    rc = {}
+    for d, nk in enumerate(nks):
+        while k1 <= d:
+            if len(edge_degrees) == 0:
+                ek = 0
+                break
+            k1, k2 = edge_degrees.pop()
+            ek -= 1
+        rc[d] = 2 * ek / (nk * (nk - 1))
+    return rc
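Usage sketch (not part of the patch): with normalized=False the coefficient
is deterministic, and the complete graph is maximally "rich" at every degree
level.

    import networkx as nx

    rc = nx.rich_club_coefficient(nx.complete_graph(5), normalized=False)
    print(rc[0])  # 1.0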
diff --git a/networkx/algorithms/similarity.py b/networkx/algorithms/similarity.py
index 095581ae6..7af9c8980 100644
--- a/networkx/algorithms/similarity.py
+++ b/networkx/algorithms/similarity.py
@@ -12,24 +12,49 @@ and/or `optimize_edit_paths`.
 At the same time, I encourage capable people to investigate
 alternative GED algorithms, in order to improve the choices available.
 """
+
 import math
 import time
 import warnings
 from dataclasses import dataclass
 from itertools import product
+
 import networkx as nx
 from networkx.utils import np_random_state
-__all__ = ['graph_edit_distance', 'optimal_edit_paths',
-    'optimize_graph_edit_distance', 'optimize_edit_paths',
-    'simrank_similarity', 'panther_similarity', 'generate_random_paths']

-
-@nx._dispatchable(graphs={'G1': 0, 'G2': 1}, preserve_edge_attrs=True,
-    preserve_node_attrs=True)
-def graph_edit_distance(G1, G2, node_match=None, edge_match=None,
-    node_subst_cost=None, node_del_cost=None, node_ins_cost=None,
-    edge_subst_cost=None, edge_del_cost=None, edge_ins_cost=None, roots=
-    None, upper_bound=None, timeout=None):
+__all__ = [
+    "graph_edit_distance",
+    "optimal_edit_paths",
+    "optimize_graph_edit_distance",
+    "optimize_edit_paths",
+    "simrank_similarity",
+    "panther_similarity",
+    "generate_random_paths",
+]
+
+
+def debug_print(*args, **kwargs):
+    print(*args, **kwargs)
+
+
+@nx._dispatchable(
+    graphs={"G1": 0, "G2": 1}, preserve_edge_attrs=True, preserve_node_attrs=True
+)
+def graph_edit_distance(
+    G1,
+    G2,
+    node_match=None,
+    edge_match=None,
+    node_subst_cost=None,
+    node_del_cost=None,
+    node_ins_cost=None,
+    edge_subst_cost=None,
+    edge_del_cost=None,
+    edge_ins_cost=None,
+    roots=None,
+    upper_bound=None,
+    timeout=None,
+):
     """Returns GED (graph edit distance) between graphs G1 and G2.

     Graph edit distance is a graph similarity measure analogous to
@@ -164,14 +189,42 @@ def graph_edit_distance(G1, G2, node_match=None, edge_match=None,
        https://hal.archives-ouvertes.fr/hal-01168816

     """
-    pass
-
-
-@nx._dispatchable(graphs={'G1': 0, 'G2': 1})
-def optimal_edit_paths(G1, G2, node_match=None, edge_match=None,
-    node_subst_cost=None, node_del_cost=None, node_ins_cost=None,
-    edge_subst_cost=None, edge_del_cost=None, edge_ins_cost=None,
-    upper_bound=None):
+    bestcost = None
+    for _, _, cost in optimize_edit_paths(
+        G1,
+        G2,
+        node_match,
+        edge_match,
+        node_subst_cost,
+        node_del_cost,
+        node_ins_cost,
+        edge_subst_cost,
+        edge_del_cost,
+        edge_ins_cost,
+        upper_bound,
+        True,
+        roots,
+        timeout,
+    ):
+        # assert bestcost is None or cost < bestcost
+        bestcost = cost
+    return bestcost
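Usage sketch (not part of the patch): deleting one edge turns the triangle
into a 3-node path, so the edit distance is 1.

    import networkx as nx

    print(nx.graph_edit_distance(nx.cycle_graph(3), nx.path_graph(3)))  # 1.0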
+
+
+@nx._dispatchable(graphs={"G1": 0, "G2": 1})
+def optimal_edit_paths(
+    G1,
+    G2,
+    node_match=None,
+    edge_match=None,
+    node_subst_cost=None,
+    node_del_cost=None,
+    node_ins_cost=None,
+    edge_subst_cost=None,
+    edge_del_cost=None,
+    edge_ins_cost=None,
+    upper_bound=None,
+):
     """Returns all minimum-cost edit paths transforming G1 to G2.

     Graph edit path is a sequence of node and edge edit operations
@@ -306,14 +359,44 @@ def optimal_edit_paths(G1, G2, node_match=None, edge_match=None,
        https://hal.archives-ouvertes.fr/hal-01168816

     """
-    pass
-
-
-@nx._dispatchable(graphs={'G1': 0, 'G2': 1})
-def optimize_graph_edit_distance(G1, G2, node_match=None, edge_match=None,
-    node_subst_cost=None, node_del_cost=None, node_ins_cost=None,
-    edge_subst_cost=None, edge_del_cost=None, edge_ins_cost=None,
-    upper_bound=None):
+    paths = []
+    bestcost = None
+    for vertex_path, edge_path, cost in optimize_edit_paths(
+        G1,
+        G2,
+        node_match,
+        edge_match,
+        node_subst_cost,
+        node_del_cost,
+        node_ins_cost,
+        edge_subst_cost,
+        edge_del_cost,
+        edge_ins_cost,
+        upper_bound,
+        False,
+    ):
+        # assert bestcost is None or cost <= bestcost
+        if bestcost is not None and cost < bestcost:
+            paths = []
+        paths.append((vertex_path, edge_path))
+        bestcost = cost
+    return paths, bestcost
+
+
+@nx._dispatchable(graphs={"G1": 0, "G2": 1})
+def optimize_graph_edit_distance(
+    G1,
+    G2,
+    node_match=None,
+    edge_match=None,
+    node_subst_cost=None,
+    node_del_cost=None,
+    node_ins_cost=None,
+    edge_subst_cost=None,
+    edge_del_cost=None,
+    edge_ins_cost=None,
+    upper_bound=None,
+):
     """Returns consecutive approximations of GED (graph edit distance)
     between graphs G1 and G2.

@@ -434,15 +517,42 @@ def optimize_graph_edit_distance(G1, G2, node_match=None, edge_match=None,
        <10.5220/0005209202710278>. <hal-01168816>
        https://hal.archives-ouvertes.fr/hal-01168816
     """
-    pass
-
-
-@nx._dispatchable(graphs={'G1': 0, 'G2': 1}, preserve_edge_attrs=True,
-    preserve_node_attrs=True)
-def optimize_edit_paths(G1, G2, node_match=None, edge_match=None,
-    node_subst_cost=None, node_del_cost=None, node_ins_cost=None,
-    edge_subst_cost=None, edge_del_cost=None, edge_ins_cost=None,
-    upper_bound=None, strictly_decreasing=True, roots=None, timeout=None):
+    for _, _, cost in optimize_edit_paths(
+        G1,
+        G2,
+        node_match,
+        edge_match,
+        node_subst_cost,
+        node_del_cost,
+        node_ins_cost,
+        edge_subst_cost,
+        edge_del_cost,
+        edge_ins_cost,
+        upper_bound,
+        True,
+    ):
+        yield cost
+
+
+@nx._dispatchable(
+    graphs={"G1": 0, "G2": 1}, preserve_edge_attrs=True, preserve_node_attrs=True
+)
+def optimize_edit_paths(
+    G1,
+    G2,
+    node_match=None,
+    edge_match=None,
+    node_subst_cost=None,
+    node_del_cost=None,
+    node_ins_cost=None,
+    edge_subst_cost=None,
+    edge_del_cost=None,
+    edge_ins_cost=None,
+    upper_bound=None,
+    strictly_decreasing=True,
+    roots=None,
+    timeout=None,
+):
     """GED (graph edit distance) calculation: advanced interface.

     Graph edit path is a sequence of node and edge edit operations
@@ -573,12 +683,545 @@ def optimize_edit_paths(G1, G2, node_match=None, edge_match=None,
        https://hal.archives-ouvertes.fr/hal-01168816

     """
-    pass
+    # TODO: support DiGraph
+
+    import numpy as np
+    import scipy as sp
+
+    @dataclass
+    class CostMatrix:
+        C: ...
+        lsa_row_ind: ...
+        lsa_col_ind: ...
+        ls: ...
+
+    def make_CostMatrix(C, m, n):
+        # assert(C.shape == (m + n, m + n))
+        lsa_row_ind, lsa_col_ind = sp.optimize.linear_sum_assignment(C)
+
+        # Fixup dummy assignments:
+        # each substitution i<->j should have dummy assignment m+j<->n+i
+        # NOTE: fast reduce of Cv relies on it
+        # assert len(lsa_row_ind) == len(lsa_col_ind)
+        indexes = zip(range(len(lsa_row_ind)), lsa_row_ind, lsa_col_ind)
+        subst_ind = [k for k, i, j in indexes if i < m and j < n]
+        indexes = zip(range(len(lsa_row_ind)), lsa_row_ind, lsa_col_ind)
+        dummy_ind = [k for k, i, j in indexes if i >= m and j >= n]
+        # assert len(subst_ind) == len(dummy_ind)
+        lsa_row_ind[dummy_ind] = lsa_col_ind[subst_ind] + m
+        lsa_col_ind[dummy_ind] = lsa_row_ind[subst_ind] + n
+
+        return CostMatrix(
+            C, lsa_row_ind, lsa_col_ind, C[lsa_row_ind, lsa_col_ind].sum()
+        )
+
+    def extract_C(C, i, j, m, n):
+        # assert(C.shape == (m + n, m + n))
+        row_ind = [k in i or k - m in j for k in range(m + n)]
+        col_ind = [k in j or k - n in i for k in range(m + n)]
+        return C[row_ind, :][:, col_ind]
+
+    def reduce_C(C, i, j, m, n):
+        # assert(C.shape == (m + n, m + n))
+        row_ind = [k not in i and k - m not in j for k in range(m + n)]
+        col_ind = [k not in j and k - n not in i for k in range(m + n)]
+        return C[row_ind, :][:, col_ind]
+
+    def reduce_ind(ind, i):
+        # assert set(ind) == set(range(len(ind)))
+        rind = ind[[k not in i for k in ind]]
+        for k in set(i):
+            rind[rind >= k] -= 1
+        return rind
+
+    def match_edges(u, v, pending_g, pending_h, Ce, matched_uv=None):
+        """
+        Parameters:
+            u, v: matched vertices, u=None or v=None for
+               deletion/insertion
+            pending_g, pending_h: lists of edges not yet mapped
+            Ce: CostMatrix of pending edge mappings
+            matched_uv: partial vertex edit path
+                list of tuples (u, v) of previously matched vertex
+                    mappings u<->v, u=None or v=None for
+                    deletion/insertion
+
+        Returns:
+            list of (i, j): indices of edge mappings g<->h
+            localCe: local CostMatrix of edge mappings
+                (basically submatrix of Ce at cross of rows i, cols j)
+        """
+        M = len(pending_g)
+        N = len(pending_h)
+        # assert Ce.C.shape == (M + N, M + N)
+
+        # Only attempt to match edges after at least one node match has been
+        # made; this stops self-edges on the first node from being
+        # automatically deleted even when a substitution is the better option.
+        if matched_uv is None or len(matched_uv) == 0:
+            g_ind = []
+            h_ind = []
+        else:
+            g_ind = [
+                i
+                for i in range(M)
+                if pending_g[i][:2] == (u, u)
+                or any(
+                    pending_g[i][:2] in ((p, u), (u, p), (p, p)) for p, q in matched_uv
+                )
+            ]
+            h_ind = [
+                j
+                for j in range(N)
+                if pending_h[j][:2] == (v, v)
+                or any(
+                    pending_h[j][:2] in ((q, v), (v, q), (q, q)) for p, q in matched_uv
+                )
+            ]
+
+        m = len(g_ind)
+        n = len(h_ind)
+
+        if m or n:
+            C = extract_C(Ce.C, g_ind, h_ind, M, N)
+            # assert C.shape == (m + n, m + n)
+
+            # Forbid structurally invalid matches
+            # NOTE: inf remembered from Ce construction
+            for k, i in enumerate(g_ind):
+                g = pending_g[i][:2]
+                for l, j in enumerate(h_ind):
+                    h = pending_h[j][:2]
+                    if nx.is_directed(G1) or nx.is_directed(G2):
+                        if any(
+                            g == (p, u) and h == (q, v) or g == (u, p) and h == (v, q)
+                            for p, q in matched_uv
+                        ):
+                            continue
+                    else:
+                        if any(
+                            g in ((p, u), (u, p)) and h in ((q, v), (v, q))
+                            for p, q in matched_uv
+                        ):
+                            continue
+                    if g == (u, u) or any(g == (p, p) for p, q in matched_uv):
+                        continue
+                    if h == (v, v) or any(h == (q, q) for p, q in matched_uv):
+                        continue
+                    C[k, l] = inf
+
+            localCe = make_CostMatrix(C, m, n)
+            ij = [
+                (
+                    g_ind[k] if k < m else M + h_ind[l],
+                    h_ind[l] if l < n else N + g_ind[k],
+                )
+                for k, l in zip(localCe.lsa_row_ind, localCe.lsa_col_ind)
+                if k < m or l < n
+            ]
+
+        else:
+            ij = []
+            localCe = CostMatrix(np.empty((0, 0)), [], [], 0)
+
+        return ij, localCe
+
+    def reduce_Ce(Ce, ij, m, n):
+        if len(ij):
+            i, j = zip(*ij)
+            m_i = m - sum(1 for t in i if t < m)
+            n_j = n - sum(1 for t in j if t < n)
+            return make_CostMatrix(reduce_C(Ce.C, i, j, m, n), m_i, n_j)
+        return Ce
+
+    def get_edit_ops(
+        matched_uv, pending_u, pending_v, Cv, pending_g, pending_h, Ce, matched_cost
+    ):
+        """
+        Parameters:
+            matched_uv: partial vertex edit path
+                list of tuples (u, v) of vertex mappings u<->v,
+                u=None or v=None for deletion/insertion
+            pending_u, pending_v: lists of vertices not yet mapped
+            Cv: CostMatrix of pending vertex mappings
+            pending_g, pending_h: lists of edges not yet mapped
+            Ce: CostMatrix of pending edge mappings
+            matched_cost: cost of partial edit path
+
+        Returns:
+            sequence of
+                (i, j): indices of vertex mapping u<->v
+                Cv_ij: reduced CostMatrix of pending vertex mappings
+                    (basically Cv with row i, col j removed)
+                list of (x, y): indices of edge mappings g<->h
+                Ce_xy: reduced CostMatrix of pending edge mappings
+                    (basically Ce with rows x, cols y removed)
+                cost: total cost of edit operation
+            NOTE: most promising ops first
+        """
+        m = len(pending_u)
+        n = len(pending_v)
+        # assert Cv.C.shape == (m + n, m + n)
+
+        # 1) a vertex mapping from optimal linear sum assignment
+        i, j = min(
+            (k, l) for k, l in zip(Cv.lsa_row_ind, Cv.lsa_col_ind) if k < m or l < n
+        )
+        xy, localCe = match_edges(
+            pending_u[i] if i < m else None,
+            pending_v[j] if j < n else None,
+            pending_g,
+            pending_h,
+            Ce,
+            matched_uv,
+        )
+        Ce_xy = reduce_Ce(Ce, xy, len(pending_g), len(pending_h))
+        # assert Ce.ls <= localCe.ls + Ce_xy.ls
+        if prune(matched_cost + Cv.ls + localCe.ls + Ce_xy.ls):
+            pass
+        else:
+            # get reduced Cv efficiently
+            Cv_ij = CostMatrix(
+                reduce_C(Cv.C, (i,), (j,), m, n),
+                reduce_ind(Cv.lsa_row_ind, (i, m + j)),
+                reduce_ind(Cv.lsa_col_ind, (j, n + i)),
+                Cv.ls - Cv.C[i, j],
+            )
+            yield (i, j), Cv_ij, xy, Ce_xy, Cv.C[i, j] + localCe.ls
+
+        # 2) other candidates, sorted by lower-bound cost estimate
+        other = []
+        fixed_i, fixed_j = i, j
+        if m <= n:
+            candidates = (
+                (t, fixed_j)
+                for t in range(m + n)
+                if t != fixed_i and (t < m or t == m + fixed_j)
+            )
+        else:
+            candidates = (
+                (fixed_i, t)
+                for t in range(m + n)
+                if t != fixed_j and (t < n or t == n + fixed_i)
+            )
+        for i, j in candidates:
+            if prune(matched_cost + Cv.C[i, j] + Ce.ls):
+                continue
+            Cv_ij = make_CostMatrix(
+                reduce_C(Cv.C, (i,), (j,), m, n),
+                m - 1 if i < m else m,
+                n - 1 if j < n else n,
+            )
+            # assert Cv.ls <= Cv.C[i, j] + Cv_ij.ls
+            if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + Ce.ls):
+                continue
+            xy, localCe = match_edges(
+                pending_u[i] if i < m else None,
+                pending_v[j] if j < n else None,
+                pending_g,
+                pending_h,
+                Ce,
+                matched_uv,
+            )
+            if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + localCe.ls):
+                continue
+            Ce_xy = reduce_Ce(Ce, xy, len(pending_g), len(pending_h))
+            # assert Ce.ls <= localCe.ls + Ce_xy.ls
+            if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + localCe.ls + Ce_xy.ls):
+                continue
+            other.append(((i, j), Cv_ij, xy, Ce_xy, Cv.C[i, j] + localCe.ls))
+
+        yield from sorted(other, key=lambda t: t[4] + t[1].ls + t[3].ls)
+
+    def get_edit_paths(
+        matched_uv,
+        pending_u,
+        pending_v,
+        Cv,
+        matched_gh,
+        pending_g,
+        pending_h,
+        Ce,
+        matched_cost,
+    ):
+        """
+        Parameters:
+            matched_uv: partial vertex edit path
+                list of tuples (u, v) of vertex mappings u<->v,
+                u=None or v=None for deletion/insertion
+            pending_u, pending_v: lists of vertices not yet mapped
+            Cv: CostMatrix of pending vertex mappings
+            matched_gh: partial edge edit path
+                list of tuples (g, h) of edge mappings g<->h,
+                g=None or h=None for deletion/insertion
+            pending_g, pending_h: lists of edges not yet mapped
+            Ce: CostMatrix of pending edge mappings
+            matched_cost: cost of partial edit path
+
+        Returns:
+            sequence of (vertex_path, edge_path, cost)
+                vertex_path: complete vertex edit path
+                    list of tuples (u, v) of vertex mappings u<->v,
+                    u=None or v=None for deletion/insertion
+                edge_path: complete edge edit path
+                    list of tuples (g, h) of edge mappings g<->h,
+                    g=None or h=None for deletion/insertion
+                cost: total cost of edit path
+            NOTE: path costs are non-increasing
+        """
+        # debug_print('matched-uv:', matched_uv)
+        # debug_print('matched-gh:', matched_gh)
+        # debug_print('matched-cost:', matched_cost)
+        # debug_print('pending-u:', pending_u)
+        # debug_print('pending-v:', pending_v)
+        # debug_print(Cv.C)
+        # assert list(sorted(G1.nodes)) == list(sorted(list(u for u, v in matched_uv if u is not None) + pending_u))
+        # assert list(sorted(G2.nodes)) == list(sorted(list(v for u, v in matched_uv if v is not None) + pending_v))
+        # debug_print('pending-g:', pending_g)
+        # debug_print('pending-h:', pending_h)
+        # debug_print(Ce.C)
+        # assert list(sorted(G1.edges)) == list(sorted(list(g for g, h in matched_gh if g is not None) + pending_g))
+        # assert list(sorted(G2.edges)) == list(sorted(list(h for g, h in matched_gh if h is not None) + pending_h))
+        # debug_print()
+
+        if prune(matched_cost + Cv.ls + Ce.ls):
+            return
+
+        if not max(len(pending_u), len(pending_v)):
+            # assert not len(pending_g)
+            # assert not len(pending_h)
+            # path completed!
+            # assert matched_cost <= maxcost_value
+            nonlocal maxcost_value
+            maxcost_value = min(maxcost_value, matched_cost)
+            yield matched_uv, matched_gh, matched_cost
+
+        else:
+            edit_ops = get_edit_ops(
+                matched_uv,
+                pending_u,
+                pending_v,
+                Cv,
+                pending_g,
+                pending_h,
+                Ce,
+                matched_cost,
+            )
+            for ij, Cv_ij, xy, Ce_xy, edit_cost in edit_ops:
+                i, j = ij
+                # assert Cv.C[i, j] + sum(Ce.C[t] for t in xy) == edit_cost
+                if prune(matched_cost + edit_cost + Cv_ij.ls + Ce_xy.ls):
+                    continue
+
+                # dive deeper
+                u = pending_u.pop(i) if i < len(pending_u) else None
+                v = pending_v.pop(j) if j < len(pending_v) else None
+                matched_uv.append((u, v))
+                for x, y in xy:
+                    len_g = len(pending_g)
+                    len_h = len(pending_h)
+                    matched_gh.append(
+                        (
+                            pending_g[x] if x < len_g else None,
+                            pending_h[y] if y < len_h else None,
+                        )
+                    )
+                sortedx = sorted(x for x, y in xy)
+                sortedy = sorted(y for x, y in xy)
+                G = [
+                    (pending_g.pop(x) if x < len(pending_g) else None)
+                    for x in reversed(sortedx)
+                ]
+                H = [
+                    (pending_h.pop(y) if y < len(pending_h) else None)
+                    for y in reversed(sortedy)
+                ]
+
+                yield from get_edit_paths(
+                    matched_uv,
+                    pending_u,
+                    pending_v,
+                    Cv_ij,
+                    matched_gh,
+                    pending_g,
+                    pending_h,
+                    Ce_xy,
+                    matched_cost + edit_cost,
+                )
+
+                # backtrack
+                if u is not None:
+                    pending_u.insert(i, u)
+                if v is not None:
+                    pending_v.insert(j, v)
+                matched_uv.pop()
+                for x, g in zip(sortedx, reversed(G)):
+                    if g is not None:
+                        pending_g.insert(x, g)
+                for y, h in zip(sortedy, reversed(H)):
+                    if h is not None:
+                        pending_h.insert(y, h)
+                for _ in xy:
+                    matched_gh.pop()
+
+    # Initialization
+
+    pending_u = list(G1.nodes)
+    pending_v = list(G2.nodes)
+
+    initial_cost = 0
+    if roots:
+        root_u, root_v = roots
+        if root_u not in pending_u or root_v not in pending_v:
+            raise nx.NodeNotFound("Root node not in graph.")
+
+        # remove roots from pending
+        pending_u.remove(root_u)
+        pending_v.remove(root_v)
+
+    # cost matrix of vertex mappings
+    m = len(pending_u)
+    n = len(pending_v)
+    C = np.zeros((m + n, m + n))
+    if node_subst_cost:
+        C[0:m, 0:n] = np.array(
+            [
+                node_subst_cost(G1.nodes[u], G2.nodes[v])
+                for u in pending_u
+                for v in pending_v
+            ]
+        ).reshape(m, n)
+        if roots:
+            initial_cost = node_subst_cost(G1.nodes[root_u], G2.nodes[root_v])
+    elif node_match:
+        C[0:m, 0:n] = np.array(
+            [
+                1 - int(node_match(G1.nodes[u], G2.nodes[v]))
+                for u in pending_u
+                for v in pending_v
+            ]
+        ).reshape(m, n)
+        if roots:
+            initial_cost = 1 - node_match(G1.nodes[root_u], G2.nodes[root_v])
+    else:
+        # all zeroes
+        pass
+    # assert not min(m, n) or C[0:m, 0:n].min() >= 0
+    if node_del_cost:
+        del_costs = [node_del_cost(G1.nodes[u]) for u in pending_u]
+    else:
+        del_costs = [1] * len(pending_u)
+    # assert not m or min(del_costs) >= 0
+    if node_ins_cost:
+        ins_costs = [node_ins_cost(G2.nodes[v]) for v in pending_v]
+    else:
+        ins_costs = [1] * len(pending_v)
+    # assert not n or min(ins_costs) >= 0
+    inf = C[0:m, 0:n].sum() + sum(del_costs) + sum(ins_costs) + 1
+    C[0:m, n : n + m] = np.array(
+        [del_costs[i] if i == j else inf for i in range(m) for j in range(m)]
+    ).reshape(m, m)
+    C[m : m + n, 0:n] = np.array(
+        [ins_costs[i] if i == j else inf for i in range(n) for j in range(n)]
+    ).reshape(n, n)
+    Cv = make_CostMatrix(C, m, n)
+    # debug_print(f"Cv: {m} x {n}")
+    # debug_print(Cv.C)
+
+    pending_g = list(G1.edges)
+    pending_h = list(G2.edges)
+
+    # cost matrix of edge mappings
+    m = len(pending_g)
+    n = len(pending_h)
+    C = np.zeros((m + n, m + n))
+    if edge_subst_cost:
+        C[0:m, 0:n] = np.array(
+            [
+                edge_subst_cost(G1.edges[g], G2.edges[h])
+                for g in pending_g
+                for h in pending_h
+            ]
+        ).reshape(m, n)
+    elif edge_match:
+        C[0:m, 0:n] = np.array(
+            [
+                1 - int(edge_match(G1.edges[g], G2.edges[h]))
+                for g in pending_g
+                for h in pending_h
+            ]
+        ).reshape(m, n)
+    else:
+        # all zeroes
+        pass
+    # assert not min(m, n) or C[0:m, 0:n].min() >= 0
+    if edge_del_cost:
+        del_costs = [edge_del_cost(G1.edges[g]) for g in pending_g]
+    else:
+        del_costs = [1] * len(pending_g)
+    # assert not m or min(del_costs) >= 0
+    if edge_ins_cost:
+        ins_costs = [edge_ins_cost(G2.edges[h]) for h in pending_h]
+    else:
+        ins_costs = [1] * len(pending_h)
+    # assert not n or min(ins_costs) >= 0
+    inf = C[0:m, 0:n].sum() + sum(del_costs) + sum(ins_costs) + 1
+    C[0:m, n : n + m] = np.array(
+        [del_costs[i] if i == j else inf for i in range(m) for j in range(m)]
+    ).reshape(m, m)
+    C[m : m + n, 0:n] = np.array(
+        [ins_costs[i] if i == j else inf for i in range(n) for j in range(n)]
+    ).reshape(n, n)
+    Ce = make_CostMatrix(C, m, n)
+    # debug_print(f'Ce: {m} x {n}')
+    # debug_print(Ce.C)
+    # debug_print()
+
+    maxcost_value = Cv.C.sum() + Ce.C.sum() + 1
+
+    if timeout is not None:
+        if timeout <= 0:
+            raise nx.NetworkXError("Timeout value must be greater than 0")
+        start = time.perf_counter()
+
+    def prune(cost):
+        if timeout is not None:
+            if time.perf_counter() - start > timeout:
+                return True
+        if upper_bound is not None:
+            if cost > upper_bound:
+                return True
+        if cost > maxcost_value:
+            return True
+        if strictly_decreasing and cost >= maxcost_value:
+            return True
+        return False
+
+    # Now go!
+
+    done_uv = [] if roots is None else [roots]
+
+    for vertex_path, edge_path, cost in get_edit_paths(
+        done_uv, pending_u, pending_v, Cv, [], pending_g, pending_h, Ce, initial_cost
+    ):
+        # assert sorted(G1.nodes) == sorted(u for u, v in vertex_path if u is not None)
+        # assert sorted(G2.nodes) == sorted(v for u, v in vertex_path if v is not None)
+        # assert sorted(G1.edges) == sorted(g for g, h in edge_path if g is not None)
+        # assert sorted(G2.edges) == sorted(h for g, h in edge_path if h is not None)
+        # print(vertex_path, edge_path, cost, file = sys.stderr)
+        # assert cost == maxcost_value
+        yield list(vertex_path), list(edge_path), float(cost)
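Because costs from this generator are strictly decreasing by default, the
last triple it yields is optimal; a consumption sketch (not part of the
patch, graph choice arbitrary):

    import networkx as nx

    best = None
    for vertex_path, edge_path, cost in nx.optimize_edit_paths(
        nx.cycle_graph(4), nx.path_graph(4)
    ):
        best = (vertex_path, edge_path, cost)  # keep the cheapest seen so far
    print(best[2])  # 1.0 -- remove one edge of the cycle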


 @nx._dispatchable
-def simrank_similarity(G, source=None, target=None, importance_factor=0.9,
-    max_iterations=1000, tolerance=0.0001):
+def simrank_similarity(
+    G,
+    source=None,
+    target=None,
+    importance_factor=0.9,
+    max_iterations=1000,
+    tolerance=1e-4,
+):
     """Returns the SimRank similarity of nodes in the graph ``G``.

     SimRank is a similarity metric that says "two objects are considered
@@ -683,11 +1326,45 @@ def simrank_similarity(G, source=None, target=None, importance_factor=0.9,
            International Conference on Knowledge Discovery and Data Mining,
            pp. 538--543. ACM Press, 2002.
     """
-    pass
-
-
-def _simrank_similarity_python(G, source=None, target=None,
-    importance_factor=0.9, max_iterations=1000, tolerance=0.0001):
+    import numpy as np
+
+    nodelist = list(G)
+    if source is not None:
+        if source not in nodelist:
+            raise nx.NodeNotFound(f"Source node {source} not in G")
+        else:
+            s_indx = nodelist.index(source)
+    else:
+        s_indx = None
+
+    if target is not None:
+        if target not in nodelist:
+            raise nx.NodeNotFound(f"Target node {target} not in G")
+        else:
+            t_indx = nodelist.index(target)
+    else:
+        t_indx = None
+
+    x = _simrank_similarity_numpy(
+        G, s_indx, t_indx, importance_factor, max_iterations, tolerance
+    )
+
+    if isinstance(x, np.ndarray):
+        if x.ndim == 1:
+            return dict(zip(G, x.tolist()))
+        # else x.ndim == 2
+        return {u: dict(zip(G, row)) for u, row in zip(G, x.tolist())}
+    return float(x)
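Usage sketch (not part of the patch): on the 4-cycle, opposite nodes share
both neighbors and end up far more similar than adjacent ones.

    import networkx as nx

    sim = nx.simrank_similarity(nx.cycle_graph(4))
    print(round(sim[0][2], 3), sim[0][1])  # ~0.818 and 0.0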
+
+
+def _simrank_similarity_python(
+    G,
+    source=None,
+    target=None,
+    importance_factor=0.9,
+    max_iterations=1000,
+    tolerance=1e-4,
+):
     """Returns the SimRank similarity of nodes in the graph ``G``.

     This pure Python version is provided for pedagogical purposes.
@@ -702,11 +1379,52 @@ def _simrank_similarity_python(G, source=None, target=None,
     >>> nx.similarity._simrank_similarity_python(G, source=0, target=0)
     1
     """
-    pass
-
-
-def _simrank_similarity_numpy(G, source=None, target=None,
-    importance_factor=0.9, max_iterations=1000, tolerance=0.0001):
+    # build up our similarity adjacency dictionary output
+    newsim = {u: {v: 1 if u == v else 0 for v in G} for u in G}
+
+    # These functions compute the update to the similarity value of the nodes
+    # `u` and `v` with respect to the previous similarity values.
+    def avg_sim(s):
+        return sum(newsim[w][x] for (w, x) in s) / len(s) if s else 0.0
+
+    Gadj = G.pred if G.is_directed() else G.adj
+
+    def sim(u, v):
+        return importance_factor * avg_sim(list(product(Gadj[u], Gadj[v])))
+
+    for its in range(max_iterations):
+        oldsim = newsim
+        newsim = {u: {v: sim(u, v) if u != v else 1 for v in G} for u in G}
+        is_close = all(
+            all(
+                abs(newsim[u][v] - old) <= tolerance * (1 + abs(old))
+                for v, old in nbrs.items()
+            )
+            for u, nbrs in oldsim.items()
+        )
+        if is_close:
+            break
+
+    if its + 1 == max_iterations:
+        raise nx.ExceededMaxIterations(
+            f"simrank did not converge after {max_iterations} iterations."
+        )
+
+    if source is not None and target is not None:
+        return newsim[source][target]
+    if source is not None:
+        return newsim[source]
+    return newsim
+
+
+def _simrank_similarity_numpy(
+    G,
+    source=None,
+    target=None,
+    importance_factor=0.9,
+    max_iterations=1000,
+    tolerance=1e-4,
+):
     """Calculate SimRank of nodes in ``G`` using matrices with ``numpy``.

     The SimRank algorithm for determining node similarity is defined in
@@ -772,13 +1490,47 @@ def _simrank_similarity_numpy(G, source=None, target=None,
            International Conference on Knowledge Discovery and Data Mining,
            pp. 538--543. ACM Press, 2002.
     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
-def panther_similarity(G, source, k=5, path_length=5, c=0.5, delta=0.1, eps
-    =None, weight='weight'):
-    """Returns the Panther similarity of nodes in the graph `G` to node ``v``.
+    # This algorithm follows roughly
+    #
+    #     S = max{C * (A.T * S * A), I}
+    #
+    # where C is the importance factor, A is the column normalized
+    # adjacency matrix, and I is the identity matrix.
+    import numpy as np
+
+    adjacency_matrix = nx.to_numpy_array(G)
+
+    # column-normalize the ``adjacency_matrix``
+    s = np.array(adjacency_matrix.sum(axis=0))
+    s[s == 0] = 1
+    adjacency_matrix /= s  # adjacency_matrix.sum(axis=0)
+
+    newsim = np.eye(len(G), dtype=np.float64)
+    for its in range(max_iterations):
+        prevsim = newsim.copy()
+        newsim = importance_factor * ((adjacency_matrix.T @ prevsim) @ adjacency_matrix)
+        np.fill_diagonal(newsim, 1.0)
+
+        if np.allclose(prevsim, newsim, atol=tolerance):
+            break
+
+    if its + 1 == max_iterations:
+        raise nx.ExceededMaxIterations(
+            f"simrank did not converge after {max_iterations} iterations."
+        )
+
+    if source is not None and target is not None:
+        return float(newsim[source, target])
+    if source is not None:
+        return newsim[source]
+    return newsim
+
+
+@nx._dispatchable(edge_attrs="weight")
+def panther_similarity(
+    G, source, k=5, path_length=5, c=0.5, delta=0.1, eps=None, weight="weight"
+):
+    r"""Returns the Panther similarity of nodes in the graph `G` to node ``v``.

     Panther is a similarity metric that says "two objects are considered
     to be similar if they frequently appear on the same paths." [1]_.
@@ -798,8 +1550,8 @@ def panther_similarity(G, source, k=5, path_length=5, c=0.5, delta=0.1, eps
         of sample random paths to generate.
     delta : float (default = 0.1)
         The probability that the similarity $S$ is not an epsilon-approximation to (R, phi),
-        where $R$ is the number of random paths and $\\phi$ is the probability
-        that an element sampled from a set $A \\subseteq D$, where $D$ is the domain.
+        where $R$ is the number of random paths and $\phi$ is the probability
+        that a randomly sampled element of the domain $D$ lies in a given
+        subset $A \subseteq D$.
     eps : float or None (default = None)
         The error bound. Per [1]_, a good value is ``sqrt(1/|E|)``. Therefore,
         if no value is provided, the recommended computed value will be used.
@@ -840,13 +1592,82 @@ def panther_similarity(G, source, k=5, path_length=5, c=0.5, delta=0.1, eps
            on Knowledge Discovery and Data Mining (Vol. 2015-August, pp. 1445–1454).
            Association for Computing Machinery. https://doi.org/10.1145/2783258.2783267.
     """
-    pass
+    import numpy as np
+
+    if source not in G:
+        raise nx.NodeNotFound(f"Source node {source} not in G")
+
+    isolates = set(nx.isolates(G))
+
+    if source in isolates:
+        raise nx.NetworkXUnfeasible(
+            f"Panther similarity is not defined for the isolated source node {source}."
+        )
+
+    G = G.subgraph([node for node in G.nodes if node not in isolates]).copy()
+
+    num_nodes = G.number_of_nodes()
+    if num_nodes < k:
+        warnings.warn(
+            f"Number of nodes is {num_nodes}, but requested k is {k}. "
+            "Setting k to number of nodes."
+        )
+        k = num_nodes
+    # According to [1], they empirically determined
+    # a good value for ``eps`` to be sqrt( 1 / |E| )
+    if eps is None:
+        eps = np.sqrt(1.0 / G.number_of_edges())
+
+    inv_node_map = {name: index for index, name in enumerate(G.nodes)}
+    node_map = np.array(G)
+
+    # Calculate the sample size ``R`` for how many paths
+    # to randomly generate
+    t_choose_2 = math.comb(path_length, 2)
+    sample_size = int((c / eps**2) * (np.log2(t_choose_2) + 1 + np.log(1 / delta)))
+    index_map = {}
+    _ = list(
+        generate_random_paths(
+            G, sample_size, path_length=path_length, index_map=index_map, weight=weight
+        )
+    )
+    S = np.zeros(num_nodes)
+
+    inv_sample_size = 1 / sample_size
+
+    source_paths = set(index_map[source])
+
+    # Calculate the path similarities
+    # between ``source`` (v) and ``node`` (v_j)
+    # using our inverted index mapping of
+    # vertices to paths
+    for node, paths in index_map.items():
+        # Only consider paths where both
+        # ``node`` and ``source`` are present
+        common_paths = source_paths.intersection(paths)
+        S[inv_node_map[node]] = len(common_paths) * inv_sample_size
+
+    # Retrieve top ``k`` similar
+    # Note: the below performed anywhere from 4-10x faster
+    # (depending on input sizes) vs the equivalent ``np.argsort(S)[::-1]``
+    top_k_unsorted = np.argpartition(S, -k)[-k:]
+    top_k_sorted = top_k_unsorted[np.argsort(S[top_k_unsorted])][::-1]
+
+    # Add back the similarity scores
+    top_k_with_val = dict(
+        zip(node_map[top_k_sorted].tolist(), S[top_k_sorted].tolist())
+    )
+
+    # Remove the self-similarity
+    top_k_with_val.pop(source, None)
+    return top_k_with_val
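Because the scores are Monte-Carlo estimates over sampled random walks, they
vary from run to run; a usage sketch (not part of the patch):

    import networkx as nx

    G = nx.star_graph(10)
    print(nx.panther_similarity(G, source=0, k=3))  # 3 most path-similar nodes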


 @np_random_state(5)
-@nx._dispatchable(edge_attrs='weight')
-def generate_random_paths(G, sample_size, path_length=5, index_map=None,
-    weight='weight', seed=None):
+@nx._dispatchable(edge_attrs="weight")
+def generate_random_paths(
+    G, sample_size, path_length=5, index_map=None, weight="weight", seed=None
+):
     """Randomly generate `sample_size` paths of length `path_length`.

     Parameters
@@ -900,4 +1721,57 @@ def generate_random_paths(G, sample_size, path_length=5, index_map=None,
            on Knowledge Discovery and Data Mining (Vol. 2015-August, pp. 1445–1454).
            Association for Computing Machinery. https://doi.org/10.1145/2783258.2783267.
     """
-    pass
+    import numpy as np
+
+    randint_fn = (
+        seed.integers if isinstance(seed, np.random.Generator) else seed.randint
+    )
+
+    # Calculate transition probabilities between
+    # every pair of vertices according to Eq. (3)
+    adj_mat = nx.to_numpy_array(G, weight=weight)
+    inv_row_sums = np.reciprocal(adj_mat.sum(axis=1)).reshape(-1, 1)
+    transition_probabilities = adj_mat * inv_row_sums
+
+    node_map = list(G)
+    num_nodes = G.number_of_nodes()
+
+    for path_index in range(sample_size):
+        # Sample current vertex v = v_i uniformly at random
+        node_index = randint_fn(num_nodes)
+        node = node_map[node_index]
+
+        # Add v into p_r and add p_r into the path set
+        # of v, i.e., P_v
+        path = [node]
+
+        # Build the inverted index (P_v) of vertices to paths
+        if index_map is not None:
+            if node in index_map:
+                index_map[node].add(path_index)
+            else:
+                index_map[node] = {path_index}
+
+        starting_index = node_index
+        for _ in range(path_length):
+            # Randomly sample a neighbor (v_j) according
+            # to transition probabilities from ``node`` (v) to its neighbors
+            nbr_index = seed.choice(
+                num_nodes, p=transition_probabilities[starting_index]
+            )
+
+            # Set current vertex (v = v_j)
+            starting_index = nbr_index
+
+            # Add v into p_r
+            nbr_node = node_map[nbr_index]
+            path.append(nbr_node)
+
+            # Add p_r into P_v
+            if index_map is not None:
+                if nbr_node in index_map:
+                    index_map[nbr_node].add(path_index)
+                else:
+                    index_map[nbr_node] = {path_index}
+
+        yield path
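Usage sketch (not part of the patch): each walk has path_length + 1 nodes,
and passing a seed makes the runs reproducible.

    import networkx as nx

    paths = list(
        nx.generate_random_paths(
            nx.complete_graph(6), sample_size=3, path_length=4, seed=42
        )
    )
    print(len(paths), len(paths[0]))  # 3 5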
diff --git a/networkx/algorithms/simple_paths.py b/networkx/algorithms/simple_paths.py
index c96d21ae5..1bd2feb70 100644
--- a/networkx/algorithms/simple_paths.py
+++ b/networkx/algorithms/simple_paths.py
@@ -1,10 +1,16 @@
 from heapq import heappop, heappush
 from itertools import count
+
 import networkx as nx
 from networkx.algorithms.shortest_paths.weighted import _weight_function
 from networkx.utils import not_implemented_for, pairwise
-__all__ = ['all_simple_paths', 'is_simple_path', 'shortest_simple_paths',
-    'all_simple_edge_paths']
+
+__all__ = [
+    "all_simple_paths",
+    "is_simple_path",
+    "shortest_simple_paths",
+    "all_simple_edge_paths",
+]


 @nx._dispatchable
@@ -62,7 +68,27 @@ def is_simple_path(G, nodes):
     False

     """
-    pass
+    # The empty list is not a valid path. Could also return
+    # NetworkXPointlessConcept here.
+    if len(nodes) == 0:
+        return False
+
+    # If the list is a single node, just check that the node is actually
+    # in the graph.
+    if len(nodes) == 1:
+        return nodes[0] in G
+
+    # check that all nodes in the list are in the graph, if at least one
+    # is not in the graph, then this is not a simple path
+    if not all(n in G for n in nodes):
+        return False
+
+    # If the list contains repeated nodes, then it's not a simple path
+    if len(set(nodes)) != len(nodes):
+        return False
+
+    # Test that each adjacent pair of nodes is adjacent.
+    return all(v in G[u] for u, v in pairwise(nodes))


 @nx._dispatchable
@@ -227,7 +253,8 @@ def all_simple_paths(G, source, target, cutoff=None):
     all_shortest_paths, shortest_path, has_path

     """
-    pass
+    for edge_path in all_simple_edge_paths(G, source, target, cutoff):
+        yield [source] + [edge[1] for edge in edge_path]


 @nx._dispatchable
@@ -315,11 +342,67 @@ def all_simple_edge_paths(G, source, target, cutoff=None):
     all_shortest_paths, shortest_path, all_simple_paths

     """
-    pass
-
-
-@not_implemented_for('multigraph')
-@nx._dispatchable(edge_attrs='weight')
+    if source not in G:
+        raise nx.NodeNotFound(f"source node {source} not in graph")
+
+    if target in G:
+        targets = {target}
+    else:
+        try:
+            targets = set(target)
+        except TypeError as err:
+            raise nx.NodeNotFound(f"target node {target} not in graph") from err
+
+    cutoff = cutoff if cutoff is not None else len(G) - 1
+
+    if cutoff >= 0 and targets:
+        yield from _all_simple_edge_paths(G, source, targets, cutoff)
+
+
+def _all_simple_edge_paths(G, source, targets, cutoff):
+    # We simulate recursion with a stack, keeping the current path being explored
+    # and the outgoing edge iterators at each point in the stack.
+    # To avoid unnecessary checks, the loop is structured in a way such that a path
+    # is considered for yielding only after a new node/edge is added.
+    # We bootstrap the search by adding a dummy iterator to the stack that only yields
+    # a dummy edge to source (so that the trivial path has a chance of being included).
+
+    get_edges = (
+        (lambda node: G.edges(node, keys=True))
+        if G.is_multigraph()
+        else (lambda node: G.edges(node))
+    )
+
+    # The current_path is a dictionary that maps nodes in the path to the edge that was
+    # used to enter that node (instead of a list of edges) because we want both a fast
+    # membership test for nodes in the path and the preservation of insertion order.
+    current_path = {None: None}
+    stack = [iter([(None, source)])]
+
+    while stack:
+        # 1. Try to extend the current path.
+        next_edge = next((e for e in stack[-1] if e[1] not in current_path), None)
+        if next_edge is None:
+            # All edges of the last node in the current path have been explored.
+            stack.pop()
+            current_path.popitem()
+            continue
+        previous_node, next_node, *_ = next_edge
+
+        # 2. Check if we've reached a target.
+        if next_node in targets:
+            yield (list(current_path.values()) + [next_edge])[2:]  # remove dummy edge
+
+        # 3. Only expand the search through the next node if it makes sense.
+        if len(current_path) - 1 < cutoff and (
+            targets - current_path.keys() - {next_node}
+        ):
+            current_path[next_node] = next_edge
+            stack.append(iter(get_edges(next_node)))
+
+
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
 def shortest_simple_paths(G, source, target, weight=None):
     """Generate all simple paths in the graph G from source to target,
        starting from shortest ones.
@@ -407,11 +490,66 @@ def shortest_simple_paths(G, source, target, weight=None):
        (Jul., 1971), pp. 712-716.

     """
-    pass
+    if source not in G:
+        raise nx.NodeNotFound(f"source node {source} not in graph")
+
+    if target not in G:
+        raise nx.NodeNotFound(f"target node {target} not in graph")
+
+    if weight is None:
+        length_func = len
+        shortest_path_func = _bidirectional_shortest_path
+    else:
+        wt = _weight_function(G, weight)
+
+        def length_func(path):
+            return sum(
+                wt(u, v, G.get_edge_data(u, v)) for (u, v) in zip(path, path[1:])
+            )
+
+        shortest_path_func = _bidirectional_dijkstra
+
+    listA = []
+    listB = PathBuffer()
+    prev_path = None
+    while True:
+        if not prev_path:
+            length, path = shortest_path_func(G, source, target, weight=weight)
+            listB.push(length, path)
+        else:
+            ignore_nodes = set()
+            ignore_edges = set()
+            for i in range(1, len(prev_path)):
+                root = prev_path[:i]
+                root_length = length_func(root)
+                for path in listA:
+                    if path[:i] == root:
+                        ignore_edges.add((path[i - 1], path[i]))
+                try:
+                    length, spur = shortest_path_func(
+                        G,
+                        root[-1],
+                        target,
+                        ignore_nodes=ignore_nodes,
+                        ignore_edges=ignore_edges,
+                        weight=weight,
+                    )
+                    path = root[:-1] + spur
+                    listB.push(root_length + length, path)
+                except nx.NetworkXNoPath:
+                    pass
+                ignore_nodes.add(root[-1])
+
+        if listB:
+            path = listB.pop()
+            yield path
+            listA.append(path)
+            prev_path = path
+        else:
+            break


 class PathBuffer:
-
     def __init__(self):
         self.paths = set()
         self.sortedpaths = []
@@ -420,9 +558,22 @@ class PathBuffer:
     def __len__(self):
         return len(self.sortedpaths)

+    def push(self, cost, path):
+        hashable_path = tuple(path)
+        if hashable_path not in self.paths:
+            heappush(self.sortedpaths, (cost, next(self.counter), path))
+            self.paths.add(hashable_path)
+
+    def pop(self):
+        (cost, num, path) = heappop(self.sortedpaths)
+        hashable_path = tuple(path)
+        self.paths.remove(hashable_path)
+        return path

-def _bidirectional_shortest_path(G, source, target, ignore_nodes=None,
-    ignore_edges=None, weight=None):
+
+def _bidirectional_shortest_path(
+    G, source, target, ignore_nodes=None, ignore_edges=None, weight=None
+):
     """Returns the shortest path between source and target ignoring
        nodes and edges in the containers ignore_nodes and ignore_edges.

@@ -464,21 +615,133 @@ def _bidirectional_shortest_path(G, source, target, ignore_nodes=None,
     shortest_path

     """
-    pass
-
-
-def _bidirectional_pred_succ(G, source, target, ignore_nodes=None,
-    ignore_edges=None):
+    # call helper to do the real work
+    results = _bidirectional_pred_succ(G, source, target, ignore_nodes, ignore_edges)
+    pred, succ, w = results
+
+    # build path from pred+w+succ
+    path = []
+    # from w to target
+    while w is not None:
+        path.append(w)
+        w = succ[w]
+    # from source to w
+    w = pred[path[0]]
+    while w is not None:
+        path.insert(0, w)
+        w = pred[w]
+
+    return len(path), path
+
+
+def _bidirectional_pred_succ(G, source, target, ignore_nodes=None, ignore_edges=None):
     """Bidirectional shortest path helper.
     Returns (pred,succ,w) where
     pred is a dictionary of predecessors from w to the source, and
     succ is a dictionary of successors from w to the target.
     """
-    pass
-
-
-def _bidirectional_dijkstra(G, source, target, weight='weight',
-    ignore_nodes=None, ignore_edges=None):
+    # does BFS from both source and target and meets in the middle
+    if ignore_nodes and (source in ignore_nodes or target in ignore_nodes):
+        raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
+    if target == source:
+        return ({target: None}, {source: None}, source)
+
+    # handle either directed or undirected
+    if G.is_directed():
+        Gpred = G.predecessors
+        Gsucc = G.successors
+    else:
+        Gpred = G.neighbors
+        Gsucc = G.neighbors
+
+    # support optional nodes filter
+    if ignore_nodes:
+
+        def filter_iter(nodes):
+            def iterate(v):
+                for w in nodes(v):
+                    if w not in ignore_nodes:
+                        yield w
+
+            return iterate
+
+        Gpred = filter_iter(Gpred)
+        Gsucc = filter_iter(Gsucc)
+
+    # support optional edges filter
+    if ignore_edges:
+        if G.is_directed():
+
+            def filter_pred_iter(pred_iter):
+                def iterate(v):
+                    for w in pred_iter(v):
+                        if (w, v) not in ignore_edges:
+                            yield w
+
+                return iterate
+
+            def filter_succ_iter(succ_iter):
+                def iterate(v):
+                    for w in succ_iter(v):
+                        if (v, w) not in ignore_edges:
+                            yield w
+
+                return iterate
+
+            Gpred = filter_pred_iter(Gpred)
+            Gsucc = filter_succ_iter(Gsucc)
+
+        else:
+
+            def filter_iter(nodes):
+                def iterate(v):
+                    for w in nodes(v):
+                        if (v, w) not in ignore_edges and (w, v) not in ignore_edges:
+                            yield w
+
+                return iterate
+
+            Gpred = filter_iter(Gpred)
+            Gsucc = filter_iter(Gsucc)
+
+    # predecessor and successors in search
+    pred = {source: None}
+    succ = {target: None}
+
+    # initialize fringes, start with forward
+    forward_fringe = [source]
+    reverse_fringe = [target]
+
+    while forward_fringe and reverse_fringe:
+        if len(forward_fringe) <= len(reverse_fringe):
+            this_level = forward_fringe
+            forward_fringe = []
+            for v in this_level:
+                for w in Gsucc(v):
+                    if w not in pred:
+                        forward_fringe.append(w)
+                        pred[w] = v
+                    if w in succ:
+                        # found path
+                        return pred, succ, w
+        else:
+            this_level = reverse_fringe
+            reverse_fringe = []
+            for v in this_level:
+                for w in Gpred(v):
+                    if w not in succ:
+                        succ[w] = v
+                        reverse_fringe.append(w)
+                    if w in pred:
+                        # found path
+                        return pred, succ, w
+
+    raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
+
+
+def _bidirectional_dijkstra(
+    G, source, target, weight="weight", ignore_nodes=None, ignore_edges=None
+):
     """Dijkstra's algorithm for shortest paths using bidirectional search.

     This function returns the shortest path between source and target
@@ -545,4 +808,130 @@ def _bidirectional_dijkstra(G, source, target, weight='weight',
     shortest_path
     shortest_path_length
     """
-    pass
+    if ignore_nodes and (source in ignore_nodes or target in ignore_nodes):
+        raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
+    if source == target:
+        if source not in G:
+            raise nx.NodeNotFound(f"Node {source} not in graph")
+        return (0, [source])
+
+    # handle either directed or undirected
+    if G.is_directed():
+        Gpred = G.predecessors
+        Gsucc = G.successors
+    else:
+        Gpred = G.neighbors
+        Gsucc = G.neighbors
+
+    # support optional nodes filter
+    if ignore_nodes:
+
+        def filter_iter(nodes):
+            def iterate(v):
+                for w in nodes(v):
+                    if w not in ignore_nodes:
+                        yield w
+
+            return iterate
+
+        Gpred = filter_iter(Gpred)
+        Gsucc = filter_iter(Gsucc)
+
+    # support optional edges filter
+    if ignore_edges:
+        if G.is_directed():
+
+            def filter_pred_iter(pred_iter):
+                def iterate(v):
+                    for w in pred_iter(v):
+                        if (w, v) not in ignore_edges:
+                            yield w
+
+                return iterate
+
+            def filter_succ_iter(succ_iter):
+                def iterate(v):
+                    for w in succ_iter(v):
+                        if (v, w) not in ignore_edges:
+                            yield w
+
+                return iterate
+
+            Gpred = filter_pred_iter(Gpred)
+            Gsucc = filter_succ_iter(Gsucc)
+
+        else:
+
+            def filter_iter(nodes):
+                def iterate(v):
+                    for w in nodes(v):
+                        if (v, w) not in ignore_edges and (w, v) not in ignore_edges:
+                            yield w
+
+                return iterate
+
+            Gpred = filter_iter(Gpred)
+            Gsucc = filter_iter(Gsucc)
+
+    push = heappush
+    pop = heappop
+    # Init:   Forward             Backward
+    dists = [{}, {}]  # dictionary of final distances
+    paths = [{source: [source]}, {target: [target]}]  # dictionary of paths
+    fringe = [[], []]  # heap of (distance, node) tuples for
+    # extracting next node to expand
+    seen = [{source: 0}, {target: 0}]  # dictionary of distances to
+    # nodes seen
+    c = count()
+    # initialize fringe heap
+    push(fringe[0], (0, next(c), source))
+    push(fringe[1], (0, next(c), target))
+    # neighs for extracting correct neighbor information
+    neighs = [Gsucc, Gpred]
+    # variables to hold shortest discovered path
+    # finaldist = 1e30000
+    finalpath = []
+    dir = 1
+    while fringe[0] and fringe[1]:
+        # choose direction
+        # dir == 0 is forward direction and dir == 1 is back
+        dir = 1 - dir
+        # extract closest to expand
+        (dist, _, v) = pop(fringe[dir])
+        if v in dists[dir]:
+            # Shortest path to v has already been found
+            continue
+        # update distance
+        dists[dir][v] = dist  # equal to seen[dir][v]
+        if v in dists[1 - dir]:
+            # if we have scanned v in both directions we are done
+            # we have now discovered the shortest path
+            return (finaldist, finalpath)
+
+        wt = _weight_function(G, weight)
+        for w in neighs[dir](v):
+            if dir == 0:  # forward
+                minweight = wt(v, w, G.get_edge_data(v, w))
+                vwLength = dists[dir][v] + minweight
+            else:  # back, must remember to change v,w->w,v
+                minweight = wt(w, v, G.get_edge_data(w, v))
+                vwLength = dists[dir][v] + minweight
+
+            if w in dists[dir]:
+                if vwLength < dists[dir][w]:
+                    raise ValueError("Contradictory paths found: negative weights?")
+            elif w not in seen[dir] or vwLength < seen[dir][w]:
+                # relaxing
+                seen[dir][w] = vwLength
+                push(fringe[dir], (vwLength, next(c), w))
+                paths[dir][w] = paths[dir][v] + [w]
+                if w in seen[0] and w in seen[1]:
+                    # see if this path is better than the already
+                    # discovered shortest path
+                    totaldist = seen[0][w] + seen[1][w]
+                    if finalpath == [] or finaldist > totaldist:
+                        finaldist = totaldist
+                        revpath = paths[1][w][:]
+                        revpath.reverse()
+                        finalpath = paths[0][w] + revpath[1:]
+    raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
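
As a quick check of the three restored entry points above, a sketch on a small cycle (graph and endpoints are illustrative):

    import networkx as nx

    G = nx.cycle_graph(5)
    # Exactly two simple paths exist between 0 and 2.
    print(sorted(nx.all_simple_paths(G, 0, 2)))    # [[0, 1, 2], [0, 4, 3, 2]]
    print(next(nx.shortest_simple_paths(G, 0, 2)))  # [0, 1, 2], shortest first
    assert nx.is_simple_path(G, [0, 1, 2])
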
diff --git a/networkx/algorithms/smallworld.py b/networkx/algorithms/smallworld.py
index d916d5515..05ae17082 100644
--- a/networkx/algorithms/smallworld.py
+++ b/networkx/algorithms/smallworld.py
@@ -16,11 +16,12 @@ For more information, see the Wikipedia article on small-world network [1]_.
 """
 import networkx as nx
 from networkx.utils import not_implemented_for, py_random_state
-__all__ = ['random_reference', 'lattice_reference', 'sigma', 'omega']

+__all__ = ["random_reference", "lattice_reference", "sigma", "omega"]

-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @py_random_state(3)
 @nx._dispatchable(returns_graph=True)
 def random_reference(G, niter=1, connectivity=True, seed=None):
@@ -62,11 +63,63 @@ def random_reference(G, niter=1, connectivity=True, seed=None):
            "Specificity and stability in topology of protein networks."
            Science 296.5569 (2002): 910-913.
     """
-    pass
-
-
-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+    if len(G) < 4:
+        raise nx.NetworkXError("Graph has fewer than four nodes.")
+    if len(G.edges) < 2:
+        raise nx.NetworkXError("Graph has fewer that 2 edges")
+
+    from networkx.utils import cumulative_distribution, discrete_sequence
+
+    local_conn = nx.connectivity.local_edge_connectivity
+
+    G = G.copy()
+    keys, degrees = zip(*G.degree())  # keys, degree
+    cdf = cumulative_distribution(degrees)  # cdf of degree
+    nnodes = len(G)
+    nedges = nx.number_of_edges(G)
+    niter = niter * nedges
+    ntries = int(nnodes * nedges / (nnodes * (nnodes - 1) / 2))
+    swapcount = 0
+
+    for i in range(niter):
+        n = 0
+        while n < ntries:
+            # pick two random edges without creating edge list
+            # choose source node indices from discrete distribution
+            (ai, ci) = discrete_sequence(2, cdistribution=cdf, seed=seed)
+            if ai == ci:
+                continue  # same source, skip
+            a = keys[ai]  # convert index to label
+            c = keys[ci]
+            # choose target uniformly from neighbors
+            b = seed.choice(list(G.neighbors(a)))
+            d = seed.choice(list(G.neighbors(c)))
+            if b in [a, c, d] or d in [a, b, c]:
+                continue  # all vertices should be different
+
+            # don't create parallel edges
+            if (d not in G[a]) and (b not in G[c]):
+                G.add_edge(a, d)
+                G.add_edge(c, b)
+                G.remove_edge(a, b)
+                G.remove_edge(c, d)
+
+                # Check if the graph is still connected
+                if connectivity and local_conn(G, a, b) == 0:
+                    # Not connected, revert the swap
+                    G.remove_edge(a, d)
+                    G.remove_edge(c, b)
+                    G.add_edge(a, b)
+                    G.add_edge(c, d)
+                else:
+                    swapcount += 1
+                    break
+            n += 1
+    return G
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @py_random_state(4)
 @nx._dispatchable(returns_graph=True)
 def lattice_reference(G, niter=5, D=None, connectivity=True, seed=None):
@@ -114,11 +167,83 @@ def lattice_reference(G, niter=5, D=None, connectivity=True, seed=None):
        "Specificity and stability in topology of protein networks."
        Science 296.5569 (2002): 910-913.
     """
-    pass
-
-
-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+    import numpy as np
+
+    from networkx.utils import cumulative_distribution, discrete_sequence
+
+    local_conn = nx.connectivity.local_edge_connectivity
+
+    if len(G) < 4:
+        raise nx.NetworkXError("Graph has fewer than four nodes.")
+    if len(G.edges) < 2:
+        raise nx.NetworkXError("Graph has fewer that 2 edges")
+    # Instead of choosing uniformly at random from a generated edge list,
+    # this algorithm chooses nonuniformly from the set of nodes with
+    # probability weighted by degree.
+    G = G.copy()
+    keys, degrees = zip(*G.degree())  # keys, degree
+    cdf = cumulative_distribution(degrees)  # cdf of degree
+
+    nnodes = len(G)
+    nedges = nx.number_of_edges(G)
+    if D is None:
+        D = np.zeros((nnodes, nnodes))
+        un = np.arange(1, nnodes)
+        um = np.arange(nnodes - 1, 0, -1)
+        u = np.append((0,), np.where(un < um, un, um))
+
+        for v in range(int(np.ceil(nnodes / 2))):
+            D[nnodes - v - 1, :] = np.append(u[v + 1 :], u[: v + 1])
+            D[v, :] = D[nnodes - v - 1, :][::-1]
+
+    niter = niter * nedges
+    # maximal number of rewiring attempts per 'niter'
+    max_attempts = int(nnodes * nedges / (nnodes * (nnodes - 1) / 2))
+
+    for _ in range(niter):
+        n = 0
+        while n < max_attempts:
+            # pick two random edges without creating edge list
+            # choose source node indices from discrete distribution
+            (ai, ci) = discrete_sequence(2, cdistribution=cdf, seed=seed)
+            if ai == ci:
+                continue  # same source, skip
+            a = keys[ai]  # convert index to label
+            c = keys[ci]
+            # choose target uniformly from neighbors
+            b = seed.choice(list(G.neighbors(a)))
+            d = seed.choice(list(G.neighbors(c)))
+            bi = keys.index(b)
+            di = keys.index(d)
+
+            if b in [a, c, d] or d in [a, b, c]:
+                continue  # all vertices should be different
+
+            # don't create parallel edges
+            if (d not in G[a]) and (b not in G[c]):
+                if D[ai, bi] + D[ci, di] >= D[ai, ci] + D[bi, di]:
+                    # only swap if we get closer to the diagonal
+                    G.add_edge(a, d)
+                    G.add_edge(c, b)
+                    G.remove_edge(a, b)
+                    G.remove_edge(c, d)
+
+                    # Check if the graph is still connected
+                    if connectivity and local_conn(G, a, b) == 0:
+                        # Not connected, revert the swap
+                        G.remove_edge(a, d)
+                        G.remove_edge(c, b)
+                        G.add_edge(a, b)
+                        G.add_edge(c, d)
+                    else:
+                        break
+            n += 1
+
+    return G
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @py_random_state(3)
 @nx._dispatchable
 def sigma(G, niter=100, nrand=10, seed=None):
@@ -166,11 +291,28 @@ def sigma(G, niter=100, nrand=10, seed=None):
            Canonical Network Equivalence".
            PLoS One. 3 (4). PMID 18446219. doi:10.1371/journal.pone.0002051.
     """
-    pass
+    import numpy as np
+
+    # Compute the mean clustering coefficient and average shortest path length
+    # for an equivalent random graph
+    randMetrics = {"C": [], "L": []}
+    for i in range(nrand):
+        Gr = random_reference(G, niter=niter, seed=seed)
+        randMetrics["C"].append(nx.transitivity(Gr))
+        randMetrics["L"].append(nx.average_shortest_path_length(Gr))
+
+    C = nx.transitivity(G)
+    L = nx.average_shortest_path_length(G)
+    Cr = np.mean(randMetrics["C"])
+    Lr = np.mean(randMetrics["L"])
+
+    sigma = (C / Cr) / (L / Lr)
+
+    return float(sigma)


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @py_random_state(3)
 @nx._dispatchable
 def omega(G, niter=5, nrand=10, seed=None):
@@ -224,4 +366,38 @@ def omega(G, niter=5, nrand=10, seed=None):
            Brain Connectivity. 1 (0038): 367-75.  PMC 3604768. PMID 22432451.
            doi:10.1089/brain.2011.0038.
     """
-    pass
+    import numpy as np
+
+    # Compute the mean clustering coefficient and average shortest path length
+    # for an equivalent random graph
+    randMetrics = {"C": [], "L": []}
+
+    # Calculate initial average clustering coefficient which potentially will
+    # get replaced by higher clustering coefficients from generated lattice
+    # reference graphs
+    Cl = nx.average_clustering(G)
+
+    niter_lattice_reference = niter
+    niter_random_reference = niter * 2
+
+    for _ in range(nrand):
+        # Generate random graph
+        Gr = random_reference(G, niter=niter_random_reference, seed=seed)
+        randMetrics["L"].append(nx.average_shortest_path_length(Gr))
+
+        # Generate lattice graph
+        Gl = lattice_reference(G, niter=niter_lattice_reference, seed=seed)
+
+        # Replace old clustering coefficient, if clustering is higher in
+        # generated lattice reference
+        Cl_temp = nx.average_clustering(Gl)
+        if Cl_temp > Cl:
+            Cl = Cl_temp
+
+    C = nx.average_clustering(G)
+    L = nx.average_shortest_path_length(G)
+    Lr = np.mean(randMetrics["L"])
+
+    omega = (Lr / L) - (C / Cl)
+
+    return float(omega)
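
The two small-world coefficients restored above can be sanity-checked on a Watts-Strogatz graph; both functions are randomized and expensive, so the parameters here are deliberately tiny and the printed values will vary with the seed:

    import networkx as nx

    G = nx.connected_watts_strogatz_graph(20, 4, 0.1, seed=1)
    # sigma > 1 and omega near 0 both indicate small-world structure.
    print(nx.sigma(G, niter=5, nrand=2, seed=1))
    print(nx.omega(G, niter=2, nrand=2, seed=1))
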
diff --git a/networkx/algorithms/smetric.py b/networkx/algorithms/smetric.py
index a94fa7d50..5a27014ee 100644
--- a/networkx/algorithms/smetric.py
+++ b/networkx/algorithms/smetric.py
@@ -1,5 +1,6 @@
 import networkx as nx
-__all__ = ['s_metric']
+
+__all__ = ["s_metric"]


 @nx._dispatchable
@@ -33,4 +34,27 @@ def s_metric(G, **kwargs):
            Definition, Properties, and  Implications (Extended Version), 2005.
            https://arxiv.org/abs/cond-mat/0501169
     """
-    pass
+    # NOTE: This entire code block + the **kwargs in the signature can all be
+    # removed when the deprecation expires.
+    # Normalized is always False, since all `normalized=True` did was raise
+    # a NotImplementedError
+    if kwargs:
+        # Warn for `normalized`, raise for any other kwarg
+        if "normalized" in kwargs:
+            import warnings
+
+            warnings.warn(
+                "\n\nThe `normalized` keyword is deprecated and will be removed\n"
+                "in the future. To silence this warning, remove `normalized`\n"
+                "when calling `s_metric`.\n\n"
+                "The value of `normalized` is ignored.",
+                DeprecationWarning,
+                stacklevel=3,
+            )
+        else:
+            # Typical raising behavior for Python when kwarg not recognized
+            raise TypeError(
+                f"s_metric got an unexpected keyword argument '{list(kwargs.keys())[0]}'"
+            )
+
+    return float(sum(G.degree(u) * G.degree(v) for (u, v) in G.edges()))
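
A worked check of the restored `s_metric` on a path graph (the graph choice is illustrative):

    import networkx as nx

    G = nx.path_graph(4)  # degrees: 1, 2, 2, 1
    # s(G) = sum over edges of deg(u) * deg(v) = 1*2 + 2*2 + 2*1 = 8
    assert nx.s_metric(G) == 8.0
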
diff --git a/networkx/algorithms/sparsifiers.py b/networkx/algorithms/sparsifiers.py
index 49426f9d8..870b7ba6f 100644
--- a/networkx/algorithms/sparsifiers.py
+++ b/networkx/algorithms/sparsifiers.py
@@ -1,14 +1,16 @@
 """Functions for computing sparsifiers of graphs."""
 import math
+
 import networkx as nx
 from networkx.utils import not_implemented_for, py_random_state
-__all__ = ['spanner']
+
+__all__ = ["spanner"]


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @py_random_state(3)
-@nx._dispatchable(edge_attrs='weight', returns_graph=True)
+@nx._dispatchable(edge_attrs="weight", returns_graph=True)
 def spanner(G, stretch, weight=None, seed=None):
     """Returns a spanner of the given graph with the given stretch.

@@ -61,7 +63,127 @@ def spanner(G, stretch, weight=None, seed=None):
     Algorithm for Computing Sparse Spanners in Weighted Graphs.
     Random Struct. Algorithms 30(4): 532-563 (2007).
     """
-    pass
+    if stretch < 1:
+        raise ValueError("stretch must be at least 1")
+
+    k = (stretch + 1) // 2
+
+    # initialize spanner H with empty edge set
+    H = nx.empty_graph()
+    H.add_nodes_from(G.nodes)
+
+    # phase 1: forming the clusters
+    # the residual graph has V' from the paper as its node set
+    # and E' from the paper as its edge set
+    residual_graph = _setup_residual_graph(G, weight)
+    # clustering is a dictionary that maps nodes in a cluster to the
+    # cluster center
+    clustering = {v: v for v in G.nodes}
+    sample_prob = math.pow(G.number_of_nodes(), -1 / k)
+    size_limit = 2 * math.pow(G.number_of_nodes(), 1 + 1 / k)
+
+    i = 0
+    while i < k - 1:
+        # step 1: sample centers
+        sampled_centers = set()
+        for center in set(clustering.values()):
+            if seed.random() < sample_prob:
+                sampled_centers.add(center)
+
+        # combined loop for steps 2 and 3
+        edges_to_add = set()
+        edges_to_remove = set()
+        new_clustering = {}
+        for v in residual_graph.nodes:
+            if clustering[v] in sampled_centers:
+                continue
+
+            # step 2: find neighboring (sampled) clusters and
+            # lightest edges to them
+            lightest_edge_neighbor, lightest_edge_weight = _lightest_edge_dicts(
+                residual_graph, clustering, v
+            )
+            neighboring_sampled_centers = (
+                set(lightest_edge_weight.keys()) & sampled_centers
+            )
+
+            # step 3: add edges to spanner
+            if not neighboring_sampled_centers:
+                # connect to each neighboring center via lightest edge
+                for neighbor in lightest_edge_neighbor.values():
+                    edges_to_add.add((v, neighbor))
+                # remove all incident edges
+                for neighbor in residual_graph.adj[v]:
+                    edges_to_remove.add((v, neighbor))
+
+            else:  # there is a neighboring sampled center
+                closest_center = min(
+                    neighboring_sampled_centers, key=lightest_edge_weight.get
+                )
+                closest_center_weight = lightest_edge_weight[closest_center]
+                closest_center_neighbor = lightest_edge_neighbor[closest_center]
+
+                edges_to_add.add((v, closest_center_neighbor))
+                new_clustering[v] = closest_center
+
+                # connect to centers with edge weight less than
+                # closest_center_weight
+                for center, edge_weight in lightest_edge_weight.items():
+                    if edge_weight < closest_center_weight:
+                        neighbor = lightest_edge_neighbor[center]
+                        edges_to_add.add((v, neighbor))
+
+                # remove edges to centers with edge weight less than
+                # closest_center_weight
+                for neighbor in residual_graph.adj[v]:
+                    nbr_cluster = clustering[neighbor]
+                    nbr_weight = lightest_edge_weight[nbr_cluster]
+                    if (
+                        nbr_cluster == closest_center
+                        or nbr_weight < closest_center_weight
+                    ):
+                        edges_to_remove.add((v, neighbor))
+
+        # check whether iteration added too many edges to spanner,
+        # if so repeat
+        if len(edges_to_add) > size_limit:
+            # an iteration is repeated O(1) times in expectation
+            continue
+
+        # iteration succeeded
+        i = i + 1
+
+        # actually add edges to spanner
+        for u, v in edges_to_add:
+            _add_edge_to_spanner(H, residual_graph, u, v, weight)
+
+        # actually delete edges from residual graph
+        residual_graph.remove_edges_from(edges_to_remove)
+
+        # copy old clustering data to new_clustering
+        for node, center in clustering.items():
+            if center in sampled_centers:
+                new_clustering[node] = center
+        clustering = new_clustering
+
+        # step 4: remove intra-cluster edges
+        for u in residual_graph.nodes:
+            for v in list(residual_graph.adj[u]):
+                if clustering[u] == clustering[v]:
+                    residual_graph.remove_edge(u, v)
+
+        # update residual graph node set
+        for v in list(residual_graph.nodes):
+            if v not in clustering:
+                residual_graph.remove_node(v)
+
+    # phase 2: vertex-cluster joining
+    for v in residual_graph.nodes:
+        lightest_edge_neighbor, _ = _lightest_edge_dicts(residual_graph, clustering, v)
+        for neighbor in lightest_edge_neighbor.values():
+            _add_edge_to_spanner(H, residual_graph, v, neighbor, weight)
+
+    return H


 def _setup_residual_graph(G, weight):
@@ -88,7 +210,16 @@ def _setup_residual_graph(G, weight):
     NetworkX graph
         The residual graph used for the Baswana-Sen algorithm.
     """
-    pass
+    residual_graph = G.copy()
+
+    # establish unique edge weights, even for unweighted graphs
+    for u, v in G.edges():
+        if not weight:
+            residual_graph[u][v]["weight"] = (id(u), id(v))
+        else:
+            residual_graph[u][v]["weight"] = (G[u][v][weight], id(u), id(v))
+
+    return residual_graph


 def _lightest_edge_dicts(residual_graph, clustering, node):
@@ -123,7 +254,18 @@ def _lightest_edge_dicts(residual_graph, clustering, node):
     residual graph then the center of the cluster is not a key in the
     returned dictionaries.
     """
-    pass
+    lightest_edge_neighbor = {}
+    lightest_edge_weight = {}
+    for neighbor in residual_graph.adj[node]:
+        nbr_center = clustering[neighbor]
+        weight = residual_graph[node][neighbor]["weight"]
+        if (
+            nbr_center not in lightest_edge_weight
+            or weight < lightest_edge_weight[nbr_center]
+        ):
+            lightest_edge_neighbor[nbr_center] = neighbor
+            lightest_edge_weight[nbr_center] = weight
+    return lightest_edge_neighbor, lightest_edge_weight


 def _add_edge_to_spanner(H, residual_graph, u, v, weight):
@@ -148,4 +290,6 @@ def _add_edge_to_spanner(H, residual_graph, u, v, weight):
     weight : object
         The edge attribute to use as distance.
     """
-    pass
+    H.add_edge(u, v)
+    if weight:
+        H[u][v][weight] = residual_graph[u][v]["weight"][0]
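
A sketch of the restored `spanner` on a dense graph (sizes and seed are illustrative; the exact edge count of H is randomized):

    import networkx as nx

    G = nx.complete_graph(10)
    H = nx.spanner(G, stretch=3, seed=7)
    # H keeps all nodes but drops edges; pairwise distances grow by at most
    # the stretch factor.
    print(G.number_of_edges(), H.number_of_edges())
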
diff --git a/networkx/algorithms/structuralholes.py b/networkx/algorithms/structuralholes.py
index b7e22f07f..bae42d060 100644
--- a/networkx/algorithms/structuralholes.py
+++ b/networkx/algorithms/structuralholes.py
@@ -1,9 +1,11 @@
 """Functions for computing measures of structural holes."""
+
 import networkx as nx
-__all__ = ['constraint', 'local_constraint', 'effective_size']
+
+__all__ = ["constraint", "local_constraint", "effective_size"]


-@nx._dispatchable(edge_attrs='weight')
+@nx._dispatchable(edge_attrs="weight")
 def mutual_weight(G, u, v, weight=None):
     """Returns the sum of the weights of the edge from `u` to `v` and
     the edge from `v` to `u` in `G`.
@@ -15,10 +17,18 @@ def mutual_weight(G, u, v, weight=None):
     Pre-conditions: `u` and `v` must both be in `G`.

     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
+    try:
+        a_uv = G[u][v].get(weight, 1)
+    except KeyError:
+        a_uv = 0
+    try:
+        a_vu = G[v][u].get(weight, 1)
+    except KeyError:
+        a_vu = 0
+    return a_uv + a_vu
+
+
+@nx._dispatchable(edge_attrs="weight")
 def normalized_mutual_weight(G, u, v, norm=sum, weight=None):
     """Returns normalized mutual weight of the edges from `u` to `v`
     with respect to the mutual weights of the neighbors of `u` in `G`.
@@ -35,12 +45,13 @@ def normalized_mutual_weight(G, u, v, norm=sum, weight=None):
     attribute used as weight.

     """
-    pass
+    scale = norm(mutual_weight(G, u, w, weight) for w in set(nx.all_neighbors(G, u)))
+    return 0 if scale == 0 else mutual_weight(G, u, v, weight) / scale


-@nx._dispatchable(edge_attrs='weight')
+@nx._dispatchable(edge_attrs="weight")
 def effective_size(G, nodes=None, weight=None):
-    """Returns the effective size of all nodes in the graph ``G``.
+    r"""Returns the effective size of all nodes in the graph ``G``.

     The *effective size* of a node's ego network is based on the concept
     of redundancy. A person's ego network has redundancy to the extent
@@ -51,8 +62,8 @@ def effective_size(G, nodes=None, weight=None):

     .. math::

-       e(u) = \\sum_{v \\in N(u) \\setminus \\{u\\}}
-       \\left(1 - \\sum_{w \\in N(v)} p_{uw} m_{vw}\\right)
+       e(u) = \sum_{v \in N(u) \setminus \{u\}}
+       \left(1 - \sum_{w \in N(v)} p_{uw} m_{vw}\right)

     where $N(u)$ is the set of neighbors of $u$ and $p_{uw}$ is the
     normalized mutual weight of the (directed or undirected) edges
@@ -67,7 +78,7 @@ def effective_size(G, nodes=None, weight=None):

     .. math::

-       e(u) = n - \\frac{2t}{n}
+       e(u) = n - \frac{2t}{n}

     where `t` is the number of ties in the ego network (not including
     ties to ego) and `n` is the number of nodes (excluding ego).
@@ -118,12 +129,42 @@ def effective_size(G, nodes=None, weight=None):
            http://www.analytictech.com/connections/v20(1)/holes.htm

     """
-    pass

-
-@nx._dispatchable(edge_attrs='weight')
+    def redundancy(G, u, v, weight=None):
+        nmw = normalized_mutual_weight
+        r = sum(
+            nmw(G, u, w, weight=weight) * nmw(G, v, w, norm=max, weight=weight)
+            for w in set(nx.all_neighbors(G, u))
+        )
+        return 1 - r
+
+    effective_size = {}
+    if nodes is None:
+        nodes = G
+    # Use Borgatti's simplified formula for unweighted and undirected graphs
+    if not G.is_directed() and weight is None:
+        for v in nodes:
+            # Effective size is not defined for isolated nodes
+            if len(G[v]) == 0:
+                effective_size[v] = float("nan")
+                continue
+            E = nx.ego_graph(G, v, center=False, undirected=True)
+            effective_size[v] = len(E) - (2 * E.size()) / len(E)
+    else:
+        for v in nodes:
+            # Effective size is not defined for isolated nodes
+            if len(G[v]) == 0:
+                effective_size[v] = float("nan")
+                continue
+            effective_size[v] = sum(
+                redundancy(G, v, u, weight) for u in set(nx.all_neighbors(G, v))
+            )
+    return effective_size
+
+
+@nx._dispatchable(edge_attrs="weight")
 def constraint(G, nodes=None, weight=None):
-    """Returns the constraint on all nodes in the graph ``G``.
+    r"""Returns the constraint on all nodes in the graph ``G``.

     The *constraint* is a measure of the extent to which a node *v* is
     invested in those nodes that are themselves invested in the
@@ -132,10 +173,10 @@ def constraint(G, nodes=None, weight=None):

     .. math::

-       c(v) = \\sum_{w \\in N(v) \\setminus \\{v\\}} \\ell(v, w)
+       c(v) = \sum_{w \in N(v) \setminus \{v\}} \ell(v, w)

     where $N(v)$ is the subset of the neighbors of `v` that are either
-    predecessors or successors of `v` and $\\ell(v, w)$ is the local
+    predecessors or successors of `v` and $\ell(v, w)$ is the local
     constraint on `v` with respect to `w` [1]_. For the definition of local
     constraint, see :func:`local_constraint`.

@@ -168,20 +209,31 @@ def constraint(G, nodes=None, weight=None):
            American Journal of Sociology (110): 349–399.

     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
+    if nodes is None:
+        nodes = G
+    constraint = {}
+    for v in nodes:
+        # Constraint is not defined for isolated nodes
+        if len(G[v]) == 0:
+            constraint[v] = float("nan")
+            continue
+        constraint[v] = sum(
+            local_constraint(G, v, n, weight) for n in set(nx.all_neighbors(G, v))
+        )
+    return constraint
+
+
+@nx._dispatchable(edge_attrs="weight")
 def local_constraint(G, u, v, weight=None):
-    """Returns the local constraint on the node ``u`` with respect to
+    r"""Returns the local constraint on the node ``u`` with respect to
     the node ``v`` in the graph ``G``.

     Formally, the *local constraint on u with respect to v*, denoted
-    $\\ell(u, v)$, is defined by
+    $\ell(u, v)$, is defined by

     .. math::

-       \\ell(u, v) = \\left(p_{uv} + \\sum_{w \\in N(v)} p_{uw} p_{wv}\\right)^2,
+       \ell(u, v) = \left(p_{uv} + \sum_{w \in N(v)} p_{uw} p_{wv}\right)^2,

     where $N(v)$ is the set of neighbors of $v$ and $p_{uv}$ is the
     normalized mutual weight of the (directed or undirected) edges
@@ -222,4 +274,10 @@ def local_constraint(G, u, v, weight=None):
            American Journal of Sociology (110): 349–399.

     """
-    pass
+    nmw = normalized_mutual_weight
+    direct = nmw(G, u, v, weight=weight)
+    indirect = sum(
+        nmw(G, u, w, weight=weight) * nmw(G, w, v, weight=weight)
+        for w in set(nx.all_neighbors(G, u))
+    )
+    return (direct + indirect) ** 2
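
The structural-holes measures restored above can be verified by hand on a star graph, where the hub spans only non-redundant contacts (the graph is illustrative):

    import networkx as nx

    G = nx.star_graph(3)  # hub 0 with leaves 1, 2, 3
    # Borgatti's formula: effective size of the hub is n - 2t/n = 3 - 0 = 3.
    assert nx.effective_size(G)[0] == 3.0
    # Each leaf contributes (1/3)**2 to the hub's constraint, 3/9 in total.
    assert abs(nx.constraint(G)[0] - 1 / 3) < 1e-9
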
diff --git a/networkx/algorithms/summarization.py b/networkx/algorithms/summarization.py
index c37617f80..794a77ab3 100644
--- a/networkx/algorithms/summarization.py
+++ b/networkx/algorithms/summarization.py
@@ -59,11 +59,13 @@ For more information on graph summarization, see `Graph Summarization Methods
 and Applications: A Survey <https://dl.acm.org/doi/abs/10.1145/3186727>`_
 """
 from collections import Counter, defaultdict
+
 import networkx as nx
-__all__ = ['dedensify', 'snap_aggregation']
+
+__all__ = ["dedensify", "snap_aggregation"]


-@nx._dispatchable(mutates_input={'not copy': 3}, returns_graph=True)
+@nx._dispatchable(mutates_input={"not copy": 3}, returns_graph=True)
 def dedensify(G, threshold, prefix=None, copy=True):
     """Compresses neighborhoods around high-degree nodes

@@ -168,12 +170,60 @@ def dedensify(G, threshold, prefix=None, copy=True):
        Knowledge Discovery and Data Mining (pp. 1755-1764).
        http://www.cs.umd.edu/~abadi/papers/graph-dedense.pdf
     """
-    pass
-
-
-def _snap_build_graph(G, groups, node_attributes, edge_attributes,
-    neighbor_info, edge_types, prefix, supernode_attribute, superedge_attribute
-    ):
+    if threshold < 2:
+        raise nx.NetworkXError("The degree threshold must be >= 2")
+
+    degrees = G.in_degree if G.is_directed() else G.degree
+    # Group nodes based on degree threshold
+    high_degree_nodes = {n for n, d in degrees if d > threshold}
+    low_degree_nodes = G.nodes() - high_degree_nodes
+
+    auxiliary = {}
+    for node in G:
+        high_degree_nbrs = frozenset(high_degree_nodes & set(G[node]))
+        if high_degree_nbrs:
+            if high_degree_nbrs in auxiliary:
+                auxiliary[high_degree_nbrs].add(node)
+            else:
+                auxiliary[high_degree_nbrs] = {node}
+
+    if copy:
+        G = G.copy()
+
+    compressor_nodes = set()
+    for index, (high_degree_nodes, low_degree_nodes) in enumerate(auxiliary.items()):
+        low_degree_node_count = len(low_degree_nodes)
+        high_degree_node_count = len(high_degree_nodes)
+        old_edges = high_degree_node_count * low_degree_node_count
+        new_edges = high_degree_node_count + low_degree_node_count
+        if old_edges <= new_edges:
+            continue
+        compression_node = "".join(str(node) for node in high_degree_nodes)
+        if prefix:
+            compression_node = str(prefix) + compression_node
+        for node in low_degree_nodes:
+            for high_node in high_degree_nodes:
+                if G.has_edge(node, high_node):
+                    G.remove_edge(node, high_node)
+
+            G.add_edge(node, compression_node)
+        for node in high_degree_nodes:
+            G.add_edge(compression_node, node)
+        compressor_nodes.add(compression_node)
+    return G, compressor_nodes
+
+
+def _snap_build_graph(
+    G,
+    groups,
+    node_attributes,
+    edge_attributes,
+    neighbor_info,
+    edge_types,
+    prefix,
+    supernode_attribute,
+    superedge_attribute,
+):
     """
     Build the summary graph from the data structures produced in the SNAP aggregation algorithm

@@ -209,7 +259,48 @@ def _snap_build_graph(G, groups, node_attributes, edge_attributes,
     -------
     summary graph: Networkx graph
     """
-    pass
+    output = G.__class__()
+    node_label_lookup = {}
+    for index, group_id in enumerate(groups):
+        group_set = groups[group_id]
+        supernode = f"{prefix}{index}"
+        node_label_lookup[group_id] = supernode
+        supernode_attributes = {
+            attr: G.nodes[next(iter(group_set))][attr] for attr in node_attributes
+        }
+        supernode_attributes[supernode_attribute] = group_set
+        output.add_node(supernode, **supernode_attributes)
+
+    for group_id in groups:
+        group_set = groups[group_id]
+        source_supernode = node_label_lookup[group_id]
+        for other_group, group_edge_types in neighbor_info[
+            next(iter(group_set))
+        ].items():
+            if group_edge_types:
+                target_supernode = node_label_lookup[other_group]
+                summary_graph_edge = (source_supernode, target_supernode)
+
+                edge_types = [
+                    dict(zip(edge_attributes, edge_type))
+                    for edge_type in group_edge_types
+                ]
+
+                has_edge = output.has_edge(*summary_graph_edge)
+                if output.is_multigraph():
+                    if not has_edge:
+                        for edge_type in edge_types:
+                            output.add_edge(*summary_graph_edge, **edge_type)
+                    elif not output.is_directed():
+                        existing_edge_data = output.get_edge_data(*summary_graph_edge)
+                        for edge_type in edge_types:
+                            if edge_type not in existing_edge_data.values():
+                                output.add_edge(*summary_graph_edge, **edge_type)
+                else:
+                    superedge_attributes = {superedge_attribute: edge_types}
+                    output.add_edge(*summary_graph_edge, **superedge_attributes)
+
+    return output


 def _snap_eligible_group(G, groups, group_lookup, edge_types):
@@ -235,7 +326,33 @@ def _snap_eligible_group(G, groups, group_lookup, edge_types):
     -------
     tuple: group ID to split, and neighbor-groups participation_counts data structure
     """
-    pass
+    nbr_info = {node: {gid: Counter() for gid in groups} for node in group_lookup}
+    for group_id in groups:
+        current_group = groups[group_id]
+
+        # build nbr_info for nodes in group
+        for node in current_group:
+            nbr_info[node] = {group_id: Counter() for group_id in groups}
+            edges = G.edges(node, keys=True) if G.is_multigraph() else G.edges(node)
+            for edge in edges:
+                neighbor = edge[1]
+                edge_type = edge_types[edge]
+                neighbor_group_id = group_lookup[neighbor]
+                nbr_info[node][neighbor_group_id][edge_type] += 1
+
+        # check if group_id is eligible to be split
+        group_size = len(current_group)
+        for other_group_id in groups:
+            edge_counts = Counter()
+            for node in current_group:
+                edge_counts.update(nbr_info[node][other_group_id].keys())
+
+            if not all(count == group_size for count in edge_counts.values()):
+                # only the nbr_info of the returned group_id is required for handling group splits
+                return group_id, nbr_info
+
+    # if no eligible groups, complete nbr_info is calculated
+    return None, nbr_info


 def _snap_split(groups, neighbor_info, group_lookup, group_id):
@@ -266,13 +383,38 @@ def _snap_split(groups, neighbor_info, group_lookup, group_id):
     dict
         The updated groups based on the split
     """
-    pass
-
-
-@nx._dispatchable(node_attrs='[node_attributes]', edge_attrs=
-    '[edge_attributes]', returns_graph=True)
-def snap_aggregation(G, node_attributes, edge_attributes=(), prefix=
-    'Supernode-', supernode_attribute='group', superedge_attribute='types'):
+    new_group_mappings = defaultdict(set)
+    for node in groups[group_id]:
+        signature = tuple(
+            frozenset(edge_types) for edge_types in neighbor_info[node].values()
+        )
+        new_group_mappings[signature].add(node)
+
+    # leave the biggest new_group as the original group
+    new_groups = sorted(new_group_mappings.values(), key=len)
+    for new_group in new_groups[:-1]:
+        # Assign an unused integer as the new_group_id; original group ids
+        # are attribute tuples, so the integer ids cannot collide with them
+        new_group_id = len(groups)
+        groups[new_group_id] = new_group
+        groups[group_id] -= new_group
+        for node in new_group:
+            group_lookup[node] = new_group_id
+
+    return groups
+
+
+@nx._dispatchable(
+    node_attrs="[node_attributes]", edge_attrs="[edge_attributes]", returns_graph=True
+)
+def snap_aggregation(
+    G,
+    node_attributes,
+    edge_attributes=(),
+    prefix="Supernode-",
+    supernode_attribute="group",
+    superedge_attribute="types",
+):
     """Creates a summary graph based on attributes and connectivity.

     This function uses the Summarization by Grouping Nodes on Attributes
@@ -379,4 +521,43 @@ def snap_aggregation(G, node_attributes, edge_attributes=(), prefix=
        Management of Data (SIGMOD’08), pages 567–580, Vancouver, Canada,
        June 2008.
     """
-    pass
+    edge_types = {
+        edge: tuple(attrs.get(attr) for attr in edge_attributes)
+        for edge, attrs in G.edges.items()
+    }
+    if not G.is_directed():
+        if G.is_multigraph():
+            # list is needed to avoid mutating while iterating
+            edges = [((v, u, k), etype) for (u, v, k), etype in edge_types.items()]
+        else:
+            # list is needed to avoid mutating while iterating
+            edges = [((v, u), etype) for (u, v), etype in edge_types.items()]
+        edge_types.update(edges)
+
+    group_lookup = {
+        node: tuple(attrs[attr] for attr in node_attributes)
+        for node, attrs in G.nodes.items()
+    }
+    groups = defaultdict(set)
+    for node, node_type in group_lookup.items():
+        groups[node_type].add(node)
+
+    eligible_group_id, nbr_info = _snap_eligible_group(
+        G, groups, group_lookup, edge_types
+    )
+    while eligible_group_id:
+        groups = _snap_split(groups, nbr_info, group_lookup, eligible_group_id)
+        eligible_group_id, nbr_info = _snap_eligible_group(
+            G, groups, group_lookup, edge_types
+        )
+    return _snap_build_graph(
+        G,
+        groups,
+        node_attributes,
+        edge_attributes,
+        nbr_info,
+        edge_types,
+        prefix,
+        supernode_attribute,
+        superedge_attribute,
+    )
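
A small sketch of the restored SNAP aggregation (node labels and the "color" attribute are made up for illustration):

    import networkx as nx

    G = nx.Graph()
    G.add_nodes_from(
        [(1, {"color": "red"}), (2, {"color": "red"}), (3, {"color": "blue"})]
    )
    G.add_edges_from([(1, 3), (2, 3)])
    # Both red nodes relate to the blue group identically, so no splits are
    # needed and the summary has one supernode per color.
    summary = nx.snap_aggregation(G, node_attributes=("color",))
    print(summary.nodes(data=True))
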
diff --git a/networkx/algorithms/swap.py b/networkx/algorithms/swap.py
index 2df913522..c190f970b 100644
--- a/networkx/algorithms/swap.py
+++ b/networkx/algorithms/swap.py
@@ -1,13 +1,15 @@
 """Swap edges in a graph.
 """
+
 import math
+
 import networkx as nx
 from networkx.utils import py_random_state
-__all__ = ['double_edge_swap', 'connected_double_edge_swap',
-    'directed_edge_swap']
+
+__all__ = ["double_edge_swap", "connected_double_edge_swap", "directed_edge_swap"]


-@nx.utils.not_implemented_for('undirected')
+@nx.utils.not_implemented_for("undirected")
 @py_random_state(3)
 @nx._dispatchable(mutates_input=True, returns_graph=True)
 def directed_edge_swap(G, *, nswap=1, max_tries=100, seed=None):
@@ -68,7 +70,66 @@ def directed_edge_swap(G, *, nswap=1, max_tries=100, seed=None):
            Degree Sequence with 2-Edge Swaps.” Mathematics Stack Exchange,
            https://math.stackexchange.com/questions/22272/. Accessed 30 May 2022.
     """
-    pass
+    if nswap > max_tries:
+        raise nx.NetworkXError("Number of swaps > number of tries allowed.")
+    if len(G) < 4:
+        raise nx.NetworkXError("DiGraph has fewer than four nodes.")
+    if len(G.edges) < 3:
+        raise nx.NetworkXError("DiGraph has fewer than 3 edges")
+
+    # Instead of choosing uniformly at random from a generated edge list,
+    # this algorithm chooses nonuniformly from the set of nodes with
+    # probability weighted by degree.
+    tries = 0
+    swapcount = 0
+    keys, degrees = zip(*G.degree())  # keys, degree
+    cdf = nx.utils.cumulative_distribution(degrees)  # cdf of degree
+    discrete_sequence = nx.utils.discrete_sequence
+
+    while swapcount < nswap:
+        # choose source node index from discrete distribution
+        start_index = discrete_sequence(1, cdistribution=cdf, seed=seed)[0]
+        start = keys[start_index]
+        tries += 1
+
+        if tries > max_tries:
+            msg = f"Maximum number of swap attempts ({tries}) exceeded before desired swaps achieved ({nswap})."
+            raise nx.NetworkXAlgorithmError(msg)
+
+        # If the given node doesn't have any out edges, then there isn't anything to swap
+        if G.out_degree(start) == 0:
+            continue
+        second = seed.choice(list(G.succ[start]))
+        if start == second:
+            continue
+
+        if G.out_degree(second) == 0:
+            continue
+        third = seed.choice(list(G.succ[second]))
+        if second == third:
+            continue
+
+        if G.out_degree(third) == 0:
+            continue
+        fourth = seed.choice(list(G.succ[third]))
+        if third == fourth:
+            continue
+
+        if (
+            third not in G.succ[start]
+            and fourth not in G.succ[second]
+            and second not in G.succ[third]
+        ):
+            # Swap nodes
+            G.add_edge(start, third)
+            G.add_edge(third, second)
+            G.add_edge(second, fourth)
+            G.remove_edge(start, second)
+            G.remove_edge(second, third)
+            G.remove_edge(third, fourth)
+            swapcount += 1
+
+    return G


 @py_random_state(3)
@@ -121,7 +182,52 @@ def double_edge_swap(G, nswap=1, max_tries=100, seed=None):

     The graph G is modified in place.
     """
-    pass
+    if G.is_directed():
+        raise nx.NetworkXError(
+            "double_edge_swap() not defined for directed graphs. Use directed_edge_swap instead."
+        )
+    if nswap > max_tries:
+        raise nx.NetworkXError("Number of swaps > number of tries allowed.")
+    if len(G) < 4:
+        raise nx.NetworkXError("Graph has fewer than four nodes.")
+    if len(G.edges) < 2:
+        raise nx.NetworkXError("Graph has fewer than 2 edges")
+    # Instead of choosing uniformly at random from a generated edge list,
+    # this algorithm chooses nonuniformly from the set of nodes with
+    # probability weighted by degree.
+    n = 0
+    swapcount = 0
+    keys, degrees = zip(*G.degree())  # keys, degree
+    cdf = nx.utils.cumulative_distribution(degrees)  # cdf of degree
+    discrete_sequence = nx.utils.discrete_sequence
+    while swapcount < nswap:
+        #        if random.random() < 0.5: continue # trick to avoid periodicities?
+        # pick two random edges without creating edge list
+        # choose source node indices from discrete distribution
+        (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed)
+        if ui == xi:
+            continue  # same source, skip
+        u = keys[ui]  # convert index to label
+        x = keys[xi]
+        # choose target uniformly from neighbors
+        v = seed.choice(list(G[u]))
+        y = seed.choice(list(G[x]))
+        if v == y:
+            continue  # same target, skip
+        if (x not in G[u]) and (y not in G[v]):  # don't create parallel edges
+            G.add_edge(u, x)
+            G.add_edge(v, y)
+            G.remove_edge(u, v)
+            G.remove_edge(x, y)
+            swapcount += 1
+        if n >= max_tries:
+            e = (
+                f"Maximum number of swap attempts ({n}) exceeded "
+                f"before desired swaps achieved ({nswap})."
+            )
+            raise nx.NetworkXAlgorithmError(e)
+        n += 1
+    return G


 @py_random_state(3)
@@ -194,4 +300,108 @@ def connected_double_edge_swap(G, nswap=1, _window_threshold=3, seed=None):
            power law random graphs, 2003.
            http://citeseer.ist.psu.edu/gkantsidis03markov.html
     """
-    pass
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph not connected")
+    if len(G) < 4:
+        raise nx.NetworkXError("Graph has fewer than four nodes.")
+    n = 0
+    swapcount = 0
+    deg = G.degree()
+    # Label key for nodes
+    dk = [n for n, d in G.degree()]
+    cdf = nx.utils.cumulative_distribution([d for n, d in G.degree()])
+    discrete_sequence = nx.utils.discrete_sequence
+    window = 1
+    while n < nswap:
+        wcount = 0
+        swapped = []
+        # If the window is small, we just check each time whether the graph is
+        # connected by checking if the nodes that were just separated are still
+        # connected.
+        if window < _window_threshold:
+            # This Boolean keeps track of whether there was a failure or not.
+            fail = False
+            while wcount < window and n < nswap:
+                # Pick two random edges without creating the edge list. Choose
+                # source nodes from the discrete degree distribution.
+                (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed)
+                # If the source nodes are the same, skip this pair.
+                if ui == xi:
+                    continue
+                # Convert an index to a node label.
+                u = dk[ui]
+                x = dk[xi]
+                # Choose targets uniformly from neighbors.
+                v = seed.choice(list(G.neighbors(u)))
+                y = seed.choice(list(G.neighbors(x)))
+                # If the target nodes are the same, skip this pair.
+                if v == y:
+                    continue
+                if x not in G[u] and y not in G[v]:
+                    G.remove_edge(u, v)
+                    G.remove_edge(x, y)
+                    G.add_edge(u, x)
+                    G.add_edge(v, y)
+                    swapped.append((u, v, x, y))
+                    swapcount += 1
+                n += 1
+                # If G remains connected...
+                if nx.has_path(G, u, v):
+                    wcount += 1
+                # Otherwise, undo the changes.
+                else:
+                    G.add_edge(u, v)
+                    G.add_edge(x, y)
+                    G.remove_edge(u, x)
+                    G.remove_edge(v, y)
+                    swapcount -= 1
+                    fail = True
+            # If one of the swaps failed, reduce the window size.
+            if fail:
+                window = math.ceil(window / 2)
+            else:
+                window += 1
+        # If the window is large, then there is a good chance that a bunch of
+        # swaps will work. It's quicker to do all those swaps first and then
+        # check if the graph remains connected.
+        else:
+            while wcount < window and n < nswap:
+                # Pick two random edges without creating the edge list. Choose
+                # source nodes from the discrete degree distribution.
+                (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed)
+                # If the source nodes are the same, skip this pair.
+                if ui == xi:
+                    continue
+                # Convert an index to a node label.
+                u = dk[ui]
+                x = dk[xi]
+                # Choose targets uniformly from neighbors.
+                v = seed.choice(list(G.neighbors(u)))
+                y = seed.choice(list(G.neighbors(x)))
+                # If the target nodes are the same, skip this pair.
+                if v == y:
+                    continue
+                if x not in G[u] and y not in G[v]:
+                    G.remove_edge(u, v)
+                    G.remove_edge(x, y)
+                    G.add_edge(u, x)
+                    G.add_edge(v, y)
+                    swapped.append((u, v, x, y))
+                    swapcount += 1
+                n += 1
+                wcount += 1
+            # If the graph remains connected, increase the window size.
+            if nx.is_connected(G):
+                window += 1
+            # Otherwise, undo the changes from the previous window and decrease
+            # the window size.
+            else:
+                while swapped:
+                    (u, v, x, y) = swapped.pop()
+                    G.add_edge(u, v)
+                    G.add_edge(x, y)
+                    G.remove_edge(u, x)
+                    G.remove_edge(v, y)
+                    swapcount -= 1
+                window = math.ceil(window / 2)
+    return swapcount
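
Illustrative usage: unlike `double_edge_swap`, this variant keeps the graph connected and returns the number of swaps actually performed:

    import networkx as nx

    G = nx.barabasi_albert_graph(20, 2, seed=7)  # connected by construction
    n_swaps = nx.connected_double_edge_swap(G, nswap=10, seed=7)
    assert nx.is_connected(G)  # connectivity is preserved
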
diff --git a/networkx/algorithms/threshold.py b/networkx/algorithms/threshold.py
index d48f32d59..bcc03d106 100644
--- a/networkx/algorithms/threshold.py
+++ b/networkx/algorithms/threshold.py
@@ -2,9 +2,11 @@
 Threshold Graphs - Creation, manipulation and identification.
 """
 from math import sqrt
+
 import networkx as nx
 from networkx.utils import py_random_state
-__all__ = ['is_threshold_graph', 'find_threshold_graph']
+
+__all__ = ["is_threshold_graph", "find_threshold_graph"]


 @nx._dispatchable
@@ -36,7 +38,7 @@ def is_threshold_graph(G):
     ----------
     .. [1] Threshold graphs: https://en.wikipedia.org/wiki/Threshold_graph
     """
-    pass
+    return is_threshold_sequence([d for n, d in G.degree()])


 def is_threshold_sequence(degree_sequence):
@@ -49,7 +51,17 @@ def is_threshold_sequence(degree_sequence):
     node that connects to the remaining nodes.  If this deconstruction
     fails then the sequence is not a threshold sequence.
     """
-    pass
+    ds = degree_sequence[:]  # get a copy so we don't destroy original
+    ds.sort()
+    while ds:
+        if ds[0] == 0:  # if isolated node
+            ds.pop(0)  # remove it
+            continue
+        if ds[-1] != len(ds) - 1:  # is the largest degree node dominating?
+            return False  # no, not a threshold degree sequence
+        ds.pop()  # yes, largest is the dominating node
+        ds = [d - 1 for d in ds]  # remove it and decrement all degrees
+    return True
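
Tracing the deconstruction on a small input (illustrative): [1, 2, 2, 3] pops the dominating 3, decrements to [0, 1, 1], drops the isolated 0, pops the dominating 1, and empties out, so it is a threshold sequence. By contrast, [1, 1, 1, 1] (two disjoint edges) never has a dominating node:

    from networkx.algorithms.threshold import is_threshold_sequence

    assert is_threshold_sequence([1, 2, 2, 3])
    assert not is_threshold_sequence([1, 1, 1, 1])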


 def creation_sequence(degree_sequence, with_labels=False, compact=False):
@@ -80,7 +92,35 @@ def creation_sequence(degree_sequence, with_labels=False, compact=False):

     Returns None if the sequence is not a threshold sequence
     """
-    pass
+    if with_labels and compact:
+        raise ValueError("compact sequences cannot be labeled")
+
+    # make an indexed copy
+    if isinstance(degree_sequence, dict):  # labeled degree sequence
+        ds = [[degree, label] for (label, degree) in degree_sequence.items()]
+    else:
+        ds = [[d, i] for i, d in enumerate(degree_sequence)]
+    ds.sort()
+    cs = []  # creation sequence
+    while ds:
+        if ds[0][0] == 0:  # isolated node
+            (d, v) = ds.pop(0)
+            if len(ds) > 0:  # make sure we start with a d
+                cs.insert(0, (v, "i"))
+            else:
+                cs.insert(0, (v, "d"))
+            continue
+        if ds[-1][0] != len(ds) - 1:  # Not dominating node
+            return None  # not a threshold degree sequence
+        (d, v) = ds.pop()
+        cs.insert(0, (v, "d"))
+        ds = [[d[0] - 1, d[1]] for d in ds]  # decrement due to removing node
+
+    if with_labels:
+        return cs
+    if compact:
+        return make_compact(cs)
+    return [v[1] for v in cs]  # not labeled
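
For example, the star K_{1,3} (degree sequence [1, 1, 1, 3]) yields a sequence whose first symbol is always 'd', since a lone node counts as dominating by convention:

    from networkx.algorithms.threshold import creation_sequence

    assert creation_sequence([1, 1, 1, 3]) == ["d", "i", "i", "d"]
    assert creation_sequence([1, 1, 1, 3], compact=True) == [1, 2, 1]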


 def make_compact(creation_sequence):
@@ -105,7 +145,26 @@ def make_compact(creation_sequence):
     >>> make_compact([3, 1, 2])
     [3, 1, 2]
     """
-    pass
+    first = creation_sequence[0]
+    if isinstance(first, str):  # creation sequence
+        cs = creation_sequence[:]
+    elif isinstance(first, tuple):  # labeled creation sequence
+        cs = [s[1] for s in creation_sequence]
+    elif isinstance(first, int):  # compact creation sequence
+        return creation_sequence
+    else:
+        raise TypeError("Not a valid creation sequence type")
+
+    ccs = []
+    count = 1  # count the run lengths of d's or i's.
+    for i in range(1, len(cs)):
+        if cs[i] == cs[i - 1]:
+            count += 1
+        else:
+            ccs.append(count)
+            count = 1
+    ccs.append(count)  # don't forget the last one
+    return ccs


 def uncompact(creation_sequence):
@@ -115,7 +174,21 @@ def uncompact(creation_sequence):
     If the creation_sequence is already standard, return it.
     See creation_sequence.
     """
-    pass
+    first = creation_sequence[0]
+    if isinstance(first, str):  # creation sequence
+        return creation_sequence
+    elif isinstance(first, tuple):  # labeled creation sequence
+        return creation_sequence
+    elif isinstance(first, int):  # compact creation sequence
+        ccscopy = creation_sequence[:]
+    else:
+        raise TypeError("Not a valid creation sequence type")
+    cs = []
+    while ccscopy:
+        cs.extend(ccscopy.pop(0) * ["d"])
+        if ccscopy:
+            cs.extend(ccscopy.pop(0) * ["i"])
+    return cs


 def creation_sequence_to_weights(creation_sequence):
@@ -125,11 +198,49 @@ def creation_sequence_to_weights(creation_sequence):
     are scaled so that the threshold is 1.0.  The order of the
     nodes is the same as that in the creation sequence.
     """
-    pass
-
-
-def weights_to_creation_sequence(weights, threshold=1, with_labels=False,
-    compact=False):
+    # Turn input sequence into a labeled creation sequence
+    first = creation_sequence[0]
+    if isinstance(first, str):  # creation sequence
+        if isinstance(creation_sequence, list):
+            wseq = creation_sequence[:]
+        else:
+            wseq = list(creation_sequence)  # string like 'ddidid'
+    elif isinstance(first, tuple):  # labeled creation sequence
+        wseq = [v[1] for v in creation_sequence]
+    elif isinstance(first, int):  # compact creation sequence
+        wseq = uncompact(creation_sequence)
+    else:
+        raise TypeError("Not a valid creation sequence type")
+    # pass through twice--first backwards
+    wseq.reverse()
+    w = 0
+    prev = "i"
+    for j, s in enumerate(wseq):
+        if s == "i":
+            wseq[j] = w
+            prev = s
+        elif prev == "i":
+            prev = s
+            w += 1
+    wseq.reverse()  # now pass through forwards
+    for j, s in enumerate(wseq):
+        if s == "d":
+            wseq[j] = w
+            prev = s
+        elif prev == "d":
+            prev = s
+            w += 1
+    # Now scale weights
+    if prev == "d":
+        w += 1
+    wscale = 1 / w
+    return [ww * wscale for ww in wseq]
+    # return wseq
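
Worked example (illustrative): for "ddid" the two clique d's share one weight, the i gets a smaller one, and the final dominating d gets the largest, all scaled so the threshold is 1.0:

    from networkx.algorithms.threshold import creation_sequence_to_weights

    assert creation_sequence_to_weights("ddid") == [0.5, 0.5, 0.25, 0.75]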
+
+
+def weights_to_creation_sequence(
+    weights, threshold=1, with_labels=False, compact=False
+):
     """
     Returns a creation sequence for a threshold graph
     determined by the weights and threshold given as input.
@@ -157,9 +268,39 @@ def weights_to_creation_sequence(weights, threshold=1, with_labels=False,

     with_labels and compact cannot both be True.
     """
-    pass
-
-
+    if with_labels and compact:
+        raise ValueError("compact sequences cannot be labeled")
+
+    # make an indexed copy
+    if isinstance(weights, dict):  # labeled weights
+        wseq = [[w, label] for (label, w) in weights.items()]
+    else:
+        wseq = [[w, i] for i, w in enumerate(weights)]
+    wseq.sort()
+    cs = []  # creation sequence
+    cutoff = threshold - wseq[-1][0]
+    while wseq:
+        if wseq[0][0] < cutoff:  # isolated node
+            (w, label) = wseq.pop(0)
+            cs.append((label, "i"))
+        else:
+            (w, label) = wseq.pop()
+            cs.append((label, "d"))
+            cutoff = threshold - wseq[-1][0]
+        if len(wseq) == 1:  # make sure we start with a d
+            (w, label) = wseq.pop()
+            cs.append((label, "d"))
+    # put in correct order
+    cs.reverse()
+
+    if with_labels:
+        return cs
+    if compact:
+        return make_compact(cs)
+    return [v[1] for v in cs]  # not labeled
+
+
+# Manipulating NetworkX Graphs in the context of threshold graphs
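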
 @nx._dispatchable(graphs=None, returns_graph=True)
 def threshold_graph(creation_sequence, create_using=None):
     """
@@ -177,7 +318,39 @@ def threshold_graph(creation_sequence, create_using=None):

     Returns None if the sequence is not valid
     """
-    pass
+    # Turn input sequence into a labeled creation sequence
+    first = creation_sequence[0]
+    if isinstance(first, str):  # creation sequence
+        ci = list(enumerate(creation_sequence))
+    elif isinstance(first, tuple):  # labeled creation sequence
+        ci = creation_sequence[:]
+    elif isinstance(first, int):  # compact creation sequence
+        cs = uncompact(creation_sequence)
+        ci = list(enumerate(cs))
+    else:
+        print("not a valid creation sequence type")
+        return None
+
+    G = nx.empty_graph(0, create_using)
+    if G.is_directed():
+        raise nx.NetworkXError("Directed Graph not supported")
+
+    G.name = "Threshold Graph"
+
+    # add nodes and edges
+    # if the type is 'i', just add the node
+    # if the type is 'd', connect it to all previous nodes
+    while ci:
+        (v, node_type) = ci.pop(0)
+        if node_type == "d":  # dominating type, connect to all existing nodes
+            # We use `for u in list(G):` instead of
+            # `for u in G:` because we edit the graph `G` in
+            # the loop. Hence using an iterator will result in
+            # `RuntimeError: dictionary changed size during iteration`
+            for u in list(G):
+                G.add_edge(v, u)
+        G.add_node(v)
+    return G
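
Illustrative usage: "diid" adds one node, then two isolated nodes, then a dominating node, producing the star K_{1,3}:

    from networkx.algorithms.threshold import threshold_graph

    G = threshold_graph("diid")
    assert sorted(d for _, d in G.degree()) == [1, 1, 1, 3]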


 @nx._dispatchable
@@ -187,7 +360,13 @@ def find_alternating_4_cycle(G):
     Otherwise returns the cycle as [a,b,c,d] where (a,b)
     and (c,d) are edges and (a,c) and (b,d) are not.
     """
-    pass
+    for u, v in G.edges():
+        for w in G.nodes():
+            if not G.has_edge(u, w) and u != w:
+                for x in G.neighbors(w):
+                    if not G.has_edge(v, x) and v != x:
+                        return [u, v, w, x]
+    return False


 @nx._dispatchable(returns_graph=True)
@@ -222,7 +401,7 @@ def find_threshold_graph(G, create_using=None):
     ----------
     .. [1] Threshold graphs: https://en.wikipedia.org/wiki/Threshold_graph
     """
-    pass
+    return threshold_graph(find_creation_sequence(G), create_using)


 @nx._dispatchable
@@ -231,15 +410,51 @@ def find_creation_sequence(G):
     Find a threshold subgraph that is close to largest in G.
     Returns the labeled creation sequence of that threshold graph.
     """
-    pass
-
-
+    cs = []
+    # get a local pointer to the working part of the graph
+    H = G
+    while H.order() > 0:
+        # get new degree sequence on subgraph
+        dsdict = dict(H.degree())
+        ds = [(d, v) for v, d in dsdict.items()]
+        ds.sort()
+        # Update threshold graph nodes
+        if ds[-1][0] == 0:  # all are isolated
+            cs.extend(zip(dsdict, ["i"] * (len(ds) - 1) + ["d"]))
+            break  # Done!
+        # pull off isolated nodes
+        while ds[0][0] == 0:
+            (d, iso) = ds.pop(0)
+            cs.append((iso, "i"))
+        # find new biggest node
+        (d, bigv) = ds.pop()
+        # add edges of star to t_g
+        cs.append((bigv, "d"))
+        # form subgraph of neighbors of big node
+        H = H.subgraph(H.neighbors(bigv))
+    cs.reverse()
+    return cs
+
+
+# Properties of Threshold Graphs
 def triangles(creation_sequence):
     """
     Compute number of triangles in the threshold graph with the
     given creation sequence.
     """
-    pass
+    # shortcut algorithm that doesn't require computing number
+    # of triangles at each node.
+    cs = creation_sequence  # alias
+    dr = cs.count("d")  # number of d's in sequence
+    ntri = dr * (dr - 1) * (dr - 2) / 6  # number of triangles in clique of dr d's
+    # now add dr choose 2 triangles for every 'i' in sequence where
+    # dr is the number of d's to the right of the current i
+    for i, typ in enumerate(cs):
+        if typ == "i":
+            ntri += dr * (dr - 1) / 2
+        else:
+            dr -= 1
+    return ntri


 def triangle_sequence(creation_sequence):
@@ -247,14 +462,44 @@ def triangle_sequence(creation_sequence):
     Return triangle sequence for the given threshold graph creation sequence.

     """
-    pass
+    cs = creation_sequence
+    seq = []
+    dr = cs.count("d")  # number of d's to the right of the current pos
+    dcur = (dr - 1) * (dr - 2) // 2  # number of triangles through a node of clique dr
+    irun = 0  # number of i's in the last run
+    drun = 0  # number of d's in the last run
+    for i, sym in enumerate(cs):
+        if sym == "d":
+            drun += 1
+            tri = dcur + (dr - 1) * irun  # new triangles at this d
+        else:  # cs[i]="i":
+            if prevsym == "d":  # new string of i's
+                dcur += (dr - 1) * irun  # accumulate shared shortest paths
+                irun = 0  # reset i run counter
+                dr -= drun  # reduce number of d's to right
+                drun = 0  # reset d run counter
+            irun += 1
+            tri = dr * (dr - 1) // 2  # new triangles at this i
+        seq.append(tri)
+        prevsym = sym
+    return seq


 def cluster_sequence(creation_sequence):
     """
     Return cluster sequence for the given threshold graph creation sequence.
     """
-    pass
+    triseq = triangle_sequence(creation_sequence)
+    degseq = degree_sequence(creation_sequence)
+    cseq = []
+    for i, deg in enumerate(degseq):
+        tri = triseq[i]
+        if deg <= 1:  # isolated vertex or single pair gets cc 0
+            cseq.append(0)
+            continue
+        max_size = (deg * (deg - 1)) // 2
+        cseq.append(tri / max_size)
+    return cseq


 def degree_sequence(creation_sequence):
@@ -262,7 +507,16 @@ def degree_sequence(creation_sequence):
     Return degree sequence for the threshold graph with the given
     creation sequence
     """
-    pass
+    cs = creation_sequence  # alias
+    seq = []
+    rd = cs.count("d")  # number of d to the right
+    for i, sym in enumerate(cs):
+        if sym == "d":
+            rd -= 1
+            seq.append(rd + i)
+        else:
+            seq.append(rd)
+    return seq
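
Worked example for "ddid" (illustrative): each d sees the d's to its right plus every node before it, while each i sees only the d's to its right:

    from networkx.algorithms.threshold import degree_sequence

    assert degree_sequence("ddid") == [2, 2, 1, 3]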


 def density(creation_sequence):
@@ -270,14 +524,45 @@ def density(creation_sequence):
     Return the density of the graph with this creation_sequence.
     The density is the fraction of possible edges present.
     """
-    pass
+    N = len(creation_sequence)
+    two_size = sum(degree_sequence(creation_sequence))
+    two_possible = N * (N - 1)
+    den = two_size / two_possible
+    return den


 def degree_correlation(creation_sequence):
     """
     Return the degree-degree correlation over all edges.
     """
-    pass
+    cs = creation_sequence
+    s1 = 0  # deg_i*deg_j
+    s2 = 0  # deg_i^2+deg_j^2
+    s3 = 0  # deg_i+deg_j
+    m = 0  # number of edges
+    rd = cs.count("d")  # number of d nodes to the right
+    rdi = [i for i, sym in enumerate(cs) if sym == "d"]  # index of "d"s
+    ds = degree_sequence(cs)
+    for i, sym in enumerate(cs):
+        if sym == "d":
+            if i != rdi[0]:
+                print("Logic error in degree_correlation", i, rdi)
+                raise ValueError
+            rdi.pop(0)
+        degi = ds[i]
+        for dj in rdi:
+            degj = ds[dj]
+            s1 += degj * degi
+            s2 += degi**2 + degj**2
+            s3 += degi + degj
+            m += 1
+    denom = 2 * m * s2 - s3 * s3
+    numer = 4 * m * s1 - s3 * s3
+    if denom == 0:
+        if numer == 0:
+            return 1
+        raise ValueError(f"Zero Denominator but Numerator is {numer}")
+    return numer / denom


 def shortest_path(creation_sequence, u, v):
@@ -297,7 +582,40 @@ def shortest_path(creation_sequence, u, v):
     Returns a list of vertices from u to v.
     Example: if they are neighbors, it returns [u,v]
     """
-    pass
+    # Turn input sequence into a labeled creation sequence
+    first = creation_sequence[0]
+    if isinstance(first, str):  # creation sequence
+        cs = [(i, creation_sequence[i]) for i in range(len(creation_sequence))]
+    elif isinstance(first, tuple):  # labeled creation sequence
+        cs = creation_sequence[:]
+    elif isinstance(first, int):  # compact creation sequence
+        ci = uncompact(creation_sequence)
+        cs = [(i, ci[i]) for i in range(len(ci))]
+    else:
+        raise TypeError("Not a valid creation sequence type")
+
+    verts = [s[0] for s in cs]
+    if v not in verts:
+        raise ValueError(f"Vertex {v} not in graph from creation_sequence")
+    if u not in verts:
+        raise ValueError(f"Vertex {u} not in graph from creation_sequence")
+    # Done checking
+    if u == v:
+        return [u]
+
+    uindex = verts.index(u)
+    vindex = verts.index(v)
+    bigind = max(uindex, vindex)
+    if cs[bigind][1] == "d":
+        return [u, v]
+    # must be that cs[bigind][1]=='i'
+    cs = cs[bigind:]
+    while cs:
+        vert = cs.pop()
+        if vert[1] == "d":
+            return [u, vert[0], v]
+    # All after u are type 'i' so no connection
+    return -1


 def shortest_path_length(creation_sequence, i):
@@ -312,7 +630,38 @@ def shortest_path_length(creation_sequence, i):
     Paths lengths in threshold graphs are at most 2.
     Length to unreachable nodes is set to -1.
     """
-    pass
+    # Turn input sequence into a labeled creation sequence
+    first = creation_sequence[0]
+    if isinstance(first, str):  # creation sequence
+        if isinstance(creation_sequence, list):
+            cs = creation_sequence[:]
+        else:
+            cs = list(creation_sequence)
+    elif isinstance(first, tuple):  # labeled creation sequence
+        cs = [v[1] for v in creation_sequence]
+        i = [v[0] for v in creation_sequence].index(i)
+    elif isinstance(first, int):  # compact creation sequence
+        cs = uncompact(creation_sequence)
+    else:
+        raise TypeError("Not a valid creation sequence type")
+
+    # Compute
+    N = len(cs)
+    spl = [2] * N  # length 2 to every node
+    spl[i] = 0  # except self which is 0
+    # 1 for all d's to the right
+    for j in range(i + 1, N):
+        if cs[j] == "d":
+            spl[j] = 1
+    if cs[i] == "d":  # 1 for all nodes to the left
+        for j in range(i):
+            spl[j] = 1
+    # and -1 for any trailing i to indicate unreachable
+    for j in range(N - 1, 0, -1):
+        if cs[j] == "d":
+            break
+        spl[j] = -1
+    return spl


 def betweenness_sequence(creation_sequence, normalized=True):
@@ -321,7 +670,38 @@ def betweenness_sequence(creation_sequence, normalized=True):
     sequence.  The result is unscaled.  To scale the values
     to the interval [0,1] divide by (n-1)*(n-2).
     """
-    pass
+    cs = creation_sequence
+    seq = []  # betweenness
+    lastchar = "d"  # first node is always a 'd'
+    dr = float(cs.count("d"))  # number of d's to the right of current pos
+    irun = 0  # number of i's in the last run
+    drun = 0  # number of d's in the last run
+    dlast = 0.0  # betweenness of last d
+    for i, c in enumerate(cs):
+        if c == "d":  # cs[i]=="d":
+            # betweenness = amt shared with earlier d's and i's
+            #             + new isolated nodes covered
+            #             + new paths to all previous nodes
+            b = dlast + (irun - 1) * irun / dr + 2 * irun * (i - drun - irun) / dr
+            drun += 1  # update counter
+        else:  # cs[i]="i":
+            if lastchar == "d":  # if this is a new run of i's
+                dlast = b  # accumulate betweenness
+                dr -= drun  # update number of d's to the right
+                drun = 0  # reset d counter
+                irun = 0  # reset i counter
+            b = 0  # isolated nodes have zero betweenness
+            irun += 1  # add another i to the run
+        seq.append(float(b))
+        lastchar = c
+
+    # normalize by the number of possible shortest paths
+    if normalized:
+        order = len(cs)
+        scale = 1.0 / ((order - 1) * (order - 2))
+        seq = [s * scale for s in seq]
+
+    return seq


 def eigenvectors(creation_sequence):
@@ -336,7 +716,50 @@ def eigenvectors(creation_sequence):
     Notice that the order of the eigenvalues returned by eigenvalues(cs)
     may not correspond to the order of these eigenvectors.
     """
-    pass
+    ccs = make_compact(creation_sequence)
+    N = sum(ccs)
+    vec = [0] * N
+    val = vec[:]
+    # get number of type d nodes to the right (all for first node)
+    dr = sum(ccs[::2])
+
+    nn = ccs[0]
+    vec[0] = [1.0 / sqrt(N)] * N
+    val[0] = 0
+    e = dr
+    dr -= nn
+    type_d = True
+    i = 1
+    dd = 1
+    while dd < nn:
+        scale = 1.0 / sqrt(dd * dd + i)
+        vec[i] = i * [-scale] + [dd * scale] + [0] * (N - i - 1)
+        val[i] = e
+        i += 1
+        dd += 1
+    if len(ccs) == 1:
+        return (val, vec)
+    for nn in ccs[1:]:
+        scale = 1.0 / sqrt(nn * i * (i + nn))
+        vec[i] = i * [-nn * scale] + nn * [i * scale] + [0] * (N - i - nn)
+        # find eigenvalue
+        type_d = not type_d
+        if type_d:
+            e = i + dr
+            dr -= nn
+        else:
+            e = dr
+        val[i] = e
+        st = i
+        i += 1
+        dd = 1
+        while dd < nn:
+            scale = 1.0 / sqrt(i - st + dd * dd)
+            vec[i] = [0] * st + (i - st) * [-scale] + [dd * scale] + [0] * (N - i - 1)
+            val[i] = e
+            i += 1
+            dd += 1
+    return (val, vec)


 def spectral_projection(u, eigenpairs):
@@ -352,7 +775,12 @@ def spectral_projection(u, eigenpairs):
     There's not a lot of error checking on lengths of
     arrays, etc. so be careful.
     """
-    pass
+    coeff = []
+    evect = eigenpairs[1]
+    for ev in evect:
+        c = sum(evv * uv for (evv, uv) in zip(ev, u))
+        coeff.append(c)
+    return coeff


 def eigenvalues(creation_sequence):
@@ -375,7 +803,26 @@ def eigenvalues(creation_sequence):
       }

     """
-    pass
+    degseq = degree_sequence(creation_sequence)
+    degseq.sort()
+    eiglist = []  # zero is always one eigenvalue
+    eig = 0
+    row = len(degseq)
+    bigdeg = degseq.pop()
+    while row:
+        if bigdeg < row:
+            eiglist.append(eig)
+            row -= 1
+        else:
+            eig += 1
+            if degseq:
+                bigdeg = degseq.pop()
+            else:
+                bigdeg = 0
+    return eiglist
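
Illustrative check: for "ddid" (the paw graph: a triangle with one pendant vertex) the Laplacian spectrum is integral, as it is for every threshold graph:

    from networkx.algorithms.threshold import eigenvalues

    assert eigenvalues("ddid") == [0, 1, 3, 4]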
+
+
+# Threshold graph creation routines


 @py_random_state(2)
@@ -398,9 +845,21 @@ def random_threshold_sequence(n, p, seed=None):
         Indicator of random number generation state.
         See :ref:`Randomness<randomness>`.
     """
-    pass
+    if not (0 <= p <= 1):
+        raise ValueError("p must be in [0,1]")
+
+    cs = ["d"]  # threshold sequences always start with a d
+    for i in range(1, n):
+        if seed.random() < p:
+            cs.append("d")
+        else:
+            cs.append("i")
+    return cs
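
Illustrative usage: the result is always a list of n symbols starting with 'd', suitable as input to the creation-sequence routines above:

    from networkx.algorithms.threshold import random_threshold_sequence

    cs = random_threshold_sequence(10, 0.5, seed=42)
    assert len(cs) == 10 and cs[0] == "d" and set(cs) <= {"d", "i"}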


+# maybe *_d_threshold_sequence routines should
+# be (or be called from) a single routine with a more descriptive name
+# and a keyword parameter?
 def right_d_threshold_sequence(n, m):
     """
     Create a skewed threshold graph with a given number
@@ -412,7 +871,27 @@ def right_d_threshold_sequence(n, m):
     FIXME: describe algorithm

     """
-    pass
+    cs = ["d"] + ["i"] * (n - 1)  # create sequence with n insolated nodes
+
+    #  m <n : not enough edges, make disconnected
+    if m < n:
+        cs[m] = "d"
+        return cs
+
+    # too many edges
+    if m > n * (n - 1) / 2:
+        raise ValueError("Too many edges for this many nodes.")
+
+    # connected case: m > n-1
+    ind = n - 1
+    sum = n - 1
+    while sum < m:
+        cs[ind] = "d"
+        ind -= 1
+        sum += ind
+    ind = m - (sum - ind)
+    cs[ind] = "d"
+    return cs


 def left_d_threshold_sequence(n, m):
@@ -426,7 +905,28 @@ def left_d_threshold_sequence(n, m):
     FIXME: describe algorithm

     """
-    pass
+    cs = ["d"] + ["i"] * (n - 1)  # create sequence with n insolated nodes
+
+    #  m <n : not enough edges, make disconnected
+    if m < n:
+        cs[m] = "d"
+        return cs
+
+    # too many edges
+    if m > n * (n - 1) / 2:
+        raise ValueError("Too many edges for this many nodes.")
+
+    # connected case: m > n-1
+    cs[n - 1] = "d"
+    sum = n - 1
+    ind = 1
+    while sum < m:
+        cs[ind] = "d"
+        sum += ind
+        ind += 1
+    if sum > m:  # be sure not to change the first vertex
+        cs[sum - m] = "i"
+    return cs


 @py_random_state(3)
@@ -448,4 +948,32 @@ def swap_d(cs, p_split=1.0, p_combine=1.0, seed=None):
         Indicator of random number generation state.
         See :ref:`Randomness<randomness>`.
     """
-    pass
+    # preprocess the creation sequence
+    dlist = [i for (i, node_type) in enumerate(cs[1:-1]) if node_type == "d"]
+    # split
+    if seed.random() < p_split:
+        choice = seed.choice(dlist)
+        split_to = seed.choice(range(choice))
+        flip_side = choice - split_to
+        if split_to != flip_side and cs[split_to] == "i" and cs[flip_side] == "i":
+            cs[choice] = "i"
+            cs[split_to] = "d"
+            cs[flip_side] = "d"
+            dlist.remove(choice)
+            # don't add them back: a later combine could reverse this action
+            # dlist.extend([split_to,flip_side])
+    #            print >>sys.stderr,"split at %s to %s and %s"%(choice,split_to,flip_side)
+    # combine
+    if seed.random() < p_combine and dlist:
+        first_choice = seed.choice(dlist)
+        second_choice = seed.choice(dlist)
+        target = first_choice + second_choice
+        if target >= len(cs) or cs[target] == "d" or first_choice == second_choice:
+            return cs
+        # OK to combine
+        cs[first_choice] = "i"
+        cs[second_choice] = "i"
+        cs[target] = "d"
+    #        print >>sys.stderr,"combine %s and %s to make %s."%(first_choice,second_choice,target)
+
+    return cs
diff --git a/networkx/algorithms/time_dependent.py b/networkx/algorithms/time_dependent.py
index 6990d4d0b..d67cdcf0b 100644
--- a/networkx/algorithms/time_dependent.py
+++ b/networkx/algorithms/time_dependent.py
@@ -1,14 +1,16 @@
 """Time dependent algorithms."""
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['cd_index']
+
+__all__ = ["cd_index"]


-@not_implemented_for('undirected')
-@not_implemented_for('multigraph')
-@nx._dispatchable(node_attrs={'time': None, 'weight': 1})
-def cd_index(G, node, time_delta, *, time='time', weight=None):
-    """Compute the CD index for `node` within the graph `G`.
+@not_implemented_for("undirected")
+@not_implemented_for("multigraph")
+@nx._dispatchable(node_attrs={"time": None, "weight": 1})
+def cd_index(G, node, time_delta, *, time="time", weight=None):
+    r"""Compute the CD index for `node` within the graph `G`.

     Calculates the CD index for the given node of the graph,
     considering only its predecessors who have the `time` attribute
@@ -85,7 +87,7 @@ def cd_index(G, node, time_delta, *, time='time', weight=None):
     below:

     .. math::
-        CD_{t}=\\frac{1}{n_{t}}\\sum_{i=1}^{n}\\frac{-2f_{it}b_{it}+f_{it}}{w_{it}},
+        CD_{t}=\frac{1}{n_{t}}\sum_{i=1}^{n}\frac{-2f_{it}b_{it}+f_{it}}{w_{it}},

     where `f_{it}` equals 1 if `i` cites the focal patent else 0, `b_{it}` equals
     1 if `i` cites any of the focal patents successors else 0, `n_{t}` is the number
@@ -108,4 +110,33 @@ def cd_index(G, node, time_delta, *, time='time', weight=None):
            http://russellfunk.org/cdindex/static/papers/funk_ms_2017.pdf

     """
-    pass
+    if not all(time in G.nodes[n] for n in G):
+        raise nx.NetworkXError("Not all nodes have a 'time' attribute.")
+
+    try:
+        # get target_date
+        target_date = G.nodes[node][time] + time_delta
+        # keep the predecessors that existed before the target date
+        pred = {i for i in G.pred[node] if G.nodes[i][time] <= target_date}
+    except:
+        raise nx.NetworkXError(
+            "Addition and comparison are not supported between 'time_delta' "
+            "and 'time' types."
+        )
+
+    # -1 if any edge between node's predecessors and node's successors, else 1
+    b = [-1 if any(j in G[i] for j in G[node]) else 1 for i in pred]
+
+    # n is size of the union of the focal node's predecessors and its successors' predecessors
+    n = len(pred.union(*(G.pred[s].keys() - {node} for s in G[node])))
+    if n == 0:
+        raise nx.NetworkXError("The cd index cannot be defined.")
+
+    # calculate cd index
+    if weight is None:
+        return round(sum(bi for bi in b) / n, 2)
+    else:
+        # If a node has the specified weight attribute, its weight is used in the calculation
+        # otherwise, a weight of 1 is assumed for that node
+        weights = [G.nodes[i].get(weight, 1) for i in pred]
+        return round(sum(bi / wt for bi, wt in zip(b, weights)) / n, 2)
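
A minimal sketch of usage on a toy citation network (nodes and dates are made up for illustration): patent 2 cites only the focal patent 0, while patent 3 cites both 0 and the earlier patent 1 that 0 itself cites, so the two contributions cancel:

    from datetime import datetime, timedelta

    import networkx as nx

    # edges point from the citing work to the cited work
    G = nx.DiGraph([(0, 1), (2, 0), (3, 0), (3, 1)])
    nx.set_node_attributes(G, {n: datetime(2000 + n, 1, 1) for n in G}, "time")
    assert nx.cd_index(G, 0, time_delta=timedelta(days=3650)) == 0.0
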
diff --git a/networkx/algorithms/tournament.py b/networkx/algorithms/tournament.py
index 81de98140..43a71faa7 100644
--- a/networkx/algorithms/tournament.py
+++ b/networkx/algorithms/tournament.py
@@ -21,11 +21,19 @@ To access the functions in this module, you must access them through the

 """
 from itertools import combinations
+
 import networkx as nx
 from networkx.algorithms.simple_paths import is_simple_path as is_path
 from networkx.utils import arbitrary_element, not_implemented_for, py_random_state
-__all__ = ['hamiltonian_path', 'is_reachable', 'is_strongly_connected',
-    'is_tournament', 'random_tournament', 'score_sequence']
+
+__all__ = [
+    "hamiltonian_path",
+    "is_reachable",
+    "is_strongly_connected",
+    "is_tournament",
+    "random_tournament",
+    "score_sequence",
+]


 def index_satisfying(iterable, condition):
@@ -40,11 +48,23 @@ def index_satisfying(iterable, condition):
     function raises :exc:`ValueError`.

     """
-    pass
-
-
-@not_implemented_for('undirected')
-@not_implemented_for('multigraph')
+    # Pre-condition: iterable must not be empty.
+    for i, x in enumerate(iterable):
+        if condition(x):
+            return i
+    # If we reach the end of the iterable without finding an element
+    # that satisfies the condition, return the length of the iterable,
+    # which is one greater than the index of its last element. If the
+    # iterable was empty, `i` will not be defined, so we raise an
+    # exception.
+    try:
+        return i + 1
+    except NameError as err:
+        raise ValueError("iterable must be non-empty") from err
+
+
+@not_implemented_for("undirected")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def is_tournament(G):
     """Returns True if and only if `G` is a tournament.
@@ -75,11 +95,15 @@ def is_tournament(G):
     the convention used here.

     """
-    pass
+    # In a tournament, there is exactly one directed edge joining each pair.
+    return (
+        all((v in G[u]) ^ (u in G[v]) for u, v in combinations(G, 2))
+        and nx.number_of_selfloops(G) == 0
+    )


-@not_implemented_for('undirected')
-@not_implemented_for('multigraph')
+@not_implemented_for("undirected")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def hamiltonian_path(G):
     """Returns a Hamiltonian path in the given tournament graph.
@@ -113,13 +137,23 @@ def hamiltonian_path(G):
     $n$ is the number of nodes in the graph.

     """
-    pass
+    if len(G) == 0:
+        return []
+    if len(G) == 1:
+        return [arbitrary_element(G)]
+    v = arbitrary_element(G)
+    hampath = hamiltonian_path(G.subgraph(set(G) - {v}))
+    # Get the index of the first node in the path that does *not* have
+    # an edge to `v`, then insert `v` before that node.
+    index = index_satisfying(hampath, lambda u: v not in G[u])
+    hampath.insert(index, v)
+    return hampath
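
Illustrative usage on the transitive tournament on three nodes, whose unique Hamiltonian path follows the arc directions:

    import networkx as nx
    from networkx.algorithms import tournament

    G = nx.DiGraph([(0, 1), (0, 2), (1, 2)])
    assert tournament.hamiltonian_path(G) == [0, 1, 2]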


 @py_random_state(1)
 @nx._dispatchable(graphs=None, returns_graph=True)
 def random_tournament(n, seed=None):
-    """Returns a random tournament graph on `n` nodes.
+    r"""Returns a random tournament graph on `n` nodes.

     Parameters
     ----------
@@ -138,16 +172,20 @@ def random_tournament(n, seed=None):
     Notes
     -----
     This algorithm adds, for each pair of distinct nodes, an edge with
-    uniformly random orientation. In other words, `\\binom{n}{2}` flips
+    uniformly random orientation. In other words, `\binom{n}{2}` flips
     of an unbiased coin decide the orientations of the edges in the
     graph.

     """
-    pass
+    # Flip an unbiased coin for each pair of distinct nodes.
+    coins = (seed.random() for i in range((n * (n - 1)) // 2))
+    pairs = combinations(range(n), 2)
+    edges = ((u, v) if r < 0.5 else (v, u) for (u, v), r in zip(pairs, coins))
+    return nx.DiGraph(edges)
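
Illustrative check: every pair of distinct nodes gets exactly one arc, so the result always passes `is_tournament` and has n(n-1)/2 edges:

    from networkx.algorithms import tournament

    G = tournament.random_tournament(5, seed=1)
    assert tournament.is_tournament(G)
    assert G.number_of_edges() == 5 * 4 // 2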


-@not_implemented_for('undirected')
-@not_implemented_for('multigraph')
+@not_implemented_for("undirected")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def score_sequence(G):
     """Returns the score sequence for the given tournament graph.
@@ -174,14 +212,14 @@ def score_sequence(G):
     [1, 1, 2, 2]

     """
-    pass
+    return sorted(d for v, d in G.out_degree())


-@not_implemented_for('undirected')
-@not_implemented_for('multigraph')
-@nx._dispatchable(preserve_edge_attrs={'G': {'weight': 1}})
+@not_implemented_for("undirected")
+@not_implemented_for("multigraph")
+@nx._dispatchable(preserve_edge_attrs={"G": {"weight": 1}})
 def tournament_matrix(G):
-    """Returns the tournament matrix for the given tournament graph.
+    r"""Returns the tournament matrix for the given tournament graph.

     This function requires SciPy.

@@ -191,11 +229,11 @@ def tournament_matrix(G):
     .. math::

        T_{i j} =
-       \\begin{cases}
-       +1 & \\text{if } (i, j) \\in E \\\\
-       -1 & \\text{if } (j, i) \\in E \\\\
-       0 & \\text{if } i == j.
-       \\end{cases}
+       \begin{cases}
+       +1 & \text{if } (i, j) \in E \\
+       -1 & \text{if } (j, i) \in E \\
+       0 & \text{if } i == j.
+       \end{cases}

     An equivalent definition is `T = A - A^T`, where *A* is the
     adjacency matrix of the graph `G`.
@@ -216,11 +254,12 @@ def tournament_matrix(G):
         If SciPy is not available.

     """
-    pass
+    A = nx.adjacency_matrix(G)
+    return A - A.T


-@not_implemented_for('undirected')
-@not_implemented_for('multigraph')
+@not_implemented_for("undirected")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def is_reachable(G, s, t):
     """Decides whether there is a path from `s` to `t` in the
@@ -276,12 +315,41 @@ def is_reachable(G, s, t):
            *Electronic Colloquium on Computational Complexity*. 2001.
            <http://eccc.hpi-web.de/report/2001/092/>
     """
-    pass
+
+    def two_neighborhood(G, v):
+        """Returns the set of nodes at distance at most two from `v`.
+
+        `G` must be a graph and `v` a node in that graph.
+
+        The returned set includes the nodes at distance zero (that is,
+        the node `v` itself), the nodes at distance one (that is, the
+        out-neighbors of `v`), and the nodes at distance two.
+
+        """
+        # TODO This is trivially parallelizable.
+        return {
+            x for x in G if x == v or x in G[v] or any(is_path(G, [v, z, x]) for z in G)
+        }
+
+    def is_closed(G, nodes):
+        """Decides whether the given set of nodes is closed.
+
+        A set *S* of nodes is *closed* if for each node *u* in the graph
+        not in *S* and for each node *v* in *S*, there is an edge from
+        *u* to *v*.
+
+        """
+        # TODO This is trivially parallelizable.
+        return all(v in G[u] for u in set(G) - nodes for v in nodes)
+
+    # TODO This is trivially parallelizable.
+    neighborhoods = [two_neighborhood(G, v) for v in G]
+    return all(not (is_closed(G, S) and s in S and t not in S) for S in neighborhoods)


-@not_implemented_for('undirected')
-@not_implemented_for('multigraph')
-@nx._dispatchable(name='tournament_is_strongly_connected')
+@not_implemented_for("undirected")
+@not_implemented_for("multigraph")
+@nx._dispatchable(name="tournament_is_strongly_connected")
 def is_strongly_connected(G):
     """Decides whether the given tournament is strongly connected.

@@ -334,4 +402,5 @@ def is_strongly_connected(G):
            <http://eccc.hpi-web.de/report/2001/092/>

     """
-    pass
+    # TODO This is trivially parallelizable.
+    return all(is_reachable(G, u, v) for u in G for v in G)
diff --git a/networkx/algorithms/traversal/beamsearch.py b/networkx/algorithms/traversal/beamsearch.py
index 05b79ba54..ab90f44ae 100644
--- a/networkx/algorithms/traversal/beamsearch.py
+++ b/networkx/algorithms/traversal/beamsearch.py
@@ -1,6 +1,7 @@
 """Basic algorithms for breadth-first searching the nodes of a graph."""
 import networkx as nx
-__all__ = ['bfs_beam_edges']
+
+__all__ = ["bfs_beam_edges"]


 @nx._dispatchable
@@ -59,4 +60,30 @@ def bfs_beam_edges(G, source, value, width=None):
     >>> list(nx.bfs_beam_edges(G, source=0, value=centrality.get, width=3))
     [(0, 2), (0, 1), (0, 8), (2, 32), (1, 13), (8, 33)]
     """
-    pass
+
+    if width is None:
+        width = len(G)
+
+    def successors(v):
+        """Returns a list of the best neighbors of a node.
+
+        `v` is a node in the graph `G`.
+
+        The "best" neighbors are chosen according to the `value`
+        function (higher is better). Only the `width` best neighbors of
+        `v` are returned.
+        """
+        # TODO The Python documentation states that for small values, it
+        # is better to use `heapq.nlargest`. We should determine the
+        # threshold at which it's better to use `heapq.nlargest()`
+        # instead of `sorted()[:]` and apply that optimization here.
+        #
+        # If `width` is greater than the number of neighbors of `v`, all
+        # neighbors are returned by the semantics of slicing in
+        # Python. This occurs in the special case that the user did not
+        # specify a `width`: in this case all neighbors are always
+        # returned, so this is just a (slower) implementation of
+        # `bfs_edges(G, source)` but with a sorted enqueue step.
+        return iter(sorted(G.neighbors(v), key=value, reverse=True)[:width])
+
+    yield from nx.generic_bfs_edges(G, source, successors)
diff --git a/networkx/algorithms/traversal/breadth_first_search.py b/networkx/algorithms/traversal/breadth_first_search.py
index 3e1cee697..b87cca124 100644
--- a/networkx/algorithms/traversal/breadth_first_search.py
+++ b/networkx/algorithms/traversal/breadth_first_search.py
@@ -1,14 +1,22 @@
 """Basic algorithms for breadth-first searching the nodes of a graph."""
 from collections import deque
+
 import networkx as nx
-__all__ = ['bfs_edges', 'bfs_tree', 'bfs_predecessors', 'bfs_successors',
-    'descendants_at_distance', 'bfs_layers', 'bfs_labeled_edges',
-    'generic_bfs_edges']
+
+__all__ = [
+    "bfs_edges",
+    "bfs_tree",
+    "bfs_predecessors",
+    "bfs_successors",
+    "descendants_at_distance",
+    "bfs_layers",
+    "bfs_labeled_edges",
+    "generic_bfs_edges",
+]


 @nx._dispatchable
-def generic_bfs_edges(G, source, neighbors=None, depth_limit=None,
-    sort_neighbors=None):
+def generic_bfs_edges(G, source, neighbors=None, depth_limit=None, sort_neighbors=None):
     """Iterate over edges in a breadth-first search.

     The breadth-first search begins at `source` and enqueues the
@@ -85,7 +93,40 @@ def generic_bfs_edges(G, source, neighbors=None, depth_limit=None,
     .. _PADS: http://www.ics.uci.edu/~eppstein/PADS/BFS.py
     .. _Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search
     """
-    pass
+    if neighbors is None:
+        neighbors = G.neighbors
+    if sort_neighbors is not None:
+        import warnings
+
+        warnings.warn(
+            (
+                "The sort_neighbors parameter is deprecated and will be removed\n"
+                "in NetworkX 3.4, use the neighbors parameter instead."
+            ),
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        _neighbors = neighbors
+        neighbors = lambda node: iter(sort_neighbors(_neighbors(node)))
+    if depth_limit is None:
+        depth_limit = len(G)
+
+    seen = {source}
+    n = len(G)
+    depth = 0
+    next_parents_children = [(source, neighbors(source))]
+    while next_parents_children and depth < depth_limit:
+        this_parents_children = next_parents_children
+        next_parents_children = []
+        for parent, children in this_parents_children:
+            for child in children:
+                if child not in seen:
+                    seen.add(child)
+                    next_parents_children.append((child, neighbors(child)))
+                    yield parent, child
+            if len(seen) == n:
+                return
+        depth += 1
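
A minimal sketch of the custom-neighbors hook: passing a callable that yields neighbors in descending order changes the visit order without touching the BFS bookkeeping:

    import networkx as nx

    G = nx.Graph([(0, 1), (0, 2), (0, 3), (2, 4)])
    desc = lambda n: iter(sorted(G.neighbors(n), reverse=True))
    assert list(nx.generic_bfs_edges(G, 0, neighbors=desc)) == [
        (0, 3),
        (0, 2),
        (0, 1),
        (2, 4),
    ]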


 @nx._dispatchable
@@ -164,7 +205,17 @@ def bfs_edges(G, source, reverse=False, depth_limit=None, sort_neighbors=None):
     :func:`~networkx.algorithms.traversal.edgebfs.edge_bfs`

     """
-    pass
+    if reverse and G.is_directed():
+        successors = G.predecessors
+    else:
+        successors = G.neighbors
+
+    if sort_neighbors is not None:
+        yield from generic_bfs_edges(
+            G, source, lambda node: iter(sort_neighbors(successors(node))), depth_limit
+        )
+    else:
+        yield from generic_bfs_edges(G, source, successors, depth_limit)


 @nx._dispatchable(returns_graph=True)
@@ -222,7 +273,17 @@ def bfs_tree(G, source, reverse=False, depth_limit=None, sort_neighbors=None):
     bfs_edges
     edge_bfs
     """
-    pass
+    T = nx.DiGraph()
+    T.add_node(source)
+    edges_gen = bfs_edges(
+        G,
+        source,
+        reverse=reverse,
+        depth_limit=depth_limit,
+        sort_neighbors=sort_neighbors,
+    )
+    T.add_edges_from(edges_gen)
+    return T


 @nx._dispatchable
@@ -285,7 +346,10 @@ def bfs_predecessors(G, source, depth_limit=None, sort_neighbors=None):
     bfs_edges
     edge_bfs
     """
-    pass
+    for s, t in bfs_edges(
+        G, source, depth_limit=depth_limit, sort_neighbors=sort_neighbors
+    ):
+        yield (t, s)


 @nx._dispatchable
@@ -348,7 +412,18 @@ def bfs_successors(G, source, depth_limit=None, sort_neighbors=None):
     bfs_edges
     edge_bfs
     """
-    pass
+    parent = source
+    children = []
+    for p, c in bfs_edges(
+        G, source, depth_limit=depth_limit, sort_neighbors=sort_neighbors
+    ):
+        if p == parent:
+            children.append(c)
+            continue
+        yield (parent, children)
+        children = [c]
+        parent = p
+    yield (parent, children)
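
Illustrative check: the edges from `bfs_edges` arrive grouped by parent, so consecutive runs collapse into one (parent, children) pair:

    import networkx as nx

    assert list(nx.bfs_successors(nx.path_graph(4), 0)) == [(0, [1]), (1, [2]), (2, [3])]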


 @nx._dispatchable
@@ -380,13 +455,33 @@ def bfs_layers(G, sources):
     >>> dict(enumerate(nx.bfs_layers(H, [1, 6])))
     {0: [1, 6], 1: [0, 3, 4, 2], 2: [5]}
     """
-    pass
+    if sources in G:
+        sources = [sources]
+
+    current_layer = list(sources)
+    visited = set(sources)
+
+    for source in current_layer:
+        if source not in G:
+            raise nx.NetworkXError(f"The node {source} is not in the graph.")
+
+    # this is basically BFS, except that the current layer only stores the nodes at
+    # same distance from sources at each iteration
+    while current_layer:
+        yield current_layer
+        next_layer = []
+        for node in current_layer:
+            for child in G[node]:
+                if child not in visited:
+                    visited.add(child)
+                    next_layer.append(child)
+        current_layer = next_layer


-REVERSE_EDGE = 'reverse'
-TREE_EDGE = 'tree'
-FORWARD_EDGE = 'forward'
-LEVEL_EDGE = 'level'
+REVERSE_EDGE = "reverse"
+TREE_EDGE = "tree"
+FORWARD_EDGE = "forward"
+LEVEL_EDGE = "level"


 @nx._dispatchable
@@ -430,7 +525,38 @@ def bfs_labeled_edges(G, sources):
     >>> list(nx.bfs_labeled_edges(G, [0, 1]))
     [(0, 1, 'level'), (0, 2, 'tree'), (1, 2, 'forward')]
     """
-    pass
+    if sources in G:
+        sources = [sources]
+
+    neighbors = G._adj
+    directed = G.is_directed()
+    visited = set()
+    visit = visited.discard if directed else visited.add
+    # We use visited in a negative sense, so the visited set stays empty for the
+    # directed case and level edges are reported on their first occurrence in
+    # the undirected case.  Note our use of visited.discard -- this is built-in
+    # thus somewhat faster than a python-defined def nop(x): pass
+    depth = {s: 0 for s in sources}
+    queue = deque(depth.items())
+    push = queue.append
+    pop = queue.popleft
+    while queue:
+        u, du = pop()
+        for v in neighbors[u]:
+            if v not in depth:
+                depth[v] = dv = du + 1
+                push((v, dv))
+                yield u, v, TREE_EDGE
+            else:
+                dv = depth[v]
+                if du == dv:
+                    if v not in visited:
+                        yield u, v, LEVEL_EDGE
+                elif du < dv:
+                    yield u, v, FORWARD_EDGE
+                elif directed:
+                    yield u, v, REVERSE_EDGE
+        visit(u)


 @nx._dispatchable
@@ -463,4 +589,11 @@ def descendants_at_distance(G, source, distance):
     >>> nx.descendants_at_distance(H, 5, 1)
     set()
     """
-    pass
+    if source not in G:
+        raise nx.NetworkXError(f"The node {source} is not in the graph.")
+
+    bfs_generator = nx.bfs_layers(G, source)
+    for i, layer in enumerate(bfs_generator):
+        if i == distance:
+            return set(layer)
+    return set()
diff --git a/networkx/algorithms/traversal/depth_first_search.py b/networkx/algorithms/traversal/depth_first_search.py
index 104f282d5..3ca0f91d9 100644
--- a/networkx/algorithms/traversal/depth_first_search.py
+++ b/networkx/algorithms/traversal/depth_first_search.py
@@ -1,8 +1,17 @@
 """Basic algorithms for depth-first searching the nodes of a graph."""
 from collections import defaultdict
+
 import networkx as nx
-__all__ = ['dfs_edges', 'dfs_tree', 'dfs_predecessors', 'dfs_successors',
-    'dfs_preorder_nodes', 'dfs_postorder_nodes', 'dfs_labeled_edges']
+
+__all__ = [
+    "dfs_edges",
+    "dfs_tree",
+    "dfs_predecessors",
+    "dfs_successors",
+    "dfs_preorder_nodes",
+    "dfs_postorder_nodes",
+    "dfs_labeled_edges",
+]


 @nx._dispatchable
@@ -65,7 +74,41 @@ def dfs_edges(G, source=None, depth_limit=None, *, sort_neighbors=None):
     .. [1] http://www.ics.uci.edu/~eppstein/PADS
     .. [2] https://en.wikipedia.org/wiki/Depth-limited_search
     """
-    pass
+    if source is None:
+        # edges for all components
+        nodes = G
+    else:
+        # edges for components with source
+        nodes = [source]
+    if depth_limit is None:
+        depth_limit = len(G)
+
+    get_children = (
+        G.neighbors
+        if sort_neighbors is None
+        else lambda n: iter(sort_neighbors(G.neighbors(n)))
+    )
+
+    visited = set()
+    for start in nodes:
+        if start in visited:
+            continue
+        visited.add(start)
+        stack = [(start, get_children(start))]
+        depth_now = 1
+        while stack:
+            parent, children = stack[-1]
+            for child in children:
+                if child not in visited:
+                    yield parent, child
+                    visited.add(child)
+                    if depth_now < depth_limit:
+                        stack.append((child, get_children(child)))
+                        depth_now += 1
+                        break
+            else:
+                stack.pop()
+                depth_now -= 1


 @nx._dispatchable(returns_graph=True)
@@ -110,7 +153,13 @@ def dfs_tree(G, source=None, depth_limit=None, *, sort_neighbors=None):
     :func:`~networkx.algorithms.traversal.edgedfs.edge_dfs`
     :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_tree`
     """
-    pass
+    T = nx.DiGraph()
+    if source is None:
+        T.add_nodes_from(G)
+    else:
+        T.add_node(source)
+    T.add_edges_from(dfs_edges(G, source, depth_limit, sort_neighbors=sort_neighbors))
+    return T


 @nx._dispatchable
@@ -169,7 +218,10 @@ def dfs_predecessors(G, source=None, depth_limit=None, *, sort_neighbors=None):
     :func:`~networkx.algorithms.traversal.edgedfs.edge_dfs`
     :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_tree`
     """
-    pass
+    return {
+        t: s
+        for s, t in dfs_edges(G, source, depth_limit, sort_neighbors=sort_neighbors)
+    }


 @nx._dispatchable
@@ -228,12 +280,19 @@ def dfs_successors(G, source=None, depth_limit=None, *, sort_neighbors=None):
     :func:`~networkx.algorithms.traversal.edgedfs.edge_dfs`
     :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_tree`
     """
-    pass
+    d = defaultdict(list)
+    for s, t in dfs_edges(
+        G,
+        source=source,
+        depth_limit=depth_limit,
+        sort_neighbors=sort_neighbors,
+    ):
+        d[s].append(t)
+    return dict(d)


 @nx._dispatchable
-def dfs_postorder_nodes(G, source=None, depth_limit=None, *, sort_neighbors
-    =None):
+def dfs_postorder_nodes(G, source=None, depth_limit=None, *, sort_neighbors=None):
     """Generate nodes in a depth-first-search post-ordering starting at source.

     Parameters
@@ -285,12 +344,14 @@ def dfs_postorder_nodes(G, source=None, depth_limit=None, *, sort_neighbors
     :func:`~networkx.algorithms.traversal.edgedfs.edge_dfs`
     :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_tree`
     """
-    pass
+    edges = nx.dfs_labeled_edges(
+        G, source=source, depth_limit=depth_limit, sort_neighbors=sort_neighbors
+    )
+    return (v for u, v, d in edges if d == "reverse")


 @nx._dispatchable
-def dfs_preorder_nodes(G, source=None, depth_limit=None, *, sort_neighbors=None
-    ):
+def dfs_preorder_nodes(G, source=None, depth_limit=None, *, sort_neighbors=None):
     """Generate nodes in a depth-first-search pre-ordering starting at source.

     Parameters
@@ -342,12 +403,14 @@ def dfs_preorder_nodes(G, source=None, depth_limit=None, *, sort_neighbors=None
     dfs_labeled_edges
     :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_edges`
     """
-    pass
+    edges = nx.dfs_labeled_edges(
+        G, source=source, depth_limit=depth_limit, sort_neighbors=sort_neighbors
+    )
+    return (v for u, v, d in edges if d == "forward")


 @nx._dispatchable
-def dfs_labeled_edges(G, source=None, depth_limit=None, *, sort_neighbors=None
-    ):
+def dfs_labeled_edges(G, source=None, depth_limit=None, *, sort_neighbors=None):
     """Iterate over edges in a depth-first-search (DFS) labeled by type.

     Parameters
@@ -418,4 +481,48 @@ def dfs_labeled_edges(G, source=None, depth_limit=None, *, sort_neighbors=None
     dfs_preorder_nodes
     dfs_postorder_nodes
     """
-    pass
+    # Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py
+    # by D. Eppstein, July 2004.
+    if source is None:
+        # edges for all components
+        nodes = G
+    else:
+        # edges for components with source
+        nodes = [source]
+    if depth_limit is None:
+        depth_limit = len(G)
+
+    get_children = (
+        G.neighbors
+        if sort_neighbors is None
+        else lambda n: iter(sort_neighbors(G.neighbors(n)))
+    )
+
+    visited = set()
+    for start in nodes:
+        if start in visited:
+            continue
+        yield start, start, "forward"
+        visited.add(start)
+        stack = [(start, get_children(start))]
+        depth_now = 1
+        while stack:
+            parent, children = stack[-1]
+            for child in children:
+                if child in visited:
+                    yield parent, child, "nontree"
+                else:
+                    yield parent, child, "forward"
+                    visited.add(child)
+                    if depth_now < depth_limit:
+                        stack.append((child, iter(get_children(child))))
+                        depth_now += 1
+                        break
+                    else:
+                        yield parent, child, "reverse-depth_limit"
+            else:
+                stack.pop()
+                depth_now -= 1
+                if stack:
+                    yield stack[-1][0], parent, "reverse"
+        yield start, start, "reverse"
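
Illustrative trace on a 3-node path: each tree edge appears once labeled 'forward' on discovery and once labeled 'reverse' on backtrack, already-visited neighbors show up as 'nontree', and the start node brackets the whole traversal:

    import networkx as nx

    print(list(nx.dfs_labeled_edges(nx.path_graph(3), source=0)))
    # [(0, 0, 'forward'), (0, 1, 'forward'), (1, 0, 'nontree'),
    #  (1, 2, 'forward'), (2, 1, 'nontree'), (1, 2, 'reverse'),
    #  (0, 1, 'reverse'), (0, 0, 'reverse')]
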
diff --git a/networkx/algorithms/traversal/edgebfs.py b/networkx/algorithms/traversal/edgebfs.py
index 52b0e9277..484ae12b5 100644
--- a/networkx/algorithms/traversal/edgebfs.py
+++ b/networkx/algorithms/traversal/edgebfs.py
@@ -7,10 +7,13 @@ Algorithms for a breadth-first traversal of edges in a graph.

 """
 from collections import deque
+
 import networkx as nx
-FORWARD = 'forward'
-REVERSE = 'reverse'
-__all__ = ['edge_bfs']
+
+FORWARD = "forward"
+REVERSE = "reverse"
+
+__all__ = ["edge_bfs"]


 @nx._dispatchable
@@ -101,4 +104,74 @@ def edge_bfs(G, source=None, orientation=None):
     edge_dfs

     """
-    pass
+    nodes = list(G.nbunch_iter(source))
+    if not nodes:
+        return
+
+    directed = G.is_directed()
+    kwds = {"data": False}
+    if G.is_multigraph() is True:
+        kwds["keys"] = True
+
+    # set up edge lookup
+    if orientation is None:
+
+        def edges_from(node):
+            return iter(G.edges(node, **kwds))
+
+    elif not directed or orientation == "original":
+
+        def edges_from(node):
+            for e in G.edges(node, **kwds):
+                yield e + (FORWARD,)
+
+    elif orientation == "reverse":
+
+        def edges_from(node):
+            for e in G.in_edges(node, **kwds):
+                yield e + (REVERSE,)
+
+    elif orientation == "ignore":
+
+        def edges_from(node):
+            for e in G.edges(node, **kwds):
+                yield e + (FORWARD,)
+            for e in G.in_edges(node, **kwds):
+                yield e + (REVERSE,)
+
+    else:
+        raise nx.NetworkXError("invalid orientation argument.")
+
+    if directed:
+        neighbors = G.successors
+
+        def edge_id(edge):
+            # remove direction indicator
+            return edge[:-1] if orientation is not None else edge
+
+    else:
+        neighbors = G.neighbors
+
+        def edge_id(edge):
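+            # An undirected edge is identified by the frozenset of its two
+            # endpoints (plus the key for multigraphs), so (u, v) and (v, u)
+            # map to the same id.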
+            return (frozenset(edge[:2]),) + edge[2:]
+
+    check_reverse = directed and orientation in ("reverse", "ignore")
+
+    # start BFS
+    visited_nodes = set(nodes)
+    visited_edges = set()
+    queue = deque([(n, edges_from(n)) for n in nodes])
+    while queue:
+        parent, children_edges = queue.popleft()
+        for edge in children_edges:
+            if check_reverse and edge[-1] == REVERSE:
+                child = edge[0]
+            else:
+                child = edge[1]
+            if child not in visited_nodes:
+                visited_nodes.add(child)
+                queue.append((child, edges_from(child)))
+            edgeid = edge_id(edge)
+            if edgeid not in visited_edges:
+                visited_edges.add(edgeid)
+                yield edge
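+    # Illustrative trace: for nx.path_graph(3) and source=0 the edges are
+    # yielded in breadth-first order, i.e. (0, 1) then (1, 2).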
diff --git a/networkx/algorithms/traversal/edgedfs.py b/networkx/algorithms/traversal/edgedfs.py
index a681b37dc..010f68246 100644
--- a/networkx/algorithms/traversal/edgedfs.py
+++ b/networkx/algorithms/traversal/edgedfs.py
@@ -7,9 +7,11 @@ Algorithms for a depth-first traversal of edges in a graph.

 """
 import networkx as nx
-FORWARD = 'forward'
-REVERSE = 'reverse'
-__all__ = ['edge_dfs']
+
+FORWARD = "forward"
+REVERSE = "reverse"
+
+__all__ = ["edge_dfs"]


 @nx._dispatchable
@@ -89,4 +91,85 @@ def edge_dfs(G, source=None, orientation=None):
     :func:`~networkx.algorithms.traversal.depth_first_search.dfs_edges`

     """
-    pass
+    nodes = list(G.nbunch_iter(source))
+    if not nodes:
+        return
+
+    directed = G.is_directed()
+    kwds = {"data": False}
+    if G.is_multigraph() is True:
+        kwds["keys"] = True
+
+    # set up edge lookup
+    if orientation is None:
+
+        def edges_from(node):
+            return iter(G.edges(node, **kwds))
+
+    elif not directed or orientation == "original":
+
+        def edges_from(node):
+            for e in G.edges(node, **kwds):
+                yield e + (FORWARD,)
+
+    elif orientation == "reverse":
+
+        def edges_from(node):
+            for e in G.in_edges(node, **kwds):
+                yield e + (REVERSE,)
+
+    elif orientation == "ignore":
+
+        def edges_from(node):
+            for e in G.edges(node, **kwds):
+                yield e + (FORWARD,)
+            for e in G.in_edges(node, **kwds):
+                yield e + (REVERSE,)
+
+    else:
+        raise nx.NetworkXError("invalid orientation argument.")
+
+    # set up formation of edge_id to easily look up if edge already returned
+    if directed:
+
+        def edge_id(edge):
+            # remove direction indicator
+            return edge[:-1] if orientation is not None else edge
+
+    else:
+
+        def edge_id(edge):
+            # single id for undirected requires frozenset on nodes
+            return (frozenset(edge[:2]),) + edge[2:]
+
+    # Basic setup
+    check_reverse = directed and orientation in ("reverse", "ignore")
+
+    visited_edges = set()
+    visited_nodes = set()
+    edges = {}
+
+    # start DFS
+    for start_node in nodes:
+        stack = [start_node]
+        while stack:
+            current_node = stack[-1]
+            if current_node not in visited_nodes:
+                edges[current_node] = edges_from(current_node)
+                visited_nodes.add(current_node)
+
+            try:
+                edge = next(edges[current_node])
+            except StopIteration:
+                # No more edges from the current node.
+                stack.pop()
+            else:
+                edgeid = edge_id(edge)
+                if edgeid not in visited_edges:
+                    visited_edges.add(edgeid)
+                    # Mark the traversed "to" node as to-be-explored.
+                    if check_reverse and edge[-1] == REVERSE:
+                        stack.append(edge[0])
+                    else:
+                        stack.append(edge[1])
+                    yield edge
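+    # Illustrative trace: for nx.path_graph(3) and source=0 the edges are
+    # yielded in depth-first order, i.e. (0, 1) then (1, 2); the reversed
+    # duplicates (1, 0) and (2, 1) are suppressed by their edge ids.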
diff --git a/networkx/algorithms/tree/branchings.py b/networkx/algorithms/tree/branchings.py
index 35cadd659..6c0e34906 100644
--- a/networkx/algorithms/tree/branchings.py
+++ b/networkx/algorithms/tree/branchings.py
@@ -7,25 +7,73 @@ This implementation is based on:
     233–240. URL: http://archive.org/details/jresv71Bn4p233

 """
+# TODO: Implement method from Gabow, Galil, Spence and Tarjan:
+#
+# @article{
+#    year={1986},
+#    issn={0209-9683},
+#    journal={Combinatorica},
+#    volume={6},
+#    number={2},
+#    doi={10.1007/BF02579168},
+#    title={Efficient algorithms for finding minimum spanning trees in
+#        undirected and directed graphs},
+#    url={https://doi.org/10.1007/BF02579168},
+#    publisher={Springer-Verlag},
+#    keywords={68 B 15; 68 C 05},
+#    author={Gabow, Harold N. and Galil, Zvi and Spencer, Thomas and Tarjan,
+#        Robert E.},
+#    pages={109-122},
+#    language={English}
+# }
 import string
 from dataclasses import dataclass, field
 from operator import itemgetter
 from queue import PriorityQueue
+
 import networkx as nx
 from networkx.utils import py_random_state
+
 from .recognition import is_arborescence, is_branching
-__all__ = ['branching_weight', 'greedy_branching', 'maximum_branching',
-    'minimum_branching', 'minimal_branching',
-    'maximum_spanning_arborescence', 'minimum_spanning_arborescence',
-    'ArborescenceIterator', 'Edmonds']
-KINDS = {'max', 'min'}
-STYLES = {'branching': 'branching', 'arborescence': 'arborescence',
-    'spanning arborescence': 'arborescence'}
-INF = float('inf')
-
-
-@nx._dispatchable(edge_attrs={'attr': 'default'})
-def branching_weight(G, attr='weight', default=1):
+
+__all__ = [
+    "branching_weight",
+    "greedy_branching",
+    "maximum_branching",
+    "minimum_branching",
+    "minimal_branching",
+    "maximum_spanning_arborescence",
+    "minimum_spanning_arborescence",
+    "ArborescenceIterator",
+    "Edmonds",
+]
+
+KINDS = {"max", "min"}
+
+STYLES = {
+    "branching": "branching",
+    "arborescence": "arborescence",
+    "spanning arborescence": "arborescence",
+}
+
+INF = float("inf")
+
+
+@py_random_state(1)
+def random_string(L=15, seed=None):
+    return "".join([seed.choice(string.ascii_letters) for n in range(L)])
+
+
+def _min_weight(weight):
+    return -weight
+
+
+def _max_weight(weight):
+    return weight
+
+
+@nx._dispatchable(edge_attrs={"attr": "default"})
+def branching_weight(G, attr="weight", default=1):
     """
     Returns the total weight of a branching.

@@ -55,12 +103,12 @@ def branching_weight(G, attr='weight', default=1):
     11

     """
-    pass
+    return sum(edge[2].get(attr, default) for edge in G.edges(data=True))


 @py_random_state(4)
-@nx._dispatchable(edge_attrs={'attr': 'default'}, returns_graph=True)
-def greedy_branching(G, attr='weight', default=1, kind='max', seed=None):
+@nx._dispatchable(edge_attrs={"attr": "default"}, returns_graph=True)
+def greedy_branching(G, attr="weight", default=1, kind="max", seed=None):
     """
     Returns a branching obtained through a greedy algorithm.

@@ -93,7 +141,50 @@ def greedy_branching(G, attr='weight', default=1, kind='max', seed=None):
         The greedily obtained branching.

     """
-    pass
+    if kind not in KINDS:
+        raise nx.NetworkXException("Unknown value for `kind`.")
+
+    if kind == "min":
+        reverse = False
+    else:
+        reverse = True
+
+    if attr is None:
+        # Generate a random string the graph probably won't have.
+        attr = random_string(seed=seed)
+
+    edges = [(u, v, data.get(attr, default)) for (u, v, data) in G.edges(data=True)]
+
+    # We sort by weight, but also by nodes to normalize behavior across runs.
+    try:
+        edges.sort(key=itemgetter(2, 0, 1), reverse=reverse)
+    except TypeError:
+        # This will fail in Python 3.x if the nodes are of varying types.
+        # In that case, we use the arbitrary order.
+        edges.sort(key=itemgetter(2), reverse=reverse)
+
+    # The branching begins with a forest of no edges.
+    B = nx.DiGraph()
+    B.add_nodes_from(G)
+
+    # Now we add edges greedily so long as we maintain the branching.
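+    # (Illustrative: on edges [(0, 2, 5), (1, 2, 3)] with kind="max", only
+    # (0, 2) is kept, since adding (1, 2) would give node 2 in-degree 2.)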
+    uf = nx.utils.UnionFind()
+    for i, (u, v, w) in enumerate(edges):
+        if uf[u] == uf[v]:
+            # Adding this edge would form a directed cycle.
+            continue
+        elif B.in_degree(v) == 1:
+            # Adding this edge would give v an in-degree greater than one.
+            continue
+        else:
+            # If attr was None, then don't insert weights...
+            data = {}
+            if attr is not None:
+                data[attr] = w
+            B.add_edge(u, v, **data)
+            uf.union(u, v)
+
+    return B


 class MultiDiGraph_EdgeKey(nx.MultiDiGraph):
@@ -115,20 +206,60 @@ class MultiDiGraph_EdgeKey(nx.MultiDiGraph):
     def __init__(self, incoming_graph_data=None, **attr):
         cls = super()
         cls.__init__(incoming_graph_data=incoming_graph_data, **attr)
+
         self._cls = cls
         self.edge_index = {}
+
         import warnings
-        msg = (
-            'MultiDiGraph_EdgeKey has been deprecated and will be removed in NetworkX 3.4.'
-            )
+
+        msg = "MultiDiGraph_EdgeKey has been deprecated and will be removed in NetworkX 3.4."
         warnings.warn(msg, DeprecationWarning)

+    def remove_node(self, n):
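+        # Collect the keys of every edge incident to n, in either direction,
+        # so the external edge_index stays consistent with the graph.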
+        keys = set()
+        for keydict in self.pred[n].values():
+            keys.update(keydict)
+        for keydict in self.succ[n].values():
+            keys.update(keydict)
+
+        for key in keys:
+            del self.edge_index[key]
+
+        self._cls.remove_node(n)
+
+    def remove_nodes_from(self, nbunch):
+        for n in nbunch:
+            self.remove_node(n)
+
     def add_edge(self, u_for_edge, v_for_edge, key_for_edge, **attr):
         """
         Key is now required.

         """
-        pass
+        u, v, key = u_for_edge, v_for_edge, key_for_edge
+        if key in self.edge_index:
+            uu, vv, _ = self.edge_index[key]
+            if (u != uu) or (v != vv):
+                raise Exception(f"Key {key!r} is already in use.")
+
+        self._cls.add_edge(u, v, key, **attr)
+        self.edge_index[key] = (u, v, self.succ[u][v][key])
+
+    def add_edges_from(self, ebunch_to_add, **attr):
+        for u, v, k, d in ebunch_to_add:
+            self.add_edge(u, v, k, **d)
+
+    def remove_edge_with_key(self, key):
+        try:
+            u, v, _ = self.edge_index[key]
+        except KeyError as err:
+            raise KeyError(f"Invalid edge key {key!r}") from err
+        else:
+            del self.edge_index[key]
+            self._cls.remove_edge(u, v, key)
+
+    def remove_edges_from(self, ebunch):
+        raise NotImplementedError


 def get_path(G, u, v):
@@ -139,7 +270,20 @@ def get_path(G, u, v):
     MultiDiGraph_EdgeKey.

     """
-    pass
+    nodes = nx.shortest_path(G, u, v)
+
+    # We are guaranteed that there is only one edge connecting each pair of
+    # consecutive nodes in the shortest path.
+
+    def first_key(i, vv):
+        # Needed for 2.x/3.x compatibility
+        keys = G[nodes[i]][vv].keys()
+        # Normalize behavior
+        keys = list(keys)
+        return keys[0]
+
+    edges = [first_key(i, vv) for i, vv in enumerate(nodes[1:])]
+    return nodes, edges


 class Edmonds:
@@ -166,17 +310,23 @@ class Edmonds:

     def __init__(self, G, seed=None):
         self.G_original = G
+
+        # Need to fix this. We need the whole tree.
         self.store = True
+
+        # The final answer.
         self.edges = []
-        self.template = random_string(seed=seed) + '_{0}'
+
+        # Since we will be creating graphs with new nodes, we need to make
+        # sure that our node names do not conflict with the real node names.
+        self.template = random_string(seed=seed) + "_{0}"
+
         import warnings
-        msg = (
-            'Edmonds has been deprecated and will be removed in NetworkX 3.4. Please use the appropriate minimum or maximum branching or arborescence function directly.'
-            )
+
+        msg = "Edmonds has been deprecated and will be removed in NetworkX 3.4. Please use the appropriate minimum or maximum branching or arborescence function directly."
         warnings.warn(msg, DeprecationWarning)

-    def _init(self, attr, default, kind, style, preserve_attrs, seed, partition
-        ):
+    def _init(self, attr, default, kind, style, preserve_attrs, seed, partition):
         """
         So we need the code in _init and find_optimum to successfully run edmonds algorithm.
         Responsibilities of the _init function:
@@ -190,10 +340,93 @@ class Edmonds:
           other edge attributes if we set preserve_attrs = True.
         - Setup the buckets and union find data structures required for the algorithm.
         """
-        pass
-
-    def find_optimum(self, attr='weight', default=1, kind='max', style=
-        'branching', preserve_attrs=False, partition=None, seed=None):
+        if kind not in KINDS:
+            raise nx.NetworkXException("Unknown value for `kind`.")
+
+        # Store inputs.
+        self.attr = attr
+        self.default = default
+        self.kind = kind
+        self.style = style
+
+        # Determine how we are going to transform the weights.
+        if kind == "min":
+            self.trans = trans = _min_weight
+        else:
+            self.trans = trans = _max_weight
+
+        if attr is None:
+            # Generate a random attr the graph probably won't have.
+            attr = random_string(seed=seed)
+
+        # This is the actual attribute used by the algorithm.
+        self._attr = attr
+
+        # This attribute is used to store whether a particular edge is still
+        # a candidate. We generate a random attr to remove clashes with
+        # preserved edges
+        self.candidate_attr = "candidate_" + random_string(seed=seed)
+
+        # The object we manipulate at each step is a multidigraph.
+        self.G = G = MultiDiGraph_EdgeKey()
+        self.G.__networkx_cache__ = None  # Disable caching
+        for key, (u, v, data) in enumerate(self.G_original.edges(data=True)):
+            d = {attr: trans(data.get(attr, default))}
+
+            if data.get(partition) is not None:
+                d[partition] = data.get(partition)
+
+            if preserve_attrs:
+                for d_k, d_v in data.items():
+                    if d_k != attr:
+                        d[d_k] = d_v
+
+            G.add_edge(u, v, key, **d)
+
+        self.level = 0
+
+        # These are the "buckets" from the paper.
+        #
+        # As in the paper, G^i are modified versions of the original graph.
+        # D^i and E^i are nodes and edges of the maximal edges that are
+        # consistent with G^i. These are dashed edges in figures A-F of the
+        # paper. In this implementation, we store D^i and E^i together as a
+        # graph B^i. So we will have strictly more B^i than the paper does.
+        self.B = MultiDiGraph_EdgeKey()
+        self.B.edge_index = {}
+        self.graphs = []  # G^i
+        self.branchings = []  # B^i
+        self.uf = nx.utils.UnionFind()
+
+        # A list of lists of edge indexes. Each list is a circuit for graph G^i.
+        # Note the edge list will not, in general, be a circuit in graph G^0.
+        self.circuits = []
+        # Stores the index of the minimum edge in the circuit found in G^i
+        # and B^i. The ordering of the edges seems to preserve the weight
+        # ordering from G^0. So even if the circuit does not form a circuit
+        # in G^0, it is still true that the minimum edge of the circuit in
+        # G^i is still the minimum edge in circuit G^0 (despite their weights
+        # being different).
+        self.minedge_circuit = []
+
+    # TODO: separate each step into an inner function. Then the overall loop would become
+    # while True:
+    #     step_I1()
+    #     if cycle detected:
+    #         step_I2()
+    #     elif every node of G is in D and E is a branching
+    #         break
+
+    def find_optimum(
+        self,
+        attr="weight",
+        default=1,
+        kind="max",
+        style="branching",
+        preserve_attrs=False,
+        partition=None,
+        seed=None,
+    ):
         """
         Returns a branching from G.

@@ -228,13 +461,741 @@ class Edmonds:
             The branching.

         """
-        pass
+        self._init(attr, default, kind, style, preserve_attrs, seed, partition)
+        uf = self.uf
+
+        # This enormous while loop could use some refactoring...
+
+        G, B = self.G, self.B
+        D = set()
+        nodes = iter(list(G.nodes()))
+        attr = self._attr
+
+        def desired_edge(v):
+            """
+            Find the edge directed toward v with maximal weight.
+
+            If an edge partition exists in this graph, return the included edge
+            if it exists and do not return any excluded edges. There can only
+            be one included edge for each vertex, otherwise the edge partition
+            is empty.
+            """
+            edge = None
+            weight = -INF
+            for u, _, key, data in G.in_edges(v, data=True, keys=True):
+                # Skip excluded edges
+                if data.get(partition) == nx.EdgePartition.EXCLUDED:
+                    continue
+                new_weight = data[attr]
+                # Return the included edge
+                if data.get(partition) == nx.EdgePartition.INCLUDED:
+                    weight = new_weight
+                    edge = (u, v, key, new_weight, data)
+                    return edge, weight
+                # Find the best open edge
+                if new_weight > weight:
+                    weight = new_weight
+                    edge = (u, v, key, new_weight, data)
+
+            return edge, weight
+
+        while True:
+            # (I1): Choose a node v in G^i not in D^i.
+            try:
+                v = next(nodes)
+            except StopIteration:
+                # If there are no more new nodes to consider, then we *should*
+                # meet the break condition (b) from the paper:
+                #   (b) every node of G^i is in D^i and E^i is a branching
+                # Construction guarantees that it's a branching.
+                assert len(G) == len(B)
+                if len(B):
+                    assert is_branching(B)
+
+                if self.store:
+                    self.graphs.append(G.copy())
+                    self.branchings.append(B.copy())
+
+                    # Add these to keep the lengths equal. Element i is the
+                    # circuit at level i that was merged to form branching i+1.
+                    # There is no circuit for the last level.
+                    self.circuits.append([])
+                    self.minedge_circuit.append(None)
+                break
+            else:
+                if v in D:
+                    # print("v in D", v)
+                    continue
+
+            # Put v into bucket D^i.
+            # print(f"Adding node {v}")
+            D.add(v)
+            B.add_node(v)
+            # End (I1)
+
+            # Start cycle detection
+            edge, weight = desired_edge(v)
+            # print(f"Max edge is {edge!r}")
+            if edge is None:
+                # If there is no edge, continue with a new node at (I1).
+                continue
+            else:
+                # Determine if adding the edge to E^i would mean it is no
+                # longer a branching. Presently, v has in-degree 0 in B, so
+                # it is a root.
+                u = edge[0]
+
+                if uf[u] == uf[v]:
+                    # Then adding the edge will create a circuit. Then B
+                    # contains a unique path P from v to u. So condition (a)
+                    # from the paper does hold. We need to store the circuit
+                    # for future reference.
+                    Q_nodes, Q_edges = get_path(B, v, u)
+                    Q_edges.append(edge[2])  # Edge key
+                else:
+                    # Then B with the edge is still a branching and condition
+                    # (a) from the paper does not hold.
+                    Q_nodes, Q_edges = None, None
+                # End cycle detection
+
+                # THIS WILL PROBABLY BE REMOVED? MAYBE A NEW ARG FOR THIS FEATURE?
+                # Conditions for adding the edge.
+                # If weight < 0, then it cannot help in finding a maximum branching.
+                # This is the root of the problem with minimum branching.
+                if self.style == "branching" and weight <= 0:
+                    acceptable = False
+                else:
+                    acceptable = True
+
+                # print(f"Edge is acceptable: {acceptable}")
+                if acceptable:
+                    dd = {attr: weight}
+                    if edge[4].get(partition) is not None:
+                        dd[partition] = edge[4].get(partition)
+                    B.add_edge(u, v, edge[2], **dd)
+                    G[u][v][edge[2]][self.candidate_attr] = True
+                    uf.union(u, v)
+                    if Q_edges is not None:
+                        # print("Edge introduced a simple cycle:")
+                        # print(Q_nodes, Q_edges)
+
+                        # Move to method
+                        # Previous meaning of u and v is no longer important.
+
+                        # Apply (I2).
+                        # Get the edge in the cycle with the minimum weight.
+                        # Also, save the incoming weights for each node.
+                        minweight = INF
+                        minedge = None
+                        Q_incoming_weight = {}
+                        for edge_key in Q_edges:
+                            u, v, data = B.edge_index[edge_key]
+                            # We cannot remove an included edge, even if it
+                            # is the minimum edge in the circuit.
+                            w = data[attr]
+                            Q_incoming_weight[v] = w
+                            if data.get(partition) == nx.EdgePartition.INCLUDED:
+                                continue
+                            if w < minweight:
+                                minweight = w
+                                minedge = edge_key
+
+                        self.circuits.append(Q_edges)
+                        self.minedge_circuit.append(minedge)
+
+                        if self.store:
+                            self.graphs.append(G.copy())
+                        # Always need the branching with circuits.
+                        self.branchings.append(B.copy())
+
+                        # Now we mutate it.
+                        new_node = self.template.format(self.level)
+
+                        # print(minweight, minedge, Q_incoming_weight)
+
+                        G.add_node(new_node)
+                        new_edges = []
+                        for u, v, key, data in G.edges(data=True, keys=True):
+                            if u in Q_incoming_weight:
+                                if v in Q_incoming_weight:
+                                    # Circuit edge, do nothing for now.
+                                    # Eventually delete it.
+                                    continue
+                                else:
+                                    # Outgoing edge. Make it from new node
+                                    dd = data.copy()
+                                    new_edges.append((new_node, v, key, dd))
+                            else:
+                                if v in Q_incoming_weight:
+                                    # Incoming edge. Change its weight
+                                    w = data[attr]
+                                    w += minweight - Q_incoming_weight[v]
+                                    dd = data.copy()
+                                    dd[attr] = w
+                                    new_edges.append((u, new_node, key, dd))
+                                else:
+                                    # Outside edge. No modification necessary.
+                                    continue
+
+                        G.remove_nodes_from(Q_nodes)
+                        B.remove_nodes_from(Q_nodes)
+                        D.difference_update(set(Q_nodes))
+
+                        for u, v, key, data in new_edges:
+                            G.add_edge(u, v, key, **data)
+                            if self.candidate_attr in data:
+                                del data[self.candidate_attr]
+                                B.add_edge(u, v, key, **data)
+                                uf.union(u, v)
+
+                        nodes = iter(list(G.nodes()))
+                        self.level += 1
+                    # END STEP (I2)?
+
+        # (I3) Branch construction.
+        # print(self.level)
+        H = self.G_original.__class__()
+
+        def is_root(G, u, edgekeys):
+            """
+            Returns True if `u` is a root node in G.
+
+            Node `u` will be a root node if its in-degree, restricted to the
+            specified edges, is equal to 0.
+
+            """
+            if u not in G:
+                # print(G.nodes(), u)
+                raise Exception(f"{u!r} not in G")
+            for v in G.pred[u]:
+                for edgekey in G.pred[u][v]:
+                    if edgekey in edgekeys:
+                        return False, edgekey
+            else:
+                return True, None
+
+        # Start with the branching edges in the last level.
+        edges = set(self.branchings[self.level].edge_index)
+        while self.level > 0:
+            self.level -= 1
+
+            # The current level is i, and we start counting from 0.
+
+            # We need the node at level i+1 that results from merging a circuit
+            # at level i. randomname_0 is the first merged node and this
+            # happens at level 1. That is, randomname_0 is a node at level 1
+            # that results from merging a circuit at level 0.
+            merged_node = self.template.format(self.level)
+
+            # The circuit at level i that was merged as a node in the graph
+            # at level i+1.
+            circuit = self.circuits[self.level]
+            # print
+            # print(merged_node, self.level, circuit)
+            # print("before", edges)
+            # Note, we ask if it is a root in the full graph, not the branching.
+            # The branching alone doesn't have all the edges.
+            isroot, edgekey = is_root(self.graphs[self.level + 1], merged_node, edges)
+            edges.update(circuit)
+            if isroot:
+                minedge = self.minedge_circuit[self.level]
+                if minedge is None:
+                    raise Exception
+
+                # Remove the edge in the cycle with minimum weight.
+                edges.remove(minedge)
+            else:
+                # We have identified an edge at next higher level that
+                # transitions into the merged node at the level. That edge
+                # transitions to some corresponding node at the current level.
+                # We want to remove an edge from the cycle that transitions
+                # into the corresponding node.
+                # print("edgekey is: ", edgekey)
+                # print("circuit is: ", circuit)
+                # The branching at level i
+                G = self.graphs[self.level]
+                # print(G.edge_index)
+                target = G.edge_index[edgekey][1]
+                for edgekey in circuit:
+                    u, v, data = G.edge_index[edgekey]
+                    if v == target:
+                        break
+                else:
+                    raise Exception("Couldn't find edge incoming to merged node.")
+
+                edges.remove(edgekey)
+
+        self.edges = edges
+
+        H.add_nodes_from(self.G_original)
+        for edgekey in edges:
+            u, v, d = self.graphs[0].edge_index[edgekey]
+            dd = {self.attr: self.trans(d[self.attr])}
+
+            # Optionally, preserve the other edge attributes of the original
+            # graph
+            if preserve_attrs:
+                for key, value in d.items():
+                    if key not in [self.attr, self.candidate_attr]:
+                        dd[key] = value
+
+            # TODO: make this preserve the key.
+            H.add_edge(u, v, **dd)
+
+        return H
+
+
+@nx._dispatchable(preserve_edge_attrs=True, returns_graph=True)
+def maximum_branching(
+    G,
+    attr="weight",
+    default=1,
+    preserve_attrs=False,
+    partition=None,
+):
+    #######################################
+    ### Data Structure Helper Functions ###
+    #######################################
+
+    def edmonds_add_edge(G, edge_index, u, v, key, **d):
+        """
+        Adds an edge to `G` while also updating the edge index.
+
+        This algorithm requires the use of an external dictionary to track
+        the edge keys since it is possible that the source or destination
+        node of an edge will be changed and the default key-handling
+        capabilities of the MultiDiGraph class do not account for this.
+
+        Parameters
+        ----------
+        G : MultiDiGraph
+            The graph to insert an edge into.
+        edge_index : dict
+            A mapping from integers to the edges of the graph.
+        u : node
+            The source node of the new edge.
+        v : node
+            The destination node of the new edge.
+        key : int
+            The key to use from `edge_index`.
+        d : keyword arguments, optional
+            Other attributes to store on the new edge.
+        """
+
+        if key in edge_index:
+            uu, vv, _ = edge_index[key]
+            if (u != uu) or (v != vv):
+                raise Exception(f"Key {key!r} is already in use.")
+
+        G.add_edge(u, v, key, **d)
+        edge_index[key] = (u, v, G.succ[u][v][key])
+
+    def edmonds_remove_node(G, edge_index, n):
+        """
+        Remove a node from the graph, updating the edge index to match.
+
+        Parameters
+        ----------
+        G : MultiDiGraph
+            The graph to remove an edge from.
+        edge_index : dict
+            A mapping from integers to the edges of the graph.
+        n : node
+            The node to remove from `G`.
+        """
+        keys = set()
+        for keydict in G.pred[n].values():
+            keys.update(keydict)
+        for keydict in G.succ[n].values():
+            keys.update(keydict)
+
+        for key in keys:
+            del edge_index[key]
+
+        G.remove_node(n)
+
+    #######################
+    ### Algorithm Setup ###
+    #######################
+
+    # Pick an attribute name that the original graph is unlikely to have
+    candidate_attr = "edmonds' secret candidate attribute"
+    new_node_base_name = "edmonds new node base name "
+
+    G_original = G
+    G = nx.MultiDiGraph()
+    G.__networkx_cache__ = None  # Disable caching
+
+    # A dict to reliably track mutations to the edges using the key of the edge.
+    G_edge_index = {}
+    # Each edge is given an arbitrary numerical key
+    for key, (u, v, data) in enumerate(G_original.edges(data=True)):
+        d = {attr: data.get(attr, default)}
+
+        if data.get(partition) is not None:
+            d[partition] = data.get(partition)
+
+        if preserve_attrs:
+            for d_k, d_v in data.items():
+                if d_k != attr:
+                    d[d_k] = d_v
+
+        edmonds_add_edge(G, G_edge_index, u, v, key, **d)
+
+    level = 0  # Stores the number of contracted nodes
+
+    # These are the buckets from the paper.
+    #
+    # In the paper, G^i are modified versions of the original graph.
+    # D^i and E^i are the nodes and edges of the maximal edges that are
+    # consistent with G^i. In this implementation, D^i and E^i are stored
+    # together as the graph B^i. We will have strictly more B^i than the
+    # paper will have.
+    #
+    # Note that the data in graphs and branchings are tuples with the graph as
+    # the first element and the edge index as the second.
+    B = nx.MultiDiGraph()
+    B_edge_index = {}
+    graphs = []  # G^i list
+    branchings = []  # B^i list
+    selected_nodes = set()  # D^i bucket
+    uf = nx.utils.UnionFind()
+
+    # A list of lists of edge indices. Each list is a circuit for graph G^i.
+    # Note the edge list is not required to be a circuit in G^0.
+    circuits = []
+
+    # Stores the index of the minimum edge in the circuit found in G^i and B^i.
+    # The ordering of the edges seems to preserve the weight ordering from
+    # G^0. So even if the circuit does not form a circuit in G^0, it is still
+    # true that the minimum edge of the circuit in G^i is the minimum edge of
+    # the circuit in G^0 (despite the weights being different).
+    minedge_circuit = []
+
+    ###########################
+    ### Algorithm Structure ###
+    ###########################
+
+    # Each step listed in the algorithm is an inner function. Thus, the overall
+    # loop structure is:
+    #
+    # while True:
+    #     step_I1()
+    #     if cycle detected:
+    #         step_I2()
+    #     elif every node of G is in D and E is a branching:
+    #         break
+
+    ##################################
+    ### Algorithm Helper Functions ###
+    ##################################
+
+    def edmonds_find_desired_edge(v):
+        """
+        Find the edge directed towards v with maximal weight.
+
+        If an edge partition exists in this graph, return the included
+        edge if it exists and never return any excluded edge.
+
+        Note: There can only be one included edge for each vertex, otherwise
+        the edge partition is empty.
+
+        Parameters
+        ----------
+        v : node
+            The node to search for the maximal weight incoming edge.
+        """
+        edge = None
+        max_weight = -INF
+        for u, _, key, data in G.in_edges(v, data=True, keys=True):
+            # Skip excluded edges
+            if data.get(partition) == nx.EdgePartition.EXCLUDED:
+                continue
+
+            new_weight = data[attr]
+
+            # Return the included edge
+            if data.get(partition) == nx.EdgePartition.INCLUDED:
+                max_weight = new_weight
+                edge = (u, v, key, new_weight, data)
+                break
+
+            # Find the best open edge
+            if new_weight > max_weight:
+                max_weight = new_weight
+                edge = (u, v, key, new_weight, data)
+
+        return edge, max_weight
+
+    def edmonds_step_I2(v, desired_edge, level):
+        """
+        Perform step I2 from Edmonds' paper
+
+        First, check if the last step I1 created a cycle. If it did not, do nothing.
+        If it did, store the cycle for later reference and contract it.
+
+        Parameters
+        ----------
+        v : node
+            The current node to consider
+        desired_edge : edge
+            The edge to `v` chosen in step I1, which closed the cycle.
+        level : int
+            The current level, i.e. the number of cycles that have already been removed.
+        """
+        u = desired_edge[0]
+
+        Q_nodes = nx.shortest_path(B, v, u)
+        Q_edges = [
+            list(B[Q_nodes[i]][vv].keys())[0] for i, vv in enumerate(Q_nodes[1:])
+        ]
+        Q_edges.append(desired_edge[2])  # Add the new edge key to complete the circuit
+
+        # Get the edge in the circuit with the minimum weight.
+        # Also, save the incoming weights for each node.
+        minweight = INF
+        minedge = None
+        Q_incoming_weight = {}
+        for edge_key in Q_edges:
+            u, v, data = B_edge_index[edge_key]
+            w = data[attr]
+            # We cannot remove an included edge, even if it is the
+            # minimum edge in the circuit
+            Q_incoming_weight[v] = w
+            if data.get(partition) == nx.EdgePartition.INCLUDED:
+                continue
+            if w < minweight:
+                minweight = w
+                minedge = edge_key
+
+        circuits.append(Q_edges)
+        minedge_circuit.append(minedge)
+        graphs.append((G.copy(), G_edge_index.copy()))
+        branchings.append((B.copy(), B_edge_index.copy()))
+
+        # Mutate the graph to contract the circuit
+        new_node = new_node_base_name + str(level)
+        G.add_node(new_node)
+        new_edges = []
+        for u, v, key, data in G.edges(data=True, keys=True):
+            if u in Q_incoming_weight:
+                if v in Q_incoming_weight:
+                    # Circuit edge. For the moment do nothing,
+                    # eventually it will be removed.
+                    continue
+                else:
+                    # Outgoing edge from a node in the circuit.
+                    # Make it come from the new node instead
+                    dd = data.copy()
+                    new_edges.append((new_node, v, key, dd))
+            else:
+                if v in Q_incoming_weight:
+                    # Incoming edge to the circuit.
+                    # Update its weight.
+                    w = data[attr]
+                    w += minweight - Q_incoming_weight[v]
+                    dd = data.copy()
+                    dd[attr] = w
+                    new_edges.append((u, new_node, key, dd))
+                else:
+                    # Outside edge. No modification needed
+                    continue
+
+        for node in Q_nodes:
+            edmonds_remove_node(G, G_edge_index, node)
+            edmonds_remove_node(B, B_edge_index, node)
+
+        selected_nodes.difference_update(set(Q_nodes))
+
+        for u, v, key, data in new_edges:
+            edmonds_add_edge(G, G_edge_index, u, v, key, **data)
+            if candidate_attr in data:
+                del data[candidate_attr]
+                edmonds_add_edge(B, B_edge_index, u, v, key, **data)
+                uf.union(u, v)
+
+    def is_root(G, u, edgekeys):
+        """
+        Returns True if `u` is a root node in G.
+
+        Node `u` is a root node if its in-degree over the specified edges is zero.
+
+        Parameters
+        ----------
+        G : Graph
+            The current graph.
+        u : node
+            The node in `G` to check if it is a root.
+        edgekeys : iterable of edges
+            The edges over which to check whether `u` is a root.
+        """
+        if u not in G:
+            raise Exception(f"{u!r} not in G")
+
+        for v in G.pred[u]:
+            for edgekey in G.pred[u][v]:
+                if edgekey in edgekeys:
+                    return False, edgekey
+        else:
+            return True, None
+
+    nodes = iter(list(G.nodes))
+    while True:
+        try:
+            v = next(nodes)
+        except StopIteration:
+            # If there are no more new nodes to consider, then we should
+            # meet stopping condition (b) from the paper:
+            #   (b) every node of G^i is in D^i and E^i is a branching
+            assert len(G) == len(B)
+            if len(B):
+                assert is_branching(B)
+
+            graphs.append((G.copy(), G_edge_index.copy()))
+            branchings.append((B.copy(), B_edge_index.copy()))
+            circuits.append([])
+            minedge_circuit.append(None)
+
+            break
+        else:
+            #####################
+            ### BEGIN STEP I1 ###
+            #####################
+
+            # This is a very simple step, so I don't think it needs a method of its own
+            if v in selected_nodes:
+                continue
+
+        selected_nodes.add(v)
+        B.add_node(v)
+        desired_edge, desired_edge_weight = edmonds_find_desired_edge(v)
+
+        # There might be no desired edge if all edges are excluded or
+        # v is the last node to be added to B, the ultimate root of the branching
+        if desired_edge is not None and desired_edge_weight > 0:
+            u = desired_edge[0]
+            # Flag whether adding the edge will create a circuit before
+            # merging the two connected components of u and v in B
+            circuit = uf[u] == uf[v]
+            dd = {attr: desired_edge_weight}
+            if desired_edge[4].get(partition) is not None:
+                dd[partition] = desired_edge[4].get(partition)
+
+            edmonds_add_edge(B, B_edge_index, u, v, desired_edge[2], **dd)
+            G[u][v][desired_edge[2]][candidate_attr] = True
+            uf.union(u, v)
+
+            ###################
+            ### END STEP I1 ###
+            ###################
+
+            #####################
+            ### BEGIN STEP I2 ###
+            #####################
+
+            if circuit:
+                edmonds_step_I2(v, desired_edge, level)
+                nodes = iter(list(G.nodes()))
+                level += 1
+
+            ###################
+            ### END STEP I2 ###
+            ###################
+
+    #####################
+    ### BEGIN STEP I3 ###
+    #####################
+
+    # Create a new graph of the same class as the input graph
+    H = G_original.__class__()
+
+    # Start with the branching edges in the last level.
+    edges = set(branchings[level][1])
+    while level > 0:
+        level -= 1
+
+        # The current level is i, and we start counting from 0.
+        #
+        # We need the node at level i+1 that results from merging a circuit
+        # at level i. basename_0 is the first merged node and this happens
+        # at level 1. That is, basename_0 is a node at level 1 that results
+        # from merging a circuit at level 0.
+
+        merged_node = new_node_base_name + str(level)
+        circuit = circuits[level]
+        isroot, edgekey = is_root(graphs[level + 1][0], merged_node, edges)
+        edges.update(circuit)
+
+        if isroot:
+            minedge = minedge_circuit[level]
+            if minedge is None:
+                raise Exception
+
+            # Remove the edge in the cycle with minimum weight
+            edges.remove(minedge)
+        else:
+            # We have identified an edge at the next higher level that
+            # transitions into the merged node at this level. That edge
+            # transitions to some corresponding node at the current level.
+            #
+            # We want to remove an edge from the cycle that transitions
+            # into the corresponding node, otherwise the result would not
+            # be a branching.
+
+            G, G_edge_index = graphs[level]
+            target = G_edge_index[edgekey][1]
+            for edgekey in circuit:
+                u, v, data = G_edge_index[edgekey]
+                if v == target:
+                    break
+            else:
+                raise Exception("Couldn't find edge incoming to merged node.")

+            edges.remove(edgekey)

-@nx._dispatchable(preserve_edge_attrs=True, mutates_input=True,
-    returns_graph=True)
-def minimal_branching(G, /, *, attr='weight', default=1, preserve_attrs=
-    False, partition=None):
+    H.add_nodes_from(G_original)
+    for edgekey in edges:
+        u, v, d = graphs[0][1][edgekey]
+        dd = {attr: d[attr]}
+
+        if preserve_attrs:
+            for key, value in d.items():
+                if key not in [attr, candidate_attr]:
+                    dd[key] = value
+
+        H.add_edge(u, v, **dd)
+
+    ###################
+    ### END STEP I3 ###
+    ###################
+
+    return H
+
+
+@nx._dispatchable(preserve_edge_attrs=True, mutates_input=True, returns_graph=True)
+def minimum_branching(
+    G, attr="weight", default=1, preserve_attrs=False, partition=None
+):
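+    # Negating every edge weight turns the minimum-branching problem into a
+    # maximum-branching one; the weights of G and of the result B are then
+    # negated back to their original values.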
+    for _, _, d in G.edges(data=True):
+        d[attr] = -d.get(attr, default)
+    nx._clear_cache(G)
+
+    B = maximum_branching(G, attr, default, preserve_attrs, partition)
+
+    for _, _, d in G.edges(data=True):
+        d[attr] = -d.get(attr, default)
+    nx._clear_cache(G)
+
+    for _, _, d in B.edges(data=True):
+        d[attr] = -d.get(attr, default)
+    nx._clear_cache(B)
+
+    return B
+
+
+@nx._dispatchable(preserve_edge_attrs=True, mutates_input=True, returns_graph=True)
+def minimal_branching(
+    G, /, *, attr="weight", default=1, preserve_attrs=False, partition=None
+):
     """
     Returns a minimal branching from `G`.

@@ -265,7 +1226,94 @@ def minimal_branching(G, /, *, attr='weight', default=1, preserve_attrs=
     B : (multi)digraph-like
         A minimal branching.
     """
-    pass
+    max_weight = -INF
+    min_weight = INF
+    for _, _, w in G.edges(data=attr, default=default):
+        if w > max_weight:
+            max_weight = w
+        if w < min_weight:
+            min_weight = w
+
+    for _, _, d in G.edges(data=True):
+        # Transform the weights so that the minimum weight is larger than
+        # the difference between the max and min weights. This is important
+        # in order to prevent the edge weights from becoming negative during
+        # computation
+        d[attr] = max_weight + 1 + (max_weight - min_weight) - d.get(attr, default)
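+        # (Illustrative: with weights in [1, 5] every weight w becomes
+        # 5 + 1 + (5 - 1) - w = 10 - w, which is strictly positive and
+        # order-reversing, so a maximum branching here is a minimal
+        # branching of G.)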
+    nx._clear_cache(G)
+
+    B = maximum_branching(G, attr, default, preserve_attrs, partition)
+
+    # Reverse the weight transformations
+    for _, _, d in G.edges(data=True):
+        d[attr] = max_weight + 1 + (max_weight - min_weight) - d.get(attr, default)
+    nx._clear_cache(G)
+
+    for _, _, d in B.edges(data=True):
+        d[attr] = max_weight + 1 + (max_weight - min_weight) - d.get(attr, default)
+    nx._clear_cache(B)
+
+    return B
+
+
+@nx._dispatchable(preserve_edge_attrs=True, mutates_input=True, returns_graph=True)
+def maximum_spanning_arborescence(
+    G, attr="weight", default=1, preserve_attrs=False, partition=None
+):
+    # In order to use the same algorithm as the maximum branching, we need to adjust
+    # the weights of the graph. The branching algorithm can choose to not include an
+    # edge if it doesn't help find a branching, mainly triggered by edges with negative
+    # weights.
+    #
+    # To prevent this from happening while trying to find a spanning arborescence, we
+    # just have to tweak the edge weights so that they are all positive and cannot
+    # become negative during the branching algorithm, find the maximum branching and
+    # then return them to their original values.
+
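+    # (Sketch: every weight w becomes w - min_weight + 1 + (max_weight -
+    # min_weight), which is at least 1, so the branching algorithm never
+    # drops an edge for having non-positive weight.)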
+    min_weight = INF
+    max_weight = -INF
+    for _, _, w in G.edges(data=attr, default=default):
+        if w < min_weight:
+            min_weight = w
+        if w > max_weight:
+            max_weight = w
+
+    for _, _, d in G.edges(data=True):
+        d[attr] = d.get(attr, default) - min_weight + 1 - (min_weight - max_weight)
+    nx._clear_cache(G)
+
+    B = maximum_branching(G, attr, default, preserve_attrs, partition)
+
+    for _, _, d in G.edges(data=True):
+        d[attr] = d.get(attr, default) + min_weight - 1 + (min_weight - max_weight)
+    nx._clear_cache(G)
+
+    for _, _, d in B.edges(data=True):
+        d[attr] = d.get(attr, default) + min_weight - 1 + (min_weight - max_weight)
+    nx._clear_cache(B)
+
+    if not is_arborescence(B):
+        raise nx.exception.NetworkXException("No maximum spanning arborescence in G.")
+
+    return B
+
+
+@nx._dispatchable(preserve_edge_attrs=True, mutates_input=True, returns_graph=True)
+def minimum_spanning_arborescence(
+    G, attr="weight", default=1, preserve_attrs=False, partition=None
+):
+    B = minimal_branching(
+        G,
+        attr=attr,
+        default=default,
+        preserve_attrs=preserve_attrs,
+        partition=partition,
+    )
+
+    if not is_arborescence(B):
+        raise nx.exception.NetworkXException("No minimum spanning arborescence in G.")
+
+    return B


 docstring_branching = """
@@ -293,24 +1341,38 @@ Returns
 B : (multi)digraph-like
     A {kind} {style}.
 """
-docstring_arborescence = docstring_branching + """
+
+docstring_arborescence = (
+    docstring_branching
+    + """
 Raises
 ------
 NetworkXException
     If the graph does not contain a {kind} {style}.

 """
-maximum_branching.__doc__ = docstring_branching.format(kind='maximum',
-    style='branching')
-minimum_branching.__doc__ = docstring_branching.format(kind='minimum', style='branching') + """
+)
+
+maximum_branching.__doc__ = docstring_branching.format(
+    kind="maximum", style="branching"
+)
+
+minimum_branching.__doc__ = (
+    docstring_branching.format(kind="minimum", style="branching")
+    + """
 See Also
 --------
     minimal_branching
 """
-maximum_spanning_arborescence.__doc__ = docstring_arborescence.format(kind=
-    'maximum', style='spanning arborescence')
-minimum_spanning_arborescence.__doc__ = docstring_arborescence.format(kind=
-    'minimum', style='spanning arborescence')
+)
+
+maximum_spanning_arborescence.__doc__ = docstring_arborescence.format(
+    kind="maximum", style="spanning arborescence"
+)
+
+minimum_spanning_arborescence.__doc__ = docstring_arborescence.format(
+    kind="minimum", style="spanning arborescence"
+)


 class ArborescenceIterator:
@@ -334,7 +1396,6 @@ class ArborescenceIterator:
            https://www.scielo.br/j/pope/a/XHswBwRwJyrfL88dmMwYNWp/?lang=en
     """

-
     @dataclass(order=True)
     class Partition:
         """
@@ -342,14 +1403,16 @@ class ArborescenceIterator:
         data and the weight of the minimum spanning arborescence of the
         partition dict.
         """
+
         mst_weight: float
         partition_dict: dict = field(compare=False)

         def __copy__(self):
-            return ArborescenceIterator.Partition(self.mst_weight, self.
-                partition_dict.copy())
+            return ArborescenceIterator.Partition(
+                self.mst_weight, self.partition_dict.copy()
+            )

-    def __init__(self, G, weight='weight', minimum=True, init_partition=None):
+    def __init__(self, G, weight="weight", minimum=True, init_partition=None):
         """
         Initialize the iterator

@@ -375,18 +1438,20 @@ class ArborescenceIterator:
         self.G = G.copy()
         self.weight = weight
         self.minimum = minimum
-        self.method = (minimum_spanning_arborescence if minimum else
-            maximum_spanning_arborescence)
+        self.method = (
+            minimum_spanning_arborescence if minimum else maximum_spanning_arborescence
+        )
+        # Use a dedicated edge-attribute key to hold the partition data
         self.partition_key = (
-            'ArborescenceIterators super secret partition attribute name')
+            "ArborescenceIterators super secret partition attribute name"
+        )
         if init_partition is not None:
             partition_dict = {}
             for e in init_partition[0]:
                 partition_dict[e] = nx.EdgePartition.INCLUDED
             for e in init_partition[1]:
                 partition_dict[e] = nx.EdgePartition.EXCLUDED
-            self.init_partition = ArborescenceIterator.Partition(0,
-                partition_dict)
+            self.init_partition = ArborescenceIterator.Partition(0, partition_dict)
         else:
             self.init_partition = None

@@ -399,13 +1464,27 @@ class ArborescenceIterator:
         """
         self.partition_queue = PriorityQueue()
         self._clear_partition(self.G)
+
+        # Write the initial partition if it exists.
         if self.init_partition is not None:
             self._write_partition(self.init_partition)
-        mst_weight = self.method(self.G, self.weight, partition=self.
-            partition_key, preserve_attrs=True).size(weight=self.weight)
-        self.partition_queue.put(self.Partition(mst_weight if self.minimum else
-            -mst_weight, {} if self.init_partition is None else self.
-            init_partition.partition_dict))
+
+        mst_weight = self.method(
+            self.G,
+            self.weight,
+            partition=self.partition_key,
+            preserve_attrs=True,
+        ).size(weight=self.weight)
+
+        self.partition_queue.put(
+            self.Partition(
+                mst_weight if self.minimum else -mst_weight,
+                {}
+                if self.init_partition is None
+                else self.init_partition.partition_dict,
+            )
+        )
+
         return self

     def __next__(self):
@@ -419,11 +1498,17 @@ class ArborescenceIterator:
         if self.partition_queue.empty():
             del self.G, self.partition_queue
             raise StopIteration
+
         partition = self.partition_queue.get()
         self._write_partition(partition)
-        next_arborescence = self.method(self.G, self.weight, partition=self
-            .partition_key, preserve_attrs=True)
+        next_arborescence = self.method(
+            self.G,
+            self.weight,
+            partition=self.partition_key,
+            preserve_attrs=True,
+        )
         self._partition(partition, next_arborescence)
+
         self._clear_partition(next_arborescence)
         return next_arborescence

@@ -440,7 +1525,32 @@ class ArborescenceIterator:
         partition_arborescence : nx.Graph
             The minimum spanning arborescence of the input partition.
         """
-        pass
+        # create two new partitions with the data from the input partition dict
+        p1 = self.Partition(0, partition.partition_dict.copy())
+        p2 = self.Partition(0, partition.partition_dict.copy())
+        for e in partition_arborescence.edges:
+            # determine if the edge was open or included
+            if e not in partition.partition_dict:
+                # This is an open edge
+                p1.partition_dict[e] = nx.EdgePartition.EXCLUDED
+                p2.partition_dict[e] = nx.EdgePartition.INCLUDED
+
+                self._write_partition(p1)
+                try:
+                    p1_mst = self.method(
+                        self.G,
+                        self.weight,
+                        partition=self.partition_key,
+                        preserve_attrs=True,
+                    )
+
+                    p1_mst_weight = p1_mst.size(weight=self.weight)
+                    p1.mst_weight = p1_mst_weight if self.minimum else -p1_mst_weight
+                    self.partition_queue.put(p1.__copy__())
+                except nx.NetworkXException:
+                    pass
+
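+                # Carry the inclusion forward: the next open edge is
+                # partitioned against a dict in which this edge is already
+                # INCLUDED.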
+                p1.partition_dict = p2.partition_dict.copy()

     def _write_partition(self, partition):
         """
@@ -455,10 +1565,33 @@ class ArborescenceIterator:
             A Partition dataclass describing a partition on the edges of the
             graph.
         """
-        pass
+        for u, v, d in self.G.edges(data=True):
+            if (u, v) in partition.partition_dict:
+                d[self.partition_key] = partition.partition_dict[(u, v)]
+            else:
+                d[self.partition_key] = nx.EdgePartition.OPEN
+        nx._clear_cache(self.G)
+
+        for n in self.G:
+            included_count = 0
+            excluded_count = 0
+            for u, v, d in self.G.in_edges(nbunch=n, data=True):
+                if d.get(self.partition_key) == nx.EdgePartition.INCLUDED:
+                    included_count += 1
+                elif d.get(self.partition_key) == nx.EdgePartition.EXCLUDED:
+                    excluded_count += 1
+            # Check that if there is an included edge, all other incoming
+            # edges are excluded. If not, fix it!
+            if included_count == 1 and excluded_count != self.G.in_degree(n) - 1:
+                for u, v, d in self.G.in_edges(nbunch=n, data=True):
+                    if d.get(self.partition_key) != nx.EdgePartition.INCLUDED:
+                        d[self.partition_key] = nx.EdgePartition.EXCLUDED

     def _clear_partition(self, G):
         """
         Removes partition data from the graph
         """
-        pass
+        for u, v, d in G.edges(data=True):
+            if self.partition_key in d:
+                del d[self.partition_key]
+        nx._clear_cache(self.G)
diff --git a/networkx/algorithms/tree/coding.py b/networkx/algorithms/tree/coding.py
index 5d4a402e2..8cec023c2 100644
--- a/networkx/algorithms/tree/coding.py
+++ b/networkx/algorithms/tree/coding.py
@@ -10,10 +10,17 @@ sequences to labeled trees.
 """
 from collections import Counter
 from itertools import chain
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['from_nested_tuple', 'from_prufer_sequence', 'NotATree',
-    'to_nested_tuple', 'to_prufer_sequence']
+
+__all__ = [
+    "from_nested_tuple",
+    "from_prufer_sequence",
+    "NotATree",
+    "to_nested_tuple",
+    "to_prufer_sequence",
+]


 class NotATree(nx.NetworkXException):
@@ -24,8 +31,8 @@ class NotATree(nx.NetworkXException):
     """


-@not_implemented_for('directed')
-@nx._dispatchable(graphs='T')
+@not_implemented_for("directed")
+@nx._dispatchable(graphs="T")
 def to_nested_tuple(T, root, canonical_form=False):
     """Returns a nested tuple representation of the given tree.

@@ -91,7 +98,34 @@ def to_nested_tuple(T, root, canonical_form=False):
         ((((),),),)

     """
-    pass
+
+    def _make_tuple(T, root, _parent):
+        """Recursively compute the nested tuple representation of the
+        given rooted tree.
+
+        ``_parent`` is the parent node of ``root`` in the supertree in
+        which ``T`` is a subtree, or ``None`` if ``root`` is the root of
+        the supertree. This argument is used to determine which
+        neighbors of ``root`` are children and which is the parent.
+
+        """
+        # Get the neighbors of `root` that are not the parent node. We
+        # are guaranteed that `root` is always in `T` by construction.
+        children = set(T[root]) - {_parent}
+        if len(children) == 0:
+            return ()
+        nested = (_make_tuple(T, v, root) for v in children)
+        if canonical_form:
+            nested = sorted(nested)
+        return tuple(nested)
+
+    # Do some sanity checks on the input.
+    if not nx.is_tree(T):
+        raise nx.NotATree("provided graph is not a tree")
+    if root not in T:
+        raise nx.NodeNotFound(f"Graph {T} contains no node {root}")
+
+    return _make_tuple(T, root, None)


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -144,13 +178,44 @@ def from_nested_tuple(sequence, sensible_relabeling=False):
         True

     """
-    pass
-

-@not_implemented_for('directed')
-@nx._dispatchable(graphs='T')
+    def _make_tree(sequence):
+        """Recursively creates a tree from the given sequence of nested
+        tuples.
+
+        This function employs the :func:`~networkx.tree.join` function
+        to recursively join subtrees into a larger tree.
+
+        """
+        # The empty sequence represents the empty tree, which is the
+        # (unique) graph with a single node. We mark the single node
+        # with an attribute that indicates that it is the root of the
+        # graph.
+        if len(sequence) == 0:
+            return nx.empty_graph(1)
+        # For a nonempty sequence, get the subtrees for each child
+        # sequence and join all the subtrees at their roots. After
+        # joining the subtrees, the root is node 0.
+        return nx.tree.join_trees([(_make_tree(child), 0) for child in sequence])
+
+    # Make the tree and remove the `is_root` node attribute added by the
+    # helper function.
+    T = _make_tree(sequence)
+    if sensible_relabeling:
+        # Relabel the nodes according to their breadth-first search
+        # order, starting from the root node (that is, the node 0).
+        bfs_nodes = chain([0], (v for u, v in nx.bfs_edges(T, 0)))
+        labels = {v: i for i, v in enumerate(bfs_nodes)}
+        # We would like to use `copy=False`, but `relabel_nodes` doesn't
+        # allow a relabel mapping that can't be topologically sorted.
+        T = nx.relabel_nodes(T, labels)
+    return T
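
A quick round-trip check of to_nested_tuple/from_nested_tuple (illustrative,
not part of the patch):

    import networkx as nx

    # Encode a star rooted at its center, then rebuild it from the tuple.
    T = nx.star_graph(3)          # center 0 with leaves 1, 2, 3
    t = nx.to_nested_tuple(T, 0)  # ((), (), ())
    R = nx.from_nested_tuple(t)
    assert nx.is_isomorphic(T, R)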
+
+
+@not_implemented_for("directed")
+@nx._dispatchable(graphs="T")
 def to_prufer_sequence(T):
-    """Returns the Prüfer sequence of the given tree.
+    r"""Returns the Prüfer sequence of the given tree.

     A *Prüfer sequence* is a list of *n* - 2 numbers between 0 and
     *n* - 1, inclusive. The tree corresponding to a given Prüfer
@@ -221,12 +286,37 @@ def to_prufer_sequence(T):
     True

     """
-    pass
+    # Perform some sanity checks on the input.
+    n = len(T)
+    if n < 2:
+        msg = "Prüfer sequence undefined for trees with fewer than two nodes"
+        raise nx.NetworkXPointlessConcept(msg)
+    if not nx.is_tree(T):
+        raise nx.NotATree("provided graph is not a tree")
+    if set(T) != set(range(n)):
+        raise KeyError("tree must have node labels {0, ..., n - 1}")
+
+    degree = dict(T.degree())
+
+    def parents(u):
+        return next(v for v in T[u] if degree[v] > 1)
+
+    index = u = next(k for k in range(n) if degree[k] == 1)
+    result = []
+    for i in range(n - 2):
+        v = parents(u)
+        result.append(v)
+        degree[v] -= 1
+        if v < index and degree[v] == 1:
+            u = v
+        else:
+            index = u = next(k for k in range(index + 1, n) if degree[k] == 1)
+    return result


 @nx._dispatchable(graphs=None, returns_graph=True)
 def from_prufer_sequence(sequence):
-    """Returns the tree corresponding to the given Prüfer sequence.
+    r"""Returns the tree corresponding to the given Prüfer sequence.

     A *Prüfer sequence* is a list of *n* - 2 numbers between 0 and
     *n* - 1, inclusive. The tree corresponding to a given Prüfer
@@ -291,4 +381,32 @@ def from_prufer_sequence(sequence):
     True

     """
-    pass
+    n = len(sequence) + 2
+    # `degree` stores the remaining degree (plus one) for each node. The
+    # degree of a node in the decoded tree is one more than the number
+    # of times it appears in the code.
+    degree = Counter(chain(sequence, range(n)))
+    T = nx.empty_graph(n)
+    # `not_orphaned` is the set of nodes that have a parent in the
+    # tree. After the loop, there should be exactly two nodes that are
+    # not in this set.
+    not_orphaned = set()
+    index = u = next(k for k in range(n) if degree[k] == 1)
+    for v in sequence:
+        # check the validity of the prufer sequence
+        if v < 0 or v > n - 1:
+            raise nx.NetworkXError(
+                f"Invalid Prufer sequence: Values must be between 0 and {n-1}, got {v}"
+            )
+        T.add_edge(u, v)
+        not_orphaned.add(u)
+        degree[v] -= 1
+        if v < index and degree[v] == 1:
+            u = v
+        else:
+            index = u = next(k for k in range(index + 1, n) if degree[k] == 1)
+    # At this point, there must be exactly two orphaned nodes; join them.
+    orphans = set(T) - not_orphaned
+    u, v = orphans
+    T.add_edge(u, v)
+    return T
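
A small sanity check for the Prüfer codec above: by Cayley's formula a
labeled tree on n nodes corresponds to a unique sequence of length n - 2,
so encoding and decoding should round-trip.

    import networkx as nx

    T = nx.path_graph(4)            # 0 - 1 - 2 - 3
    seq = nx.to_prufer_sequence(T)  # the two interior nodes
    assert seq == [1, 2]
    assert nx.is_isomorphic(nx.from_prufer_sequence(seq), T)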
diff --git a/networkx/algorithms/tree/decomposition.py b/networkx/algorithms/tree/decomposition.py
index e85654c9c..c8b8f2477 100644
--- a/networkx/algorithms/tree/decomposition.py
+++ b/networkx/algorithms/tree/decomposition.py
@@ -1,15 +1,18 @@
-"""Function for computing a junction tree of a graph."""
+r"""Function for computing a junction tree of a graph."""
+
 from itertools import combinations
+
 import networkx as nx
 from networkx.algorithms import chordal_graph_cliques, complete_to_chordal_graph, moral
 from networkx.utils import not_implemented_for
-__all__ = ['junction_tree']
+
+__all__ = ["junction_tree"]


-@not_implemented_for('multigraph')
+@not_implemented_for("multigraph")
 @nx._dispatchable(returns_graph=True)
 def junction_tree(G):
-    """Returns a junction tree of a given graph.
+    r"""Returns a junction tree of a given graph.

     A junction tree (or clique tree) is constructed from a (un)directed graph G.
     The tree is constructed based on a moralized and triangulated version of G.
@@ -57,4 +60,29 @@ def junction_tree(G):
        conference on Uncertainty in artificial intelligence (UAI’94).
        Morgan Kaufmann Publishers Inc., San Francisco, CA, USA, 360–366.
     """
-    pass
+
+    clique_graph = nx.Graph()
+
+    if G.is_directed():
+        G = moral.moral_graph(G)
+    chordal_graph, _ = complete_to_chordal_graph(G)
+
+    cliques = [tuple(sorted(i)) for i in chordal_graph_cliques(chordal_graph)]
+    clique_graph.add_nodes_from(cliques, type="clique")
+
+    for edge in combinations(cliques, 2):
+        set_edge_0 = set(edge[0])
+        set_edge_1 = set(edge[1])
+        if not set_edge_0.isdisjoint(set_edge_1):
+            sepset = tuple(sorted(set_edge_0.intersection(set_edge_1)))
+            clique_graph.add_edge(edge[0], edge[1], weight=len(sepset), sepset=sepset)
+
+    junction_tree = nx.maximum_spanning_tree(clique_graph)
+
+    for edge in list(junction_tree.edges(data=True)):
+        junction_tree.add_node(edge[2]["sepset"], type="sepset")
+        junction_tree.add_edge(edge[0], edge[2]["sepset"])
+        junction_tree.add_edge(edge[1], edge[2]["sepset"])
+        junction_tree.remove_edge(edge[0], edge[1])
+
+    return junction_tree
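
Illustrative usage of junction_tree (my example; the exact clique tuples
depend on the triangulation): clique nodes and separator nodes alternate,
so every node carries one of the two "type" attributes set above.

    import networkx as nx

    G = nx.Graph([("A", "B"), ("B", "C"), ("C", "D")])
    jt = nx.junction_tree(G)
    assert {d["type"] for _, d in jt.nodes(data=True)} == {"clique", "sepset"}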
diff --git a/networkx/algorithms/tree/mst.py b/networkx/algorithms/tree/mst.py
index 50680f2fa..9e8ea3843 100644
--- a/networkx/algorithms/tree/mst.py
+++ b/networkx/algorithms/tree/mst.py
@@ -9,12 +9,21 @@ from itertools import count
 from math import isnan
 from operator import itemgetter
 from queue import PriorityQueue
+
 import networkx as nx
 from networkx.utils import UnionFind, not_implemented_for, py_random_state
-__all__ = ['minimum_spanning_edges', 'maximum_spanning_edges',
-    'minimum_spanning_tree', 'maximum_spanning_tree',
-    'number_of_spanning_trees', 'random_spanning_tree',
-    'partition_spanning_tree', 'EdgePartition', 'SpanningTreeIterator']
+
+__all__ = [
+    "minimum_spanning_edges",
+    "maximum_spanning_edges",
+    "minimum_spanning_tree",
+    "maximum_spanning_tree",
+    "number_of_spanning_trees",
+    "random_spanning_tree",
+    "partition_spanning_tree",
+    "EdgePartition",
+    "SpanningTreeIterator",
+]


 class EdgePartition(Enum):
@@ -26,15 +35,17 @@ class EdgePartition(Enum):
     - EdgePartition.INCLUDED
     - EdgePartition.EXCLUDED
     """
+
     OPEN = 0
     INCLUDED = 1
     EXCLUDED = 2


-@not_implemented_for('multigraph')
-@nx._dispatchable(edge_attrs='weight', preserve_edge_attrs='data')
-def boruvka_mst_edges(G, minimum=True, weight='weight', keys=False, data=
-    True, ignore_nan=False):
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight", preserve_edge_attrs="data")
+def boruvka_mst_edges(
+    G, minimum=True, weight="weight", keys=False, data=True, ignore_nan=False
+):
     """Iterate over edges of a Borůvka's algorithm min/max spanning tree.

     Parameters
@@ -64,13 +75,76 @@ def boruvka_mst_edges(G, minimum=True, weight='weight', keys=False, data=
         If `ignore_nan is True` then that edge is ignored instead.

     """
-    pass
+    # Initialize a forest, assuming initially that it is the discrete
+    # partition of the nodes of the graph.
+    forest = UnionFind(G)

+    def best_edge(component):
+        """Returns the optimum (minimum or maximum) edge on the edge
+        boundary of the given set of nodes.

-@nx._dispatchable(edge_attrs={'weight': None, 'partition': None},
-    preserve_edge_attrs='data')
-def kruskal_mst_edges(G, minimum, weight='weight', keys=True, data=True,
-    ignore_nan=False, partition=None):
+        A return value of ``None`` indicates an empty boundary.
+
+        """
+        sign = 1 if minimum else -1
+        minwt = float("inf")
+        boundary = None
+        for e in nx.edge_boundary(G, component, data=True):
+            wt = e[-1].get(weight, 1) * sign
+            if isnan(wt):
+                if ignore_nan:
+                    continue
+                msg = f"NaN found as an edge weight. Edge {e}"
+                raise ValueError(msg)
+            if wt < minwt:
+                minwt = wt
+                boundary = e
+        return boundary
+
+    # Determine the optimum edge in the edge boundary of each component
+    # in the forest.
+    best_edges = (best_edge(component) for component in forest.to_sets())
+    best_edges = [edge for edge in best_edges if edge is not None]
+    # If each entry was ``None``, that means the graph was disconnected,
+    # so we are done generating the forest.
+    while best_edges:
+        # Determine the optimum edge in the edge boundary of each
+        # component in the forest.
+        #
+        # This must be a sequence, not an iterator. In this list, the
+        # same edge may appear twice, in different orientations (but
+        # that's okay, since a union operation will be called on the
+        # endpoints the first time it is seen, but not the second time).
+        #
+        # Any ``None`` indicates that the edge boundary for that
+        # component was empty, so that part of the forest has been
+        # completed.
+        #
+        # TODO This can be parallelized, both in the outer loop over
+        # each component in the forest and in the computation of the
+        # minimum. (Same goes for the identical lines outside the loop.)
+        best_edges = (best_edge(component) for component in forest.to_sets())
+        best_edges = [edge for edge in best_edges if edge is not None]
+        # Join trees in the forest using the best edges, and yield that
+        # edge, since it is part of the spanning tree.
+        #
+        # TODO This loop can be parallelized, to an extent (the union
+        # operation must be atomic).
+        for u, v, d in best_edges:
+            if forest[u] != forest[v]:
+                if data:
+                    yield u, v, d
+                else:
+                    yield u, v
+                forest.union(u, v)
+
+
+@nx._dispatchable(
+    edge_attrs={"weight": None, "partition": None}, preserve_edge_attrs="data"
+)
+def kruskal_mst_edges(
+    G, minimum, weight="weight", keys=True, data=True, ignore_nan=False, partition=None
+):
     """
     Iterate over edges of Kruskal's algorithm min/max spanning tree.

@@ -111,12 +185,75 @@ def kruskal_mst_edges(G, minimum, weight='weight', keys=True, data=True,
         take the following forms: `(u, v)`, `(u, v, d)` or `(u, v, k, d)`
         depending on the `key` and `data` parameters
     """
-    pass
+    subtrees = UnionFind()
+    if G.is_multigraph():
+        edges = G.edges(keys=True, data=True)
+    else:
+        edges = G.edges(data=True)

+    """
+    Sort the edges of the graph with respect to the partition data. 
+    Edges are returned in the following order:

-@nx._dispatchable(edge_attrs='weight', preserve_edge_attrs='data')
-def prim_mst_edges(G, minimum, weight='weight', keys=True, data=True,
-    ignore_nan=False):
+    #   * Included edges
+    #   * Open edges from smallest to largest weight
+    #   * Excluded edges
+    included_edges = []
+    open_edges = []
+    for e in edges:
+        d = e[-1]
+        wt = d.get(weight, 1)
+        if isnan(wt):
+            if ignore_nan:
+                continue
+            raise ValueError(f"NaN found as an edge weight. Edge {e}")
+
+        edge = (wt,) + e
+        if d.get(partition) == EdgePartition.INCLUDED:
+            included_edges.append(edge)
+        elif d.get(partition) == EdgePartition.EXCLUDED:
+            continue
+        else:
+            open_edges.append(edge)
+
+    if minimum:
+        sorted_open_edges = sorted(open_edges, key=itemgetter(0))
+    else:
+        sorted_open_edges = sorted(open_edges, key=itemgetter(0), reverse=True)
+
+    # Condense the lists into one
+    included_edges.extend(sorted_open_edges)
+    sorted_edges = included_edges
+    del open_edges, sorted_open_edges, included_edges
+
+    # Multigraphs need to handle edge keys in addition to edge data.
+    if G.is_multigraph():
+        for wt, u, v, k, d in sorted_edges:
+            if subtrees[u] != subtrees[v]:
+                if keys:
+                    if data:
+                        yield u, v, k, d
+                    else:
+                        yield u, v, k
+                else:
+                    if data:
+                        yield u, v, d
+                    else:
+                        yield u, v
+                subtrees.union(u, v)
+    else:
+        for wt, u, v, d in sorted_edges:
+            if subtrees[u] != subtrees[v]:
+                if data:
+                    yield u, v, d
+                else:
+                    yield u, v
+                subtrees.union(u, v)
+
+
+@nx._dispatchable(edge_attrs="weight", preserve_edge_attrs="data")
+def prim_mst_edges(G, minimum, weight="weight", keys=True, data=True, ignore_nan=False):
     """Iterate over edges of Prim's algorithm min/max spanning tree.

     Parameters
@@ -144,17 +281,97 @@ def prim_mst_edges(G, minimum, weight='weight', keys=True, data=True,
         If `ignore_nan is True` then that edge is ignored instead.

     """
-    pass
-
-
-ALGORITHMS = {'boruvka': boruvka_mst_edges, 'borůvka': boruvka_mst_edges,
-    'kruskal': kruskal_mst_edges, 'prim': prim_mst_edges}
-
-
-@not_implemented_for('directed')
-@nx._dispatchable(edge_attrs='weight', preserve_edge_attrs='data')
-def minimum_spanning_edges(G, algorithm='kruskal', weight='weight', keys=
-    True, data=True, ignore_nan=False):
+    is_multigraph = G.is_multigraph()
+    push = heappush
+    pop = heappop
+
+    nodes = set(G)
+    c = count()
+
+    sign = 1 if minimum else -1
+
+    while nodes:
+        u = nodes.pop()
+        frontier = []
+        visited = {u}
+        if is_multigraph:
+            for v, keydict in G.adj[u].items():
+                for k, d in keydict.items():
+                    wt = d.get(weight, 1) * sign
+                    if isnan(wt):
+                        if ignore_nan:
+                            continue
+                        msg = f"NaN found as an edge weight. Edge {(u, v, k, d)}"
+                        raise ValueError(msg)
+                    push(frontier, (wt, next(c), u, v, k, d))
+        else:
+            for v, d in G.adj[u].items():
+                wt = d.get(weight, 1) * sign
+                if isnan(wt):
+                    if ignore_nan:
+                        continue
+                    msg = f"NaN found as an edge weight. Edge {(u, v, d)}"
+                    raise ValueError(msg)
+                push(frontier, (wt, next(c), u, v, d))
+        while nodes and frontier:
+            if is_multigraph:
+                W, _, u, v, k, d = pop(frontier)
+            else:
+                W, _, u, v, d = pop(frontier)
+            if v in visited or v not in nodes:
+                continue
+            # Multigraphs need to handle edge keys in addition to edge data.
+            if is_multigraph and keys:
+                if data:
+                    yield u, v, k, d
+                else:
+                    yield u, v, k
+            else:
+                if data:
+                    yield u, v, d
+                else:
+                    yield u, v
+            # update frontier
+            visited.add(v)
+            nodes.discard(v)
+            if is_multigraph:
+                for w, keydict in G.adj[v].items():
+                    if w in visited:
+                        continue
+                    for k2, d2 in keydict.items():
+                        new_weight = d2.get(weight, 1) * sign
+                        if isnan(new_weight):
+                            if ignore_nan:
+                                continue
+                            msg = f"NaN found as an edge weight. Edge {(v, w, k2, d2)}"
+                            raise ValueError(msg)
+                        push(frontier, (new_weight, next(c), v, w, k2, d2))
+            else:
+                for w, d2 in G.adj[v].items():
+                    if w in visited:
+                        continue
+                    new_weight = d2.get(weight, 1) * sign
+                    if isnan(new_weight):
+                        if ignore_nan:
+                            continue
+                        msg = f"NaN found as an edge weight. Edge {(v, w, d2)}"
+                        raise ValueError(msg)
+                    push(frontier, (new_weight, next(c), v, w, d2))
+
+
+ALGORITHMS = {
+    "boruvka": boruvka_mst_edges,
+    "borůvka": boruvka_mst_edges,
+    "kruskal": kruskal_mst_edges,
+    "prim": prim_mst_edges,
+}
+
+
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight", preserve_edge_attrs="data")
+def minimum_spanning_edges(
+    G, algorithm="kruskal", weight="weight", keys=True, data=True, ignore_nan=False
+):
     """Generate edges in a minimum spanning forest of an undirected
     weighted graph.

@@ -234,13 +451,22 @@ def minimum_spanning_edges(G, algorithm='kruskal', weight='weight', keys=
     http://www.ics.uci.edu/~eppstein/PADS/

     """
-    pass
-
-
-@not_implemented_for('directed')
-@nx._dispatchable(edge_attrs='weight', preserve_edge_attrs='data')
-def maximum_spanning_edges(G, algorithm='kruskal', weight='weight', keys=
-    True, data=True, ignore_nan=False):
+    try:
+        algo = ALGORITHMS[algorithm]
+    except KeyError as err:
+        msg = f"{algorithm} is not a valid choice for an algorithm."
+        raise ValueError(msg) from err
+
+    return algo(
+        G, minimum=True, weight=weight, keys=keys, data=data, ignore_nan=ignore_nan
+    )
+
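
A minimal cross-check of the ALGORITHMS dispatch table (my example, with
distinct weights so all three algorithms must agree on the forest weight):

    import networkx as nx

    G = nx.Graph()
    G.add_weighted_edges_from([(0, 1, 2), (1, 2, 1), (0, 2, 3), (2, 3, 4)])
    for algorithm in ("kruskal", "prim", "boruvka"):
        edges = nx.minimum_spanning_edges(G, algorithm=algorithm)
        assert sum(d["weight"] for *_, d in edges) == 7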
+
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight", preserve_edge_attrs="data")
+def maximum_spanning_edges(
+    G, algorithm="kruskal", weight="weight", keys=True, data=True, ignore_nan=False
+):
     """Generate edges in a maximum spanning forest of an undirected
     weighted graph.

@@ -319,12 +545,19 @@ def maximum_spanning_edges(G, algorithm='kruskal', weight='weight', keys=
     Modified code from David Eppstein, April 2006
     http://www.ics.uci.edu/~eppstein/PADS/
     """
-    pass
+    try:
+        algo = ALGORITHMS[algorithm]
+    except KeyError as err:
+        msg = f"{algorithm} is not a valid choice for an algorithm."
+        raise ValueError(msg) from err
+
+    return algo(
+        G, minimum=False, weight=weight, keys=keys, data=data, ignore_nan=ignore_nan
+    )


 @nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
-def minimum_spanning_tree(G, weight='weight', algorithm='kruskal',
-    ignore_nan=False):
+def minimum_spanning_tree(G, weight="weight", algorithm="kruskal", ignore_nan=False):
     """Returns a minimum spanning tree or forest on an undirected graph `G`.

     Parameters
@@ -373,12 +606,20 @@ def minimum_spanning_tree(G, weight='weight', algorithm='kruskal',
     Isolated nodes with self-loops are in the tree as edgeless isolated nodes.

     """
-    pass
+    edges = minimum_spanning_edges(
+        G, algorithm, weight, keys=True, data=True, ignore_nan=ignore_nan
+    )
+    T = G.__class__()  # Same graph class as G
+    T.graph.update(G.graph)
+    T.add_nodes_from(G.nodes.items())
+    T.add_edges_from(edges)
+    return T


 @nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
-def partition_spanning_tree(G, minimum=True, weight='weight', partition=
-    'partition', ignore_nan=False):
+def partition_spanning_tree(
+    G, minimum=True, weight="weight", partition="partition", ignore_nan=False
+):
     """
     Find a spanning tree while respecting a partition of edges.

@@ -423,12 +664,24 @@ def partition_spanning_tree(G, minimum=True, weight='weight', partition=
            Vol. 25 (2), p. 219-229,
            https://www.scielo.br/j/pope/a/XHswBwRwJyrfL88dmMwYNWp/?lang=en
     """
-    pass
+    edges = kruskal_mst_edges(
+        G,
+        minimum,
+        weight,
+        keys=True,
+        data=True,
+        ignore_nan=ignore_nan,
+        partition=partition,
+    )
+    T = G.__class__()  # Same graph class as G
+    T.graph.update(G.graph)
+    T.add_nodes_from(G.nodes.items())
+    T.add_edges_from(edges)
+    return T
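
Sketch of partition_spanning_tree with one forced edge, assuming the default
"partition" edge attribute from the signature above:

    import networkx as nx

    G = nx.cycle_graph(4)
    nx.set_edge_attributes(G, 1, "weight")
    G.edges[0, 3]["partition"] = nx.EdgePartition.INCLUDED
    T = nx.partition_spanning_tree(G)
    assert (0, 3) in T.edges and T.number_of_edges() == 3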


 @nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
-def maximum_spanning_tree(G, weight='weight', algorithm='kruskal',
-    ignore_nan=False):
+def maximum_spanning_tree(G, weight="weight", algorithm="kruskal", ignore_nan=False):
     """Returns a maximum spanning tree or forest on an undirected graph `G`.

     Parameters
@@ -479,7 +732,15 @@ def maximum_spanning_tree(G, weight='weight', algorithm='kruskal',
     Isolated nodes with self-loops are in the tree as edgeless isolated nodes.

     """
-    pass
+    edges = maximum_spanning_edges(
+        G, algorithm, weight, keys=True, data=True, ignore_nan=ignore_nan
+    )
+    edges = list(edges)
+    T = G.__class__()  # Same graph class as G
+    T.graph.update(G.graph)
+    T.add_nodes_from(G.nodes.items())
+    T.add_edges_from(edges)
+    return T


 @py_random_state(3)
@@ -527,7 +788,187 @@ def random_spanning_tree(G, weight=None, *, multiplicative=True, seed=None):
     .. [1] V. Kulkarni, Generating random combinatorial objects, Journal of
        Algorithms, 11 (1990), pp. 185–207
     """
-    pass
+
+    def find_node(merged_nodes, node):
+        """
+        We can think of clusters of contracted nodes as having one
+        representative in the graph. Each node which is not in merged_nodes
+        is still its own representative. Since a representative can be later
+        contracted, we need to recursively search through the dict to find
+        the final representative, but once we know it we can use path
+        compression to speed up the access of the representative for next time.
+
+        This cannot be replaced by the standard NetworkX union_find since that
+        data structure merges the set with fewer representing nodes into the
+        one with more, whereas this function must merge nodes in the order in
+        which contract_edges performs the contractions.
+
+        Parameters
+        ----------
+        merged_nodes : dict
+            The dict storing the mapping from node to representative
+        node
+            The node whose representative we seek
+
+        Returns
+        -------
+        The representative of the `node`
+        """
+        if node not in merged_nodes:
+            return node
+        else:
+            rep = find_node(merged_nodes, merged_nodes[node])
+            merged_nodes[node] = rep
+            return rep
+
+    def prepare_graph():
+        """
+        For the graph `G`, remove all edges not in the set `V` and then
+        contract all edges in the set `U`.
+
+        Returns
+        -------
+        A copy of `G` which has had all edges not in `V` removed and all edges
+        in `U` contracted.
+        """
+
+        # The result is a MultiGraph version of G so that parallel edges are
+        # allowed during edge contraction
+        result = nx.MultiGraph(incoming_graph_data=G)
+
+        # Remove all edges not in V
+        edges_to_remove = set(result.edges()).difference(V)
+        result.remove_edges_from(edges_to_remove)
+
+        # Contract all edges in U
+        #
+        # Imagine that you have two edges to contract and they share an
+        # endpoint like this:
+        #                        [0] ----- [1] ----- [2]
+        # If we contract (0, 1) first, the contraction function will always
+        # delete the second node it is passed so the resulting graph would be
+        #                             [0] ----- [2]
+        # and edge (1, 2) no longer exists but (0, 2) would need to be contracted
+        # in its place now. That is why I use the below dict as a merge-find
+        # data structure with path compression to track how the nodes are merged.
+        merged_nodes = {}
+
+        for u, v in U:
+            u_rep = find_node(merged_nodes, u)
+            v_rep = find_node(merged_nodes, v)
+            # We cannot contract a node with itself
+            if u_rep == v_rep:
+                continue
+            nx.contracted_nodes(result, u_rep, v_rep, self_loops=False, copy=False)
+            merged_nodes[v_rep] = u_rep
+
+        return merged_nodes, result
+
+    def spanning_tree_total_weight(G, weight):
+        """
+        Find the sum of weights of the spanning trees of `G` using the
+        appropriate `method`.
+
+        This is easy if the chosen method is 'multiplicative', since we can
+        use Kirchhoff's Tree Matrix Theorem directly. However, with the
+        'additive' method, this process is slightly more complex and less
+        computationally efficient as we have to find the number of spanning
+        trees which contain each possible edge in the graph.
+
+        Parameters
+        ----------
+        G : NetworkX Graph
+            The graph to find the total weight of all spanning trees on.
+
+        weight : string
+            The key for the weight edge attribute of the graph.
+
+        Returns
+        -------
+        float
+            The sum of either the multiplicative or additive weight for all
+            spanning trees in the graph.
+        """
+        if multiplicative:
+            return nx.total_spanning_tree_weight(G, weight)
+        else:
+            # There are two cases for the total spanning tree additive weight.
+            # 1. There is one edge in the graph. Then the only spanning tree is
+            #    that edge itself, which will have a total weight of that edge
+            #    itself.
+            if G.number_of_edges() == 1:
+                return next(iter(G.edges(data=weight)))[2]
+            # 2. There are no edges or two or more edges in the graph. Then, we find the
+            #    total weight of the spanning trees using the formula in the
+            #    reference paper: take the weight of each edge and multiply it by
+            #    the number of spanning trees which include that edge. This
+            #    can be accomplished by contracting the edge and finding the
+            #    multiplicative total spanning tree weight if the weight of each edge
+            #    is assumed to be 1, which is conveniently built into networkx already,
+            #    by calling total_spanning_tree_weight with weight=None.
+            #    Note that with no edges the returned value is just zero.
+            else:
+                total = 0
+                for u, v, w in G.edges(data=weight):
+                    total += w * nx.total_spanning_tree_weight(
+                        nx.contracted_edge(G, edge=(u, v), self_loops=False), None
+                    )
+                return total
+
+    if G.number_of_nodes() < 2:
+        # no edges in the spanning tree
+        return nx.empty_graph(G.nodes)
+
+    U = set()
+    st_cached_value = 0
+    V = set(G.edges())
+    shuffled_edges = list(G.edges())
+    seed.shuffle(shuffled_edges)
+
+    for u, v in shuffled_edges:
+        e_weight = G[u][v][weight] if weight is not None else 1
+        node_map, prepared_G = prepare_graph()
+        G_total_tree_weight = spanning_tree_total_weight(prepared_G, weight)
+        # Add the edge to U so that we can compute the total tree weight
+        # assuming we include that edge
+        # Now, if (u, v) cannot exist in G because it is fully contracted out
+        # of existence, then it by definition cannot influence G_e's Kirchhoff
+        # value. But, we also cannot pick it.
+        rep_edge = (find_node(node_map, u), find_node(node_map, v))
+        # Check to see if the 'representative edge' for the current edge is
+        # in prepared_G. If so, then we can pick it.
+        if rep_edge in prepared_G.edges:
+            prepared_G_e = nx.contracted_edge(
+                prepared_G, edge=rep_edge, self_loops=False
+            )
+            G_e_total_tree_weight = spanning_tree_total_weight(prepared_G_e, weight)
+            if multiplicative:
+                threshold = e_weight * G_e_total_tree_weight / G_total_tree_weight
+            else:
+                numerator = (
+                    st_cached_value + e_weight
+                ) * nx.total_spanning_tree_weight(prepared_G_e) + G_e_total_tree_weight
+                denominator = (
+                    st_cached_value * nx.total_spanning_tree_weight(prepared_G)
+                    + G_total_tree_weight
+                )
+                threshold = numerator / denominator
+        else:
+            threshold = 0.0
+        z = seed.uniform(0.0, 1.0)
+        if z > threshold:
+            # Remove the edge from V since we did not pick it.
+            V.remove((u, v))
+        else:
+            # Add the edge to U since we picked it.
+            st_cached_value += e_weight
+            U.add((u, v))
+        # If we decide to keep an edge, it may complete the spanning tree.
+        if len(U) == G.number_of_nodes() - 1:
+            spanning_tree = nx.Graph()
+            spanning_tree.add_edges_from(U)
+            return spanning_tree
+    raise Exception(f"Something went wrong! Only {len(U)} edges in the spanning tree!")
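
Behavioral sketch for random_spanning_tree: with a fixed seed the sample is
reproducible, and by construction the returned tree spans every node.

    import networkx as nx

    T = nx.random_spanning_tree(nx.complete_graph(5), seed=42)
    assert nx.is_tree(T) and set(T) == set(range(5))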


 class SpanningTreeIterator:
@@ -550,21 +991,22 @@ class SpanningTreeIterator:
            https://www.scielo.br/j/pope/a/XHswBwRwJyrfL88dmMwYNWp/?lang=en
     """

-
     @dataclass(order=True)
     class Partition:
         """
         This dataclass represents a partition and stores a dict with the edge
         data and the weight of the minimum spanning tree of the partition dict.
         """
+
         mst_weight: float
         partition_dict: dict = field(compare=False)

         def __copy__(self):
-            return SpanningTreeIterator.Partition(self.mst_weight, self.
-                partition_dict.copy())
+            return SpanningTreeIterator.Partition(
+                self.mst_weight, self.partition_dict.copy()
+            )

-    def __init__(self, G, weight='weight', minimum=True, ignore_nan=False):
+    def __init__(self, G, weight="weight", minimum=True, ignore_nan=False):
         """
         Initialize the iterator

@@ -585,12 +1027,14 @@ class SpanningTreeIterator:
             If `ignore_nan is True` then that edge is ignored instead.
         """
         self.G = G.copy()
-        self.G.__networkx_cache__ = None
+        self.G.__networkx_cache__ = None  # Disable caching
         self.weight = weight
         self.minimum = minimum
         self.ignore_nan = ignore_nan
+        # Use an obscure key for an edge attribute to hold the partition data
         self.partition_key = (
-            'SpanningTreeIterators super secret partition attribute name')
+            "SpanningTreeIterators super secret partition attribute name"
+        )

     def __iter__(self):
         """
@@ -601,11 +1045,14 @@ class SpanningTreeIterator:
         """
         self.partition_queue = PriorityQueue()
         self._clear_partition(self.G)
-        mst_weight = partition_spanning_tree(self.G, self.minimum, self.
-            weight, self.partition_key, self.ignore_nan).size(weight=self.
-            weight)
-        self.partition_queue.put(self.Partition(mst_weight if self.minimum else
-            -mst_weight, {}))
+        mst_weight = partition_spanning_tree(
+            self.G, self.minimum, self.weight, self.partition_key, self.ignore_nan
+        ).size(weight=self.weight)
+
+        self.partition_queue.put(
+            self.Partition(mst_weight if self.minimum else -mst_weight, {})
+        )
+
         return self

     def __next__(self):
@@ -619,11 +1066,14 @@ class SpanningTreeIterator:
         if self.partition_queue.empty():
             del self.G, self.partition_queue
             raise StopIteration
+
         partition = self.partition_queue.get()
         self._write_partition(partition)
-        next_tree = partition_spanning_tree(self.G, self.minimum, self.
-            weight, self.partition_key, self.ignore_nan)
+        next_tree = partition_spanning_tree(
+            self.G, self.minimum, self.weight, self.partition_key, self.ignore_nan
+        )
         self._partition(partition, next_tree)
+
         self._clear_partition(next_tree)
         return next_tree

@@ -640,7 +1090,29 @@ class SpanningTreeIterator:
         partition_tree : nx.Graph
             The minimum spanning tree of the input partition.
         """
-        pass
+        # create two new partitions with the data from the input partition dict
+        p1 = self.Partition(0, partition.partition_dict.copy())
+        p2 = self.Partition(0, partition.partition_dict.copy())
+        for e in partition_tree.edges:
+            # determine if the edge was open or included
+            if e not in partition.partition_dict:
+                # This is an open edge
+                p1.partition_dict[e] = EdgePartition.EXCLUDED
+                p2.partition_dict[e] = EdgePartition.INCLUDED
+
+                self._write_partition(p1)
+                p1_mst = partition_spanning_tree(
+                    self.G,
+                    self.minimum,
+                    self.weight,
+                    self.partition_key,
+                    self.ignore_nan,
+                )
+                p1_mst_weight = p1_mst.size(weight=self.weight)
+                if nx.is_connected(p1_mst):
+                    p1.mst_weight = p1_mst_weight if self.minimum else -p1_mst_weight
+                    self.partition_queue.put(p1.__copy__())
+                p1.partition_dict = p2.partition_dict.copy()

     def _write_partition(self, partition):
         """
@@ -653,16 +1125,22 @@ class SpanningTreeIterator:
             A Partition dataclass describing a partition on the edges of the
             graph.
         """
-        pass
+        for u, v, d in self.G.edges(data=True):
+            if (u, v) in partition.partition_dict:
+                d[self.partition_key] = partition.partition_dict[(u, v)]
+            else:
+                d[self.partition_key] = EdgePartition.OPEN

     def _clear_partition(self, G):
         """
         Removes partition data from the graph
         """
-        pass
+        for u, v, d in G.edges(data=True):
+            if self.partition_key in d:
+                del d[self.partition_key]
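
Assumed behavior per the class docstring: the iterator yields spanning trees
in nondecreasing weight order when minimum=True. A weighted triangle makes
this easy to verify by hand.

    import networkx as nx

    G = nx.Graph()
    G.add_weighted_edges_from([(0, 1, 1), (1, 2, 2), (0, 2, 3)])
    weights = [t.size(weight="weight") for t in nx.SpanningTreeIterator(G)]
    assert weights == [3, 4, 5]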


-@nx._dispatchable(edge_attrs='weight')
+@nx._dispatchable(edge_attrs="weight")
 def number_of_spanning_trees(G, *, root=None, weight=None):
     """Returns the number of spanning trees in `G`.

@@ -766,4 +1244,31 @@ def number_of_spanning_trees(G, *, root=None, weight=None):
         "Matrix-Tree Theorem for Directed Graphs"
         https://www.math.uchicago.edu/~may/VIGRE/VIGRE2010/REUPapers/Margoliash.pdf
     """
-    pass
+    import numpy as np
+
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept("Graph G must contain at least one node.")
+
+    # undirected G
+    if not nx.is_directed(G):
+        if not nx.is_connected(G):
+            return 0
+        G_laplacian = nx.laplacian_matrix(G, weight=weight).toarray()
+        return float(np.linalg.det(G_laplacian[1:, 1:]))
+
+    # directed G
+    if root is None:
+        raise nx.NetworkXError("Input `root` must be provided when G is directed")
+    if root not in G:
+        raise nx.NetworkXError("The node root is not in the graph G.")
+    if not nx.is_weakly_connected(G):
+        return 0
+
+    # Compute directed Laplacian matrix
+    nodelist = [root] + [n for n in G if n != root]
+    A = nx.adjacency_matrix(G, nodelist=nodelist, weight=weight)
+    D = np.diag(A.sum(axis=0))
+    G_laplacian = D - A
+
+    # Compute number of spanning trees
+    return float(np.linalg.det(G_laplacian[1:, 1:]))
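
Quick check against Cayley's formula, which gives n**(n - 2) spanning trees
for the complete graph (16 for K_4); the determinant is floating point,
hence the round.

    import networkx as nx

    assert round(nx.number_of_spanning_trees(nx.complete_graph(4))) == 16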
diff --git a/networkx/algorithms/tree/operations.py b/networkx/algorithms/tree/operations.py
index 3f085fb29..f4368d6a3 100644
--- a/networkx/algorithms/tree/operations.py
+++ b/networkx/algorithms/tree/operations.py
@@ -1,8 +1,10 @@
 """Operations on trees."""
 from functools import partial
 from itertools import accumulate, chain
+
 import networkx as nx
-__all__ = ['join', 'join_trees']
+
+__all__ = ["join", "join_trees"]


 def join(rooted_trees, label_attribute=None):
@@ -17,9 +19,19 @@ def join(rooted_trees, label_attribute=None):
        It has been renamed join_trees with the same syntax/interface.

     """
-    pass
+    import warnings
+
+    warnings.warn(
+        "The function `join` is deprecated and is renamed `join_trees`.\n"
+        "The ``join`` function itself will be removed in v3.4",
+        DeprecationWarning,
+        stacklevel=2,
+    )

+    return join_trees(rooted_trees, label_attribute=label_attribute)

+
+# Argument types don't match dispatching, but allow manual selection of backend
 @nx._dispatchable(graphs=None, returns_graph=True)
 def join_trees(rooted_trees, *, label_attribute=None, first_label=0):
     """Returns a new rooted tree made by joining `rooted_trees`
@@ -79,4 +91,38 @@ def join_trees(rooted_trees, *, label_attribute=None, first_label=0):
         True

     """
-    pass
+    if not rooted_trees:
+        return nx.empty_graph(1)
+
+    # Unzip the zipped list of (tree, root) pairs.
+    trees, roots = zip(*rooted_trees)
+
+    # The join of the trees has the same type as the type of the first tree.
+    R = type(trees[0])()
+
+    lengths = (len(tree) for tree in trees[:-1])
+    first_labels = list(accumulate(lengths, initial=first_label + 1))
+
+    new_roots = []
+    for tree, root, first_node in zip(trees, roots, first_labels):
+        new_root = first_node + list(tree.nodes()).index(root)
+        new_roots.append(new_root)
+
+    # Relabel the nodes so that their union is the integers starting at first_label.
+    relabel = partial(
+        nx.convert_node_labels_to_integers, label_attribute=label_attribute
+    )
+    new_trees = [
+        relabel(tree, first_label=first_label)
+        for tree, first_label in zip(trees, first_labels)
+    ]
+
+    # Add all sets of nodes and edges, attributes
+    for tree in new_trees:
+        R.update(tree)
+
+    # Finally, join the subtrees at the root. We know first_label is unused by the way we relabeled the subtrees.
+    R.add_node(first_label)
+    R.add_edges_from((first_label, root) for root in new_roots)
+
+    return R
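
A minimal sketch of join_trees: joining two single-node trees under a fresh
root (first_label, here the default 0) yields a star on three nodes.

    import networkx as nx

    T = nx.join_trees([(nx.empty_graph(1), 0), (nx.empty_graph(1), 0)])
    assert sorted(T.edges()) == [(0, 1), (0, 2)]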
diff --git a/networkx/algorithms/tree/recognition.py b/networkx/algorithms/tree/recognition.py
index 71add2188..a9eae9870 100644
--- a/networkx/algorithms/tree/recognition.py
+++ b/networkx/algorithms/tree/recognition.py
@@ -72,11 +72,13 @@ nodes from a larger graph, and it is in this context that the term "spanning"
 becomes a useful notion.

 """
+
 import networkx as nx
-__all__ = ['is_arborescence', 'is_branching', 'is_forest', 'is_tree']
+
+__all__ = ["is_arborescence", "is_branching", "is_forest", "is_tree"]


-@nx.utils.not_implemented_for('undirected')
+@nx.utils.not_implemented_for("undirected")
 @nx._dispatchable
 def is_arborescence(G):
     """
@@ -113,10 +115,10 @@ def is_arborescence(G):
     is_tree

     """
-    pass
+    return is_tree(G) and max(d for n, d in G.in_degree()) <= 1


-@nx.utils.not_implemented_for('undirected')
+@nx.utils.not_implemented_for("undirected")
 @nx._dispatchable
 def is_branching(G):
     """
@@ -153,7 +155,7 @@ def is_branching(G):
     is_forest

     """
-    pass
+    return is_forest(G) and max(d for n, d in G.in_degree()) <= 1


 @nx._dispatchable
@@ -202,7 +204,15 @@ def is_forest(G):
     is_branching

     """
-    pass
+    if len(G) == 0:
+        raise nx.exception.NetworkXPointlessConcept("G has no nodes.")
+
+    if G.is_directed():
+        components = (G.subgraph(c) for c in nx.weakly_connected_components(G))
+    else:
+        components = (G.subgraph(c) for c in nx.connected_components(G))
+
+    return all(len(c) - 1 == c.number_of_edges() for c in components)


 @nx._dispatchable
@@ -251,4 +261,13 @@ def is_tree(G):
     is_arborescence

     """
-    pass
+    if len(G) == 0:
+        raise nx.exception.NetworkXPointlessConcept("G has no nodes.")
+
+    if G.is_directed():
+        is_connected = nx.is_weakly_connected
+    else:
+        is_connected = nx.is_connected
+
+    # A connected graph with no cycles has n-1 edges.
+    return len(G) - 1 == G.number_of_edges() and is_connected(G)
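
The edge-count shortcut in action (my example): a connected graph on n nodes
is a tree exactly when it has n - 1 edges, and closing a cycle breaks both
the tree and the forest tests.

    import networkx as nx

    G = nx.path_graph(4)
    assert nx.is_tree(G) and nx.is_forest(G)
    G.add_edge(0, 3)  # close the cycle
    assert not nx.is_tree(G) and not nx.is_forest(G)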
diff --git a/networkx/algorithms/triads.py b/networkx/algorithms/triads.py
index ab34e2910..1e67c1453 100644
--- a/networkx/algorithms/triads.py
+++ b/networkx/algorithms/triads.py
@@ -1,16 +1,118 @@
+# See https://github.com/networkx/networkx/pull/1474
+# Copyright 2011 Reya Group <http://www.reyagroup.com>
+# Copyright 2011 Alex Levenson <alex@isnotinvain.com>
+# Copyright 2011 Diederik van Liere <diederik.vanliere@rotman.utoronto.ca>
 """Functions for analyzing triads of a graph."""
+
 from collections import defaultdict
 from itertools import combinations, permutations
+
 import networkx as nx
 from networkx.utils import not_implemented_for, py_random_state
-__all__ = ['triadic_census', 'is_triad', 'all_triplets', 'all_triads',
-    'triads_by_type', 'triad_type', 'random_triad']
-TRICODES = (1, 2, 2, 3, 2, 4, 6, 8, 2, 6, 5, 7, 3, 8, 7, 11, 2, 6, 4, 8, 5,
-    9, 9, 13, 6, 10, 9, 14, 7, 14, 12, 15, 2, 5, 6, 7, 6, 9, 10, 14, 4, 9, 
-    9, 12, 8, 13, 14, 15, 3, 7, 8, 11, 7, 12, 14, 15, 8, 14, 13, 15, 11, 15,
-    15, 16)
-TRIAD_NAMES = ('003', '012', '102', '021D', '021U', '021C', '111D', '111U',
-    '030T', '030C', '201', '120D', '120U', '120C', '210', '300')
+
+__all__ = [
+    "triadic_census",
+    "is_triad",
+    "all_triplets",
+    "all_triads",
+    "triads_by_type",
+    "triad_type",
+    "random_triad",
+]
+
+#: The integer codes representing each type of triad.
+#:
+#: Triads that are the same up to symmetry have the same code.
+TRICODES = (
+    1, 2, 2, 3, 2, 4, 6, 8, 2, 6, 5, 7, 3, 8, 7, 11, 2, 6, 4, 8, 5, 9, 9, 13,
+    6, 10, 9, 14, 7, 14, 12, 15, 2, 5, 6, 7, 6, 9, 10, 14, 4, 9, 9, 12, 8, 13,
+    14, 15, 3, 7, 8, 11, 7, 12, 14, 15, 8, 14, 13, 15, 11, 15, 15, 16,
+)
+
+#: The names of each type of triad. The order of the elements is
+#: important: it corresponds to the tricodes given in :data:`TRICODES`.
+TRIAD_NAMES = (
+    "003", "012", "102", "021D", "021U", "021C", "111D", "111U",
+    "030T", "030C", "201", "120D", "120U", "120C", "210", "300",
+)
+
+
+#: A dictionary mapping triad code to triad name.
 TRICODE_TO_NAME = {i: TRIAD_NAMES[code - 1] for i, code in enumerate(TRICODES)}


@@ -22,10 +124,11 @@ def _tricode(G, v, u, w):
     the binary representation of an integer.

     """
-    pass
+    combos = ((v, u, 1), (u, v, 2), (v, w, 4), (w, v, 8), (u, w, 16), (w, u, 32))
+    return sum(x for u, v, x in combos if v in G[u])


-@not_implemented_for('undirected')
+@not_implemented_for("undirected")
 @nx._dispatchable
 def triadic_census(G, nodelist=None):
     """Determines the triadic census of a directed graph.
@@ -97,7 +200,84 @@ def triadic_census(G, nodelist=None):
         http://vlado.fmf.uni-lj.si/pub/networks/doc/triads/triads.pdf

     """
-    pass
+    nodeset = set(G.nbunch_iter(nodelist))
+    if nodelist is not None and len(nodelist) != len(nodeset):
+        raise ValueError("nodelist includes duplicate nodes or nodes not in G")
+
+    N = len(G)
+    Nnot = N - len(nodeset)  # can signal special counting for subset of nodes
+
+    # create an ordering of nodes with nodeset nodes first
+    m = {n: i for i, n in enumerate(nodeset)}
+    if Nnot:
+        # add non-nodeset nodes later in the ordering
+        not_nodeset = G.nodes - nodeset
+        m.update((n, i + N) for i, n in enumerate(not_nodeset))
+
+    # build all_neighbor dicts for easy counting
+    # After Python 3.8 can leave off these keys(). Speedup also using G._pred
+    # nbrs = {n: G._pred[n].keys() | G._succ[n].keys() for n in G}
+    nbrs = {n: G.pred[n].keys() | G.succ[n].keys() for n in G}
+    dbl_nbrs = {n: G.pred[n].keys() & G.succ[n].keys() for n in G}
+
+    if Nnot:
+        sgl_nbrs = {n: G.pred[n].keys() ^ G.succ[n].keys() for n in not_nodeset}
+        # find number of edges not incident to nodes in nodeset
+        sgl = sum(1 for n in not_nodeset for nbr in sgl_nbrs[n] if nbr not in nodeset)
+        sgl_edges_outside = sgl // 2
+        dbl = sum(1 for n in not_nodeset for nbr in dbl_nbrs[n] if nbr not in nodeset)
+        dbl_edges_outside = dbl // 2
+
+    # Initialize the count for each triad to be zero.
+    census = {name: 0 for name in TRIAD_NAMES}
+    # Main loop over nodes
+    for v in nodeset:
+        vnbrs = nbrs[v]
+        dbl_vnbrs = dbl_nbrs[v]
+        if Nnot:
+            # set up counts of edges attached to v.
+            sgl_unbrs_bdy = sgl_unbrs_out = dbl_unbrs_bdy = dbl_unbrs_out = 0
+        for u in vnbrs:
+            if m[u] <= m[v]:
+                continue
+            unbrs = nbrs[u]
+            neighbors = (vnbrs | unbrs) - {u, v}
+            # Count connected triads.
+            for w in neighbors:
+                if m[u] < m[w] or (m[v] < m[w] < m[u] and v not in nbrs[w]):
+                    code = _tricode(G, v, u, w)
+                    census[TRICODE_TO_NAME[code]] += 1
+
+            # Use a formula for dyadic triads with edge incident to v
+            if u in dbl_vnbrs:
+                census["102"] += N - len(neighbors) - 2
+            else:
+                census["012"] += N - len(neighbors) - 2
+
+            # Count edges attached to v. Subtract later to get triads with v isolated
+            # _out are (u,unbr) for unbrs outside boundary of nodeset
+            # _bdy are (u,unbr) for unbrs on boundary of nodeset (get double counted)
+            if Nnot and u not in nodeset:
+                sgl_unbrs = sgl_nbrs[u]
+                sgl_unbrs_bdy += len(sgl_unbrs & vnbrs - nodeset)
+                sgl_unbrs_out += len(sgl_unbrs - vnbrs - nodeset)
+                dbl_unbrs = dbl_nbrs[u]
+                dbl_unbrs_bdy += len(dbl_unbrs & vnbrs - nodeset)
+                dbl_unbrs_out += len(dbl_unbrs - vnbrs - nodeset)
+        # if nodeset == G.nodes, skip this b/c we will find the edge later.
+        if Nnot:
+            # Count edges outside nodeset not connected with v (v isolated triads)
+            census["012"] += sgl_edges_outside - (sgl_unbrs_out + sgl_unbrs_bdy // 2)
+            census["102"] += dbl_edges_outside - (dbl_unbrs_out + dbl_unbrs_bdy // 2)
+
+    # calculate null triads: "003"
+    # null triads = total number of possible triads - all found triads
+    total_triangles = (N * (N - 1) * (N - 2)) // 6
+    triangles_without_nodeset = (Nnot * (Nnot - 1) * (Nnot - 2)) // 6
+    total_census = total_triangles - triangles_without_nodeset
+    census["003"] = total_census - sum(census.values())
+
+    return census
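
Sanity check for the census (my example): every unordered node triple is
counted exactly once, so the totals sum to C(n, 3).

    import networkx as nx

    G = nx.DiGraph([(0, 1), (1, 2), (2, 3)])
    census = nx.triadic_census(G)
    assert sum(census.values()) == 4  # C(4, 3) triples
    assert census["021C"] == 2        # 0->1->2 and 1->2->3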


 @nx._dispatchable
@@ -123,10 +303,14 @@ def is_triad(G):
     >>> nx.is_triad(G)
     False
     """
-    pass
+    if isinstance(G, nx.Graph):
+        if G.order() == 3 and nx.is_directed(G):
+            if not any((n, n) in G.edges() for n in G.nodes()):
+                return True
+    return False


-@not_implemented_for('undirected')
+@not_implemented_for("undirected")
 @nx._dispatchable
 def all_triplets(G):
     """Returns a generator of all possible sets of 3 nodes in a DiGraph.
@@ -155,10 +339,21 @@ def all_triplets(G):
     [(1, 2, 3), (1, 2, 4), (1, 3, 4), (2, 3, 4)]

     """
-    pass
+    import warnings

+    warnings.warn(
+        (
+            "\n\nall_triplets is deprecated and will be rmoved in v3.5.\n"
+            "Use `itertools.combinations(G, 3)` instead."
+        ),
+        category=DeprecationWarning,
+        stacklevel=4,
+    )
+    triplets = combinations(G.nodes(), 3)
+    return triplets

-@not_implemented_for('undirected')
+
+@not_implemented_for("undirected")
 @nx._dispatchable(returns_graph=True)
 def all_triads(G):
     """A generator of all possible triads in G.
@@ -184,10 +379,12 @@ def all_triads(G):
     [(2, 3), (3, 4), (4, 2)]

     """
-    pass
+    triplets = combinations(G.nodes(), 3)
+    for triplet in triplets:
+        yield G.subgraph(triplet).copy()


-@not_implemented_for('undirected')
+@not_implemented_for("undirected")
 @nx._dispatchable
 def triads_by_type(G):
     """Returns a list of all triads for each triad type in a directed graph.
@@ -240,10 +437,17 @@ def triads_by_type(G):
         Oxford.
         https://web.archive.org/web/20170830032057/http://www.stats.ox.ac.uk/~snijders/Trans_Triads_ha.pdf
     """
-    pass
+    # num_triads = o * (o - 1) * (o - 2) // 6
+    # if num_triads > TRIAD_LIMIT: print(WARNING)
+    all_tri = all_triads(G)
+    tri_by_type = defaultdict(list)
+    for triad in all_tri:
+        name = triad_type(triad)
+        tri_by_type[name].append(triad)
+    return tri_by_type


-@not_implemented_for('undirected')
+@not_implemented_for("undirected")
 @nx._dispatchable
 def triad_type(G):
     """Returns the sociological triad type for a triad.
@@ -294,10 +498,54 @@ def triad_type(G):
         Oxford.
         https://web.archive.org/web/20170830032057/http://www.stats.ox.ac.uk/~snijders/Trans_Triads_ha.pdf
     """
-    pass
-
-
-@not_implemented_for('undirected')
+    if not is_triad(G):
+        raise nx.NetworkXAlgorithmError("G is not a triad (order-3 DiGraph)")
+    num_edges = len(G.edges())
+    if num_edges == 0:
+        return "003"
+    elif num_edges == 1:
+        return "012"
+    elif num_edges == 2:
+        e1, e2 = G.edges()
+        if set(e1) == set(e2):
+            return "102"
+        elif e1[0] == e2[0]:
+            return "021D"
+        elif e1[1] == e2[1]:
+            return "021U"
+        elif e1[1] == e2[0] or e2[1] == e1[0]:
+            return "021C"
+    elif num_edges == 3:
+        for e1, e2, e3 in permutations(G.edges(), 3):
+            if set(e1) == set(e2):
+                if e3[0] in e1:
+                    return "111U"
+                # e3[1] in e1:
+                return "111D"
+            elif set(e1).symmetric_difference(set(e2)) == set(e3):
+                if {e1[0], e2[0], e3[0]} == {e1[1], e2[1], e3[1]} == set(G.nodes()):
+                    return "030C"
+                # e3 == (e1[0], e2[1]) and e2 == (e1[1], e3[1]):
+                return "030T"
+    elif num_edges == 4:
+        for e1, e2, e3, e4 in permutations(G.edges(), 4):
+            if set(e1) == set(e2):
+                # identify pair of symmetric edges (which necessarily exists)
+                if set(e3) == set(e4):
+                    return "201"
+                if {e3[0]} == {e4[0]} == set(e3).intersection(set(e4)):
+                    return "120D"
+                if {e3[1]} == {e4[1]} == set(e3).intersection(set(e4)):
+                    return "120U"
+                if e3[1] == e4[0]:
+                    return "120C"
+    elif num_edges == 5:
+        return "210"
+    elif num_edges == 6:
+        return "300"
+
+
+@not_implemented_for("undirected")
 @py_random_state(1)
 @nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
 def random_triad(G, seed=None):
@@ -336,4 +584,21 @@ def random_triad(G, seed=None):
     OutEdgeView([(1, 2)])

     """
-    pass
+    import warnings
+
+    warnings.warn(
+        (
+            "\n\nrandom_triad is deprecated and will be removed in NetworkX v3.5.\n"
+            "Use random.sample instead, e.g.::\n\n"
+            "\tG.subgraph(random.sample(list(G), 3))\n"
+        ),
+        category=DeprecationWarning,
+        stacklevel=5,
+    )
+    if len(G) < 3:
+        raise nx.NetworkXError(
+            f"G needs at least 3 nodes to form a triad; (it has {len(G)} nodes)"
+        )
+    nodes = seed.sample(list(G.nodes()), 3)
+    G2 = G.subgraph(nodes)
+    return G2
diff --git a/networkx/algorithms/vitality.py b/networkx/algorithms/vitality.py
index a54ae099b..29f98fd1b 100644
--- a/networkx/algorithms/vitality.py
+++ b/networkx/algorithms/vitality.py
@@ -2,11 +2,13 @@
 Vitality measures.
 """
 from functools import partial
+
 import networkx as nx
-__all__ = ['closeness_vitality']
+
+__all__ = ["closeness_vitality"]


-@nx._dispatchable(edge_attrs='weight')
+@nx._dispatchable(edge_attrs="weight")
 def closeness_vitality(G, node=None, weight=None, wiener_index=None):
     """Returns the closeness vitality for nodes in the graph.

@@ -64,4 +66,11 @@ def closeness_vitality(G, node=None, weight=None, wiener_index=None):
            <http://books.google.com/books?id=TTNhSm7HYrIC>

     """
-    pass
+    if wiener_index is None:
+        wiener_index = nx.wiener_index(G, weight=weight)
+    if node is not None:
+        after = nx.wiener_index(G.subgraph(set(G) - {node}), weight=weight)
+        return wiener_index - after
+    vitality = partial(closeness_vitality, G, weight=weight, wiener_index=wiener_index)
+    # TODO This can be trivially parallelized.
+    return {v: vitality(node=v) for v in G}
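
A sketch of the `closeness_vitality` semantics implemented above (illustrative only): the vitality of a node is the Wiener index of the graph minus the Wiener index of the graph with that node removed.

    import networkx as nx

    G = nx.cycle_graph(4)
    print(nx.wiener_index(G))            # 8.0 for the 4-cycle
    # Removing node 0 leaves the path 1-2-3 with Wiener index 4.0,
    # so the vitality of node 0 should be 8.0 - 4.0 = 4.0.
    print(nx.closeness_vitality(G, node=0))
    print(nx.closeness_vitality(G))      # dict keyed by node; all 4.0 by symmetry
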
diff --git a/networkx/algorithms/voronoi.py b/networkx/algorithms/voronoi.py
index 913e63c6c..60c453323 100644
--- a/networkx/algorithms/voronoi.py
+++ b/networkx/algorithms/voronoi.py
@@ -1,11 +1,12 @@
 """Functions for computing the Voronoi cells of a graph."""
 import networkx as nx
 from networkx.utils import groups
-__all__ = ['voronoi_cells']

+__all__ = ["voronoi_cells"]

-@nx._dispatchable(edge_attrs='weight')
-def voronoi_cells(G, center_nodes, weight='weight'):
+
+@nx._dispatchable(edge_attrs="weight")
+def voronoi_cells(G, center_nodes, weight="weight"):
     """Returns the Voronoi cells centered at `center_nodes` with respect
     to the shortest-path distance metric.

@@ -67,4 +68,18 @@ def voronoi_cells(G, center_nodes, weight='weight'):
         https://doi.org/10.1002/1097-0037(200010)36:3<156::AID-NET2>3.0.CO;2-L

     """
-    pass
+    # Determine the shortest paths from any one of the center nodes to
+    # every node in the graph.
+    #
+    # This raises `ValueError` if `center_nodes` is an empty set.
+    paths = nx.multi_source_dijkstra_path(G, center_nodes, weight=weight)
+    # Determine the center node from which the shortest path originates.
+    nearest = {v: p[0] for v, p in paths.items()}
+    # Get the mapping from center node to all nodes closer to it than to
+    # any other center node.
+    cells = groups(nearest)
+    # We collect all unreachable nodes under a special key, if there are any.
+    unreachable = set(G) - set(nearest)
+    if unreachable:
+        cells["unreachable"] = unreachable
+    return cells
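
The behavior of `voronoi_cells` on a small path graph (a minimal sketch; which cell an equidistant node lands in depends on which center the multi-source Dijkstra settles it from, so the tie is not guaranteed):

    import networkx as nx

    G = nx.path_graph(5)  # 0-1-2-3-4
    cells = nx.voronoi_cells(G, {0, 4})
    # Node 2 is equidistant from both centers; it lands in exactly one cell.
    print(cells)  # e.g. {0: {0, 1, 2}, 4: {3, 4}}

    # Unreachable nodes are collected under the special "unreachable" key.
    H = nx.Graph([(0, 1)])
    H.add_node(9)
    print(nx.voronoi_cells(H, {0})["unreachable"])  # {9}
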
diff --git a/networkx/algorithms/walks.py b/networkx/algorithms/walks.py
index 0727449a3..fe3417577 100644
--- a/networkx/algorithms/walks.py
+++ b/networkx/algorithms/walks.py
@@ -1,7 +1,9 @@
 """Function for computing walks in a graph.
 """
+
 import networkx as nx
-__all__ = ['number_of_walks']
+
+__all__ = ["number_of_walks"]


 @nx._dispatchable
@@ -62,4 +64,17 @@ def number_of_walks(G, walk_length):
     1

     """
-    pass
+    import numpy as np
+
+    if walk_length < 0:
+        raise ValueError(f"`walk_length` cannot be negative: {walk_length}")
+
+    A = nx.adjacency_matrix(G, weight=None)
+    # TODO: Use matrix_power from scipy.sparse when available
+    # power = sp.sparse.linalg.matrix_power(A, walk_length)
+    power = np.linalg.matrix_power(A.toarray(), walk_length)
+    result = {
+        u: {v: power.item(u_idx, v_idx) for v_idx, v in enumerate(G)}
+        for u_idx, u in enumerate(G)
+    }
+    return result
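
`number_of_walks` as implemented above is the classic walk-counting identity: the number of length-k walks from u to v is the (u, v) entry of the k-th power of the adjacency matrix. A tiny check on the triangle (illustrative only):

    import networkx as nx

    G = nx.cycle_graph(3)  # triangle
    walks = nx.number_of_walks(G, 2)
    print(walks[0][0])  # 2 closed walks of length 2: 0-1-0 and 0-2-0
    print(walks[0][1])  # 1 walk of length 2 from 0 to 1: 0-2-1
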
diff --git a/networkx/algorithms/wiener.py b/networkx/algorithms/wiener.py
index 6ff78645d..cb55d609f 100644
--- a/networkx/algorithms/wiener.py
+++ b/networkx/algorithms/wiener.py
@@ -14,12 +14,15 @@ References
        Croatica Chemica Acta, 71 (1998), 21-51.
        https://hrcak.srce.hr/132323
 """
+
 import itertools as it
+
 import networkx as nx
-__all__ = ['wiener_index', 'schultz_index', 'gutman_index']
+
+__all__ = ["wiener_index", "schultz_index", "gutman_index"]


-@nx._dispatchable(edge_attrs='weight')
+@nx._dispatchable(edge_attrs="weight")
 def wiener_index(G, weight=None):
     """Returns the Wiener index of the given graph.

@@ -79,14 +82,21 @@ def wiener_index(G, weight=None):
     ----------
     .. [1] `Wikipedia: Wiener Index <https://en.wikipedia.org/wiki/Wiener_index>`_
     """
-    pass
+    connected = nx.is_strongly_connected(G) if G.is_directed() else nx.is_connected(G)
+    if not connected:
+        return float("inf")

+    spl = nx.shortest_path_length(G, weight=weight)
+    total = sum(it.chain.from_iterable(nbrs.values() for node, nbrs in spl))
+    # Need to account for double counting pairs of nodes in undirected graphs.
+    return total if G.is_directed() else total / 2

-@nx.utils.not_implemented_for('directed')
-@nx.utils.not_implemented_for('multigraph')
-@nx._dispatchable(edge_attrs='weight')
+
+@nx.utils.not_implemented_for("directed")
+@nx.utils.not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
 def schultz_index(G, weight=None):
-    """Returns the Schultz Index (of the first kind) of `G`
+    r"""Returns the Schultz Index (of the first kind) of `G`

     The *Schultz Index* [3]_ of a graph is the sum over all node pairs of
     distances times the sum of degrees. Consider an undirected graph `G`.
@@ -142,14 +152,19 @@ def schultz_index(G, weight=None):
            J. Chem. Inf. Comput. Sci. 29 (1989), 239–257.

     """
-    pass
+    if not nx.is_connected(G):
+        return float("inf")
+
+    spl = nx.shortest_path_length(G, weight=weight)
+    d = dict(G.degree(weight=weight))
+    return sum(dist * (d[u] + d[v]) for u, info in spl for v, dist in info.items()) / 2


-@nx.utils.not_implemented_for('directed')
-@nx.utils.not_implemented_for('multigraph')
-@nx._dispatchable(edge_attrs='weight')
+@nx.utils.not_implemented_for("directed")
+@nx.utils.not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
 def gutman_index(G, weight=None):
-    """Returns the Gutman Index for the graph `G`.
+    r"""Returns the Gutman Index for the graph `G`.

     The *Gutman Index* measures the topology of networks, especially for molecule
     networks of atoms connected by bonds [1]_. It is also called the Schultz Index
@@ -203,4 +218,9 @@ def gutman_index(G, weight=None):
            https://doi.org/10.1021/ci00021a009

     """
-    pass
+    if not nx.is_connected(G):
+        return float("inf")
+
+    spl = nx.shortest_path_length(G, weight=weight)
+    d = dict(G.degree(weight=weight))
+    return sum(dist * d[u] * d[v] for u, vinfo in spl for v, dist in vinfo.items()) / 2
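
A worked example for the three indices above on the path graph 0-1-2 (degrees 1, 2, 1; distances d(0,1)=d(1,2)=1 and d(0,2)=2). The commented values are what the formulas give, offered as a hand-checked sanity sketch rather than doctest output:

    import networkx as nx

    G = nx.path_graph(3)
    print(nx.wiener_index(G))   # 1 + 1 + 2 = 4.0
    print(nx.schultz_index(G))  # 1*(1+2) + 1*(2+1) + 2*(1+1) = 10.0
    print(nx.gutman_index(G))   # 1*1*2 + 1*2*1 + 2*1*1 = 6.0

    # Disconnected graphs short-circuit to infinity.
    print(nx.wiener_index(nx.Graph([(0, 1), (2, 3)])))  # inf
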
diff --git a/networkx/classes/coreviews.py b/networkx/classes/coreviews.py
index d7c9ab0ce..f4b54c7b7 100644
--- a/networkx/classes/coreviews.py
+++ b/networkx/classes/coreviews.py
@@ -3,10 +3,20 @@ These ``Views`` often restrict element access, with either the entire view or
 layers of nested mappings being read-only.
 """
 from collections.abc import Mapping
-__all__ = ['AtlasView', 'AdjacencyView', 'MultiAdjacencyView', 'UnionAtlas',
-    'UnionAdjacency', 'UnionMultiInner', 'UnionMultiAdjacency',
-    'FilterAtlas', 'FilterAdjacency', 'FilterMultiInner',
-    'FilterMultiAdjacency']
+
+__all__ = [
+    "AtlasView",
+    "AdjacencyView",
+    "MultiAdjacencyView",
+    "UnionAtlas",
+    "UnionAdjacency",
+    "UnionMultiInner",
+    "UnionMultiAdjacency",
+    "FilterAtlas",
+    "FilterAdjacency",
+    "FilterMultiInner",
+    "FilterMultiAdjacency",
+]


 class AtlasView(Mapping):
@@ -21,13 +31,14 @@ class AtlasView(Mapping):
     AdjacencyView: View into dict-of-dict-of-dict
     MultiAdjacencyView: View into dict-of-dict-of-dict-of-dict
     """
-    __slots__ = '_atlas',
+
+    __slots__ = ("_atlas",)

     def __getstate__(self):
-        return {'_atlas': self._atlas}
+        return {"_atlas": self._atlas}

     def __setstate__(self, state):
-        self._atlas = state['_atlas']
+        self._atlas = state["_atlas"]

     def __init__(self, d):
         self._atlas = d
@@ -41,11 +52,14 @@ class AtlasView(Mapping):
     def __getitem__(self, key):
         return self._atlas[key]

+    def copy(self):
+        return {n: self[n].copy() for n in self._atlas}
+
     def __str__(self):
-        return str(self._atlas)
+        return str(self._atlas)  # {nbr: self[nbr] for nbr in self}

     def __repr__(self):
-        return f'{self.__class__.__name__}({self._atlas!r})'
+        return f"{self.__class__.__name__}({self._atlas!r})"


 class AdjacencyView(AtlasView):
@@ -60,11 +74,15 @@ class AdjacencyView(AtlasView):
     AtlasView: View into dict-of-dict
     MultiAdjacencyView: View into dict-of-dict-of-dict-of-dict
     """
-    __slots__ = ()
+
+    __slots__ = ()  # Still uses AtlasView slots names _atlas

     def __getitem__(self, name):
         return AtlasView(self._atlas[name])

+    def copy(self):
+        return {n: self[n].copy() for n in self._atlas}
+

 class MultiAdjacencyView(AdjacencyView):
     """An MultiAdjacencyView is a Read-only Map of Maps of Maps of Maps.
@@ -78,11 +96,15 @@ class MultiAdjacencyView(AdjacencyView):
     AtlasView: View into dict-of-dict
     AdjacencyView: View into dict-of-dict-of-dict
     """
-    __slots__ = ()
+
+    __slots__ = ()  # Still uses AtlasView slots names _atlas

     def __getitem__(self, name):
         return AdjacencyView(self._atlas[name])

+    def copy(self):
+        return {n: self[n].copy() for n in self._atlas}
+

 class UnionAtlas(Mapping):
     """A read-only union of two atlases (dict-of-dict).
@@ -97,14 +119,15 @@ class UnionAtlas(Mapping):
     UnionAdjacency: View into dict-of-dict-of-dict
     UnionMultiAdjacency: View into dict-of-dict-of-dict-of-dict
     """
-    __slots__ = '_succ', '_pred'
+
+    __slots__ = ("_succ", "_pred")

     def __getstate__(self):
-        return {'_succ': self._succ, '_pred': self._pred}
+        return {"_succ": self._succ, "_pred": self._pred}

     def __setstate__(self, state):
-        self._succ = state['_succ']
-        self._pred = state['_pred']
+        self._succ = state["_succ"]
+        self._pred = state["_pred"]

     def __init__(self, succ, pred):
         self._succ = succ
@@ -122,11 +145,20 @@ class UnionAtlas(Mapping):
         except KeyError:
             return self._pred[key]

+    def copy(self):
+        result = {nbr: dd.copy() for nbr, dd in self._succ.items()}
+        for nbr, dd in self._pred.items():
+            if nbr in result:
+                result[nbr].update(dd)
+            else:
+                result[nbr] = dd.copy()
+        return result
+
     def __str__(self):
         return str({nbr: self[nbr] for nbr in self})

     def __repr__(self):
-        return f'{self.__class__.__name__}({self._succ!r}, {self._pred!r})'
+        return f"{self.__class__.__name__}({self._succ!r}, {self._pred!r})"


 class UnionAdjacency(Mapping):
@@ -146,22 +178,24 @@ class UnionAdjacency(Mapping):
     UnionAtlas: View into dict-of-dict
     UnionMultiAdjacency: View into dict-of-dict-of-dict-of-dict
     """
-    __slots__ = '_succ', '_pred'
+
+    __slots__ = ("_succ", "_pred")

     def __getstate__(self):
-        return {'_succ': self._succ, '_pred': self._pred}
+        return {"_succ": self._succ, "_pred": self._pred}

     def __setstate__(self, state):
-        self._succ = state['_succ']
-        self._pred = state['_pred']
+        self._succ = state["_succ"]
+        self._pred = state["_pred"]

     def __init__(self, succ, pred):
+        # keys must be the same for two input dicts
         assert len(set(succ.keys()) ^ set(pred.keys())) == 0
         self._succ = succ
         self._pred = pred

     def __len__(self):
-        return len(self._succ)
+        return len(self._succ)  # length of each dict should be the same

     def __iter__(self):
         return iter(self._succ)
@@ -169,11 +203,14 @@ class UnionAdjacency(Mapping):
     def __getitem__(self, nbr):
         return UnionAtlas(self._succ[nbr], self._pred[nbr])

+    def copy(self):
+        return {n: self[n].copy() for n in self._succ}
+
     def __str__(self):
         return str({nbr: self[nbr] for nbr in self})

     def __repr__(self):
-        return f'{self.__class__.__name__}({self._succ!r}, {self._pred!r})'
+        return f"{self.__class__.__name__}({self._succ!r}, {self._pred!r})"


 class UnionMultiInner(UnionAtlas):
@@ -190,7 +227,8 @@ class UnionMultiInner(UnionAtlas):
     UnionAdjacency:  View into dict-of-dict-of-dict
     UnionMultiAdjacency:  View into dict-of-dict-of-dict-of-dict
     """
-    __slots__ = ()
+
+    __slots__ = ()  # Still uses UnionAtlas slots names _succ, _pred

     def __getitem__(self, node):
         in_succ = node in self._succ
@@ -201,6 +239,10 @@ class UnionMultiInner(UnionAtlas):
             return UnionAtlas(self._succ[node], {})
         return UnionAtlas({}, self._pred[node])

+    def copy(self):
+        nodes = set(self._succ.keys()) | set(self._pred.keys())
+        return {n: self[n].copy() for n in nodes}
+

 class UnionMultiAdjacency(UnionAdjacency):
     """A read-only union of two dict MultiAdjacencies.
@@ -214,13 +256,14 @@ class UnionMultiAdjacency(UnionAdjacency):
     UnionAtlas:  View into dict-of-dict
     UnionMultiInner:  View into dict-of-dict-of-dict
     """
-    __slots__ = ()
+
+    __slots__ = ()  # Still uses UnionAdjacency slots names _succ, _pred

     def __getitem__(self, node):
         return UnionMultiInner(self._succ[node], self._pred[node])


-class FilterAtlas(Mapping):
+class FilterAtlas(Mapping):  # nodedict, nbrdict, keydict
     """A read-only Mapping of Mappings with filtering criteria for nodes.

     It is a view into a dict-of-dict data structure, and it selects only
@@ -241,7 +284,7 @@ class FilterAtlas(Mapping):
         return sum(1 for n in self)

     def __iter__(self):
-        try:
+        try:  # check that NODE_OK has attr 'nodes'
             node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas)
         except AttributeError:
             node_ok_shorter = False
@@ -252,16 +295,16 @@ class FilterAtlas(Mapping):
     def __getitem__(self, key):
         if key in self._atlas and self.NODE_OK(key):
             return self._atlas[key]
-        raise KeyError(f'Key {key} not found')
+        raise KeyError(f"Key {key} not found")

     def __str__(self):
         return str({nbr: self[nbr] for nbr in self})

     def __repr__(self):
-        return f'{self.__class__.__name__}({self._atlas!r}, {self.NODE_OK!r})'
+        return f"{self.__class__.__name__}({self._atlas!r}, {self.NODE_OK!r})"


-class FilterAdjacency(Mapping):
+class FilterAdjacency(Mapping):  # edgedict
     """A read-only Mapping of Mappings with filtering criteria for nodes and edges.

     It is a view into a dict-of-dict-of-dict data structure, and it selects nodes
@@ -284,7 +327,7 @@ class FilterAdjacency(Mapping):
         return sum(1 for n in self)

     def __iter__(self):
-        try:
+        try:  # check that NODE_OK has attr 'nodes'
             node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas)
         except AttributeError:
             node_ok_shorter = False
@@ -297,18 +340,19 @@ class FilterAdjacency(Mapping):

             def new_node_ok(nbr):
                 return self.NODE_OK(nbr) and self.EDGE_OK(node, nbr)
+
             return FilterAtlas(self._atlas[node], new_node_ok)
-        raise KeyError(f'Key {node} not found')
+        raise KeyError(f"Key {node} not found")

     def __str__(self):
         return str({nbr: self[nbr] for nbr in self})

     def __repr__(self):
         name = self.__class__.__name__
-        return f'{name}({self._atlas!r}, {self.NODE_OK!r}, {self.EDGE_OK!r})'
+        return f"{name}({self._atlas!r}, {self.NODE_OK!r}, {self.EDGE_OK!r})"


-class FilterMultiInner(FilterAdjacency):
+class FilterMultiInner(FilterAdjacency):  # multiedge_seconddict
     """A read-only Mapping of Mappings with filtering criteria for nodes and edges.

     It is a view into a dict-of-dict-of-dict-of-dict data structure, and it selects nodes
@@ -322,7 +366,7 @@ class FilterMultiInner(FilterAdjacency):
     """

     def __iter__(self):
-        try:
+        try:  # check that NODE_OK has attr 'nodes'
             node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas)
         except AttributeError:
             node_ok_shorter = False
@@ -344,11 +388,12 @@ class FilterMultiInner(FilterAdjacency):

             def new_node_ok(key):
                 return self.EDGE_OK(nbr, key)
+
             return FilterAtlas(self._atlas[nbr], new_node_ok)
-        raise KeyError(f'Key {nbr} not found')
+        raise KeyError(f"Key {nbr} not found")


-class FilterMultiAdjacency(FilterAdjacency):
+class FilterMultiAdjacency(FilterAdjacency):  # multiedgedict
     """A read-only Mapping of Mappings with filtering criteria
     for nodes and edges.

@@ -368,5 +413,6 @@ class FilterMultiAdjacency(FilterAdjacency):

             def edge_ok(nbr, key):
                 return self.NODE_OK(nbr) and self.EDGE_OK(node, nbr, key)
+
             return FilterMultiInner(self._atlas[node], self.NODE_OK, edge_ok)
-        raise KeyError(f'Key {node} not found')
+        raise KeyError(f"Key {node} not found")
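
How the view classes above behave in practice (a minimal sketch; `FilterAtlas(d, NODE_OK)` is the two-argument constructor these classes use, with `__init__` outside the hunks shown here):

    from networkx.classes.coreviews import AtlasView, FilterAtlas

    atlas = {1: {"color": "red"}, 2: {"color": "blue"}}
    view = AtlasView(atlas)
    print(view[1])      # {'color': 'red'}, read-only access
    print(view.copy())  # the new copy() materializes a plain, mutable dict

    # FilterAtlas hides keys that fail the NODE_OK predicate.
    fa = FilterAtlas(atlas, lambda n: n != 2)
    print(list(fa))     # [1]
    print(len(fa))      # 1
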
diff --git a/networkx/classes/digraph.py b/networkx/classes/digraph.py
index 923dbb853..fc2374a2b 100644
--- a/networkx/classes/digraph.py
+++ b/networkx/classes/digraph.py
@@ -1,13 +1,21 @@
 """Base class for directed graphs."""
 from copy import deepcopy
 from functools import cached_property
+
 import networkx as nx
 from networkx import convert
 from networkx.classes.coreviews import AdjacencyView
 from networkx.classes.graph import Graph
-from networkx.classes.reportviews import DiDegreeView, InDegreeView, InEdgeView, OutDegreeView, OutEdgeView
+from networkx.classes.reportviews import (
+    DiDegreeView,
+    InDegreeView,
+    InEdgeView,
+    OutDegreeView,
+    OutEdgeView,
+)
 from networkx.exception import NetworkXError
-__all__ = ['DiGraph']
+
+__all__ = ["DiGraph"]


 class _CachedPropertyResetterAdjAndSucc:
@@ -28,12 +36,13 @@ class _CachedPropertyResetterAdjAndSucc:

     def __set__(self, obj, value):
         od = obj.__dict__
-        od['_adj'] = value
-        od['_succ'] = value
-        if 'adj' in od:
-            del od['adj']
-        if 'succ' in od:
-            del od['succ']
+        od["_adj"] = value
+        od["_succ"] = value
+        # reset cached properties
+        if "adj" in od:
+            del od["adj"]
+        if "succ" in od:
+            del od["succ"]


 class _CachedPropertyResetterPred:
@@ -53,9 +62,9 @@ class _CachedPropertyResetterPred:

     def __set__(self, obj, value):
         od = obj.__dict__
-        od['_pred'] = value
-        if 'pred' in od:
-            del od['pred']
+        od["_pred"] = value
+        if "pred" in od:
+            del od["pred"]


 class DiGraph(Graph):
@@ -299,8 +308,9 @@ class DiGraph(Graph):
     >>> G[2][1] is G[2][2]
     True
     """
-    _adj = _CachedPropertyResetterAdjAndSucc()
-    _succ = _adj
+
+    _adj = _CachedPropertyResetterAdjAndSucc()  # type: ignore[assignment]
+    _succ = _adj  # type: ignore[has-type]
     _pred = _CachedPropertyResetterPred()

     def __init__(self, incoming_graph_data=None, **attr):
@@ -336,13 +346,20 @@ class DiGraph(Graph):
         {'day': 'Friday'}

         """
-        self.graph = self.graph_attr_dict_factory()
-        self._node = self.node_dict_factory()
-        self._adj = self.adjlist_outer_dict_factory()
-        self._pred = self.adjlist_outer_dict_factory()
+        self.graph = self.graph_attr_dict_factory()  # dictionary for graph attributes
+        self._node = self.node_dict_factory()  # dictionary for node attr
+        # We store two adjacency lists:
+        # the predecessors of node n are stored in the dict self._pred
+        # the successors of node n are stored in the dict self._succ=self._adj
+        self._adj = self.adjlist_outer_dict_factory()  # empty adjacency dict successor
+        self._pred = self.adjlist_outer_dict_factory()  # predecessor
+        # Note: self._succ = self._adj  # successor
+
         self.__networkx_cache__ = {}
+        # attempt to load graph with data
         if incoming_graph_data is not None:
             convert.to_networkx_graph(incoming_graph_data, create_using=self)
+        # load graph attributes (must be after convert)
         self.graph.update(attr)

     @cached_property
@@ -362,7 +379,7 @@ class DiGraph(Graph):

         For directed graphs, `G.adj` holds outgoing (successor) info.
         """
-        pass
+        return AdjacencyView(self._succ)

     @cached_property
     def succ(self):
@@ -383,7 +400,7 @@ class DiGraph(Graph):

         For directed graphs, `G.adj` is identical to `G.succ`.
         """
-        pass
+        return AdjacencyView(self._succ)

     @cached_property
     def pred(self):
@@ -399,7 +416,7 @@ class DiGraph(Graph):
         by dicts also exists: `for nbr, foovalue in G.pred[node].data('foo'):`
         A default can be set via a `default` argument to the `data` method.
         """
-        pass
+        return AdjacencyView(self._pred)

     def add_node(self, node_for_adding, **attr):
         """Add a single node `node_for_adding` and update node attributes.
@@ -440,7 +457,16 @@ class DiGraph(Graph):
         NetworkX Graphs, though one should be careful that the hash
         doesn't change on mutables.
         """
-        pass
+        if node_for_adding not in self._succ:
+            if node_for_adding is None:
+                raise ValueError("None cannot be a node")
+            self._succ[node_for_adding] = self.adjlist_inner_dict_factory()
+            self._pred[node_for_adding] = self.adjlist_inner_dict_factory()
+            attr_dict = self._node[node_for_adding] = self.node_attr_dict_factory()
+            attr_dict.update(attr)
+        else:  # update attr even if node already exists
+            self._node[node_for_adding].update(attr)
+        nx._clear_cache(self)

     def add_nodes_from(self, nodes_for_adding, **attr):
         """Add multiple nodes.
@@ -503,7 +529,23 @@ class DiGraph(Graph):
         >>> # correct way
         >>> G.add_nodes_from(list(n + 1 for n in G.nodes))
         """
-        pass
+        for n in nodes_for_adding:
+            try:
+                newnode = n not in self._node
+                newdict = attr
+            except TypeError:
+                n, ndict = n
+                newnode = n not in self._node
+                newdict = attr.copy()
+                newdict.update(ndict)
+            if newnode:
+                if n is None:
+                    raise ValueError("None cannot be a node")
+                self._succ[n] = self.adjlist_inner_dict_factory()
+                self._pred[n] = self.adjlist_inner_dict_factory()
+                self._node[n] = self.node_attr_dict_factory()
+            self._node[n].update(newdict)
+        nx._clear_cache(self)

     def remove_node(self, n):
         """Remove node n.
@@ -535,7 +577,18 @@ class DiGraph(Graph):
         []

         """
-        pass
+        try:
+            nbrs = self._succ[n]
+            del self._node[n]
+        except KeyError as err:  # NetworkXError if n not in self
+            raise NetworkXError(f"The node {n} is not in the digraph.") from err
+        for u in nbrs:
+            del self._pred[u][n]  # remove all edges n-u in digraph
+        del self._succ[n]  # remove node from succ
+        for u in self._pred[n]:
+            del self._succ[u][n]  # remove all edges n-u in digraph
+        del self._pred[n]  # remove node from pred
+        nx._clear_cache(self)

     def remove_nodes_from(self, nodes):
         """Remove multiple nodes.
@@ -578,7 +631,19 @@ class DiGraph(Graph):
         >>> # this command will work, since the dictionary underlying graph is not modified
         >>> G.remove_nodes_from(list(n for n in G.nodes if n < 2))
         """
-        pass
+        for n in nodes:
+            try:
+                succs = self._succ[n]
+                del self._node[n]
+                for u in succs:
+                    del self._pred[u][n]  # remove all edges n-u in digraph
+                del self._succ[n]  # now remove node
+                for u in self._pred[n]:
+                    del self._succ[u][n]  # remove all edges n-u in digraph
+                del self._pred[n]  # now remove node
+            except KeyError:
+                pass  # silent failure on remove
+        nx._clear_cache(self)

     def add_edge(self, u_of_edge, v_of_edge, **attr):
         """Add an edge between u and v.
@@ -630,7 +695,26 @@ class DiGraph(Graph):
         >>> G[1][2].update({0: 5})
         >>> G.edges[1, 2].update({0: 5})
         """
-        pass
+        u, v = u_of_edge, v_of_edge
+        # add nodes
+        if u not in self._succ:
+            if u is None:
+                raise ValueError("None cannot be a node")
+            self._succ[u] = self.adjlist_inner_dict_factory()
+            self._pred[u] = self.adjlist_inner_dict_factory()
+            self._node[u] = self.node_attr_dict_factory()
+        if v not in self._succ:
+            if v is None:
+                raise ValueError("None cannot be a node")
+            self._succ[v] = self.adjlist_inner_dict_factory()
+            self._pred[v] = self.adjlist_inner_dict_factory()
+            self._node[v] = self.node_attr_dict_factory()
+        # add the edge
+        datadict = self._adj[u].get(v, self.edge_attr_dict_factory())
+        datadict.update(attr)
+        self._succ[u][v] = datadict
+        self._pred[v][u] = datadict
+        nx._clear_cache(self)

     def add_edges_from(self, ebunch_to_add, **attr):
         """Add all the edges in ebunch_to_add.
@@ -687,7 +771,33 @@ class DiGraph(Graph):
         >>> # right way - note that there will be no self-edge for node 5
         >>> G.add_edges_from(list((5, n) for n in G.nodes))
         """
-        pass
+        for e in ebunch_to_add:
+            ne = len(e)
+            if ne == 3:
+                u, v, dd = e
+            elif ne == 2:
+                u, v = e
+                dd = {}
+            else:
+                raise NetworkXError(f"Edge tuple {e} must be a 2-tuple or 3-tuple.")
+            if u not in self._succ:
+                if u is None:
+                    raise ValueError("None cannot be a node")
+                self._succ[u] = self.adjlist_inner_dict_factory()
+                self._pred[u] = self.adjlist_inner_dict_factory()
+                self._node[u] = self.node_attr_dict_factory()
+            if v not in self._succ:
+                if v is None:
+                    raise ValueError("None cannot be a node")
+                self._succ[v] = self.adjlist_inner_dict_factory()
+                self._pred[v] = self.adjlist_inner_dict_factory()
+                self._node[v] = self.node_attr_dict_factory()
+            datadict = self._adj[u].get(v, self.edge_attr_dict_factory())
+            datadict.update(attr)
+            datadict.update(dd)
+            self._succ[u][v] = datadict
+            self._pred[v][u] = datadict
+        nx._clear_cache(self)

     def remove_edge(self, u, v):
         """Remove the edge between u and v.
@@ -716,7 +826,12 @@ class DiGraph(Graph):
         >>> e = (2, 3, {"weight": 7})  # an edge with attribute data
         >>> G.remove_edge(*e[:2])  # select first part of edge tuple
         """
-        pass
+        try:
+            del self._succ[u][v]
+            del self._pred[v][u]
+        except KeyError as err:
+            raise NetworkXError(f"The edge {u}-{v} not in graph.") from err
+        nx._clear_cache(self)

     def remove_edges_from(self, ebunch):
         """Remove all edges specified in ebunch.
@@ -744,21 +859,26 @@ class DiGraph(Graph):
         >>> ebunch = [(1, 2), (2, 3)]
         >>> G.remove_edges_from(ebunch)
         """
-        pass
+        for e in ebunch:
+            u, v = e[:2]  # ignore edge data
+            if u in self._succ and v in self._succ[u]:
+                del self._succ[u][v]
+                del self._pred[v][u]
+        nx._clear_cache(self)

     def has_successor(self, u, v):
         """Returns True if node u has successor v.

         This is true if graph has the edge u->v.
         """
-        pass
+        return u in self._succ and v in self._succ[u]

     def has_predecessor(self, u, v):
         """Returns True if node u has predecessor v.

         This is true if graph has the edge u<-v.
         """
-        pass
+        return u in self._pred and v in self._pred[u]

     def successors(self, n):
         """Returns an iterator over successor nodes of n.
@@ -784,7 +904,12 @@ class DiGraph(Graph):
         -----
         neighbors() and successors() are the same.
         """
-        pass
+        try:
+            return iter(self._succ[n])
+        except KeyError as err:
+            raise NetworkXError(f"The node {n} is not in the digraph.") from err
+
+    # digraph definitions
     neighbors = successors

     def predecessors(self, n):
@@ -807,7 +932,10 @@ class DiGraph(Graph):
         --------
         successors
         """
-        pass
+        try:
+            return iter(self._pred[n])
+        except KeyError as err:
+            raise NetworkXError(f"The node {n} is not in the digraph.") from err

     @cached_property
     def edges(self):
@@ -870,7 +998,13 @@ class DiGraph(Graph):
         OutEdgeDataView([(0, 1)])

         """
-        pass
+        return OutEdgeView(self)
+
+    # alias out_edges to edges
+    @cached_property
+    def out_edges(self):
+        return OutEdgeView(self)
+
     out_edges.__doc__ = edges.__doc__

     @cached_property
@@ -911,7 +1045,7 @@ class DiGraph(Graph):
         --------
         edges
         """
-        pass
+        return InEdgeView(self)

     @cached_property
     def degree(self):
@@ -955,7 +1089,7 @@ class DiGraph(Graph):
         [(0, 1), (1, 2), (2, 2)]

         """
-        pass
+        return DiDegreeView(self)

     @cached_property
     def in_degree(self):
@@ -1002,7 +1136,7 @@ class DiGraph(Graph):
         [(0, 0), (1, 1), (2, 1)]

         """
-        pass
+        return InDegreeView(self)

     @cached_property
     def out_degree(self):
@@ -1049,7 +1183,7 @@ class DiGraph(Graph):
         [(0, 1), (1, 1), (2, 1)]

         """
-        pass
+        return OutDegreeView(self)

     def clear(self):
         """Remove all nodes and edges from the graph.
@@ -1066,7 +1200,11 @@ class DiGraph(Graph):
         []

         """
-        pass
+        self._succ.clear()
+        self._pred.clear()
+        self._node.clear()
+        self.graph.clear()
+        nx._clear_cache(self)

     def clear_edges(self):
         """Remove all edges from the graph without altering nodes.
@@ -1081,15 +1219,19 @@ class DiGraph(Graph):
         []

         """
-        pass
+        for predecessor_dict in self._pred.values():
+            predecessor_dict.clear()
+        for successor_dict in self._succ.values():
+            successor_dict.clear()
+        nx._clear_cache(self)

     def is_multigraph(self):
         """Returns True if graph is a multigraph, False otherwise."""
-        pass
+        return False

     def is_directed(self):
         """Returns True if graph is directed, False otherwise."""
-        pass
+        return True

     def to_undirected(self, reciprocal=False, as_view=False):
         """Returns an undirected representation of the digraph.
@@ -1148,7 +1290,27 @@ class DiGraph(Graph):
         >>> list(G2.edges)
         [(0, 1)]
         """
-        pass
+        graph_class = self.to_undirected_class()
+        if as_view is True:
+            return nx.graphviews.generic_graph_view(self, graph_class)
+        # deepcopy when not a view
+        G = graph_class()
+        G.graph.update(deepcopy(self.graph))
+        G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items())
+        if reciprocal is True:
+            G.add_edges_from(
+                (u, v, deepcopy(d))
+                for u, nbrs in self._adj.items()
+                for v, d in nbrs.items()
+                if v in self._pred[u]
+            )
+        else:
+            G.add_edges_from(
+                (u, v, deepcopy(d))
+                for u, nbrs in self._adj.items()
+                for v, d in nbrs.items()
+            )
+        return G

     def reverse(self, copy=True):
         """Returns the reverse of the graph.
@@ -1163,4 +1325,10 @@ class DiGraph(Graph):
             If False, the reverse graph is created using a view of
             the original graph.
         """
-        pass
+        if copy:
+            H = self.__class__()
+            H.graph.update(deepcopy(self.graph))
+            H.add_nodes_from((n, deepcopy(d)) for n, d in self.nodes.items())
+            H.add_edges_from((v, u, deepcopy(d)) for u, v, d in self.edges(data=True))
+            return H
+        return nx.reverse_view(self)
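
End-to-end behavior of the restored `DiGraph` methods (an illustrative sketch): the successor/predecessor bookkeeping stays symmetric, and `reverse(copy=True)` deep-copies edge data.

    import networkx as nx

    G = nx.DiGraph()
    G.add_edge(1, 2, weight=3)
    print(list(G.successors(1)))     # [2]
    print(list(G.predecessors(2)))   # [1]
    print(G.pred[2])                 # {1: {'weight': 3}}

    H = G.reverse()                  # deep copy with reversed edges
    print(list(H.edges(data=True)))  # [(2, 1, {'weight': 3})]

    G.remove_node(2)                 # cleans both _succ and _pred
    print(list(G.edges))             # []
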
diff --git a/networkx/classes/filters.py b/networkx/classes/filters.py
index c012402dc..215aa375d 100644
--- a/networkx/classes/filters.py
+++ b/networkx/classes/filters.py
@@ -2,41 +2,57 @@

 These filters return the function used when creating `SubGraph`.
 """
-__all__ = ['no_filter', 'hide_nodes', 'hide_edges', 'hide_multiedges',
-    'hide_diedges', 'hide_multidiedges', 'show_nodes', 'show_edges',
-    'show_multiedges', 'show_diedges', 'show_multidiedges']
+__all__ = [
+    "no_filter",
+    "hide_nodes",
+    "hide_edges",
+    "hide_multiedges",
+    "hide_diedges",
+    "hide_multidiedges",
+    "show_nodes",
+    "show_edges",
+    "show_multiedges",
+    "show_diedges",
+    "show_multidiedges",
+]


 def no_filter(*items):
     """Returns a filter function that always evaluates to True."""
-    pass
+    return True


 def hide_nodes(nodes):
     """Returns a filter function that hides specific nodes."""
-    pass
+    nodes = set(nodes)
+    return lambda node: node not in nodes


 def hide_diedges(edges):
     """Returns a filter function that hides specific directed edges."""
-    pass
+    edges = {(u, v) for u, v in edges}
+    return lambda u, v: (u, v) not in edges


 def hide_edges(edges):
     """Returns a filter function that hides specific undirected edges."""
-    pass
+    alledges = set(edges) | {(v, u) for (u, v) in edges}
+    return lambda u, v: (u, v) not in alledges


 def hide_multidiedges(edges):
     """Returns a filter function that hides specific multi-directed edges."""
-    pass
+    edges = {(u, v, k) for u, v, k in edges}
+    return lambda u, v, k: (u, v, k) not in edges


 def hide_multiedges(edges):
     """Returns a filter function that hides specific multi-undirected edges."""
-    pass
+    alledges = set(edges) | {(v, u, k) for (u, v, k) in edges}
+    return lambda u, v, k: (u, v, k) not in alledges


+# write show_nodes as a class to make SubGraph pickleable
 class show_nodes:
     """Filter class to show specific nodes."""

@@ -49,19 +65,23 @@ class show_nodes:

 def show_diedges(edges):
     """Returns a filter function that shows specific directed edges."""
-    pass
+    edges = {(u, v) for u, v in edges}
+    return lambda u, v: (u, v) in edges


 def show_edges(edges):
     """Returns a filter function that shows specific undirected edges."""
-    pass
+    alledges = set(edges) | {(v, u) for (u, v) in edges}
+    return lambda u, v: (u, v) in alledges


 def show_multidiedges(edges):
     """Returns a filter function that shows specific multi-directed edges."""
-    pass
+    edges = {(u, v, k) for u, v, k in edges}
+    return lambda u, v, k: (u, v, k) in edges


 def show_multiedges(edges):
     """Returns a filter function that shows specific multi-undirected edges."""
-    pass
+    alledges = set(edges) | {(v, u, k) for (u, v, k) in edges}
+    return lambda u, v, k: (u, v, k) in alledges
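
These filter factories are meant to be handed to `nx.subgraph_view`; a small sketch of how they compose (assuming the keyword-only `filter_node`/`filter_edge` parameters of current NetworkX):

    import networkx as nx

    G = nx.path_graph(5)
    view = nx.subgraph_view(
        G,
        filter_node=nx.filters.hide_nodes([4]),
        filter_edge=nx.filters.hide_edges([(1, 2)]),
    )
    print(list(view.nodes))  # [0, 1, 2, 3]
    # (1, 2) is hidden in both orientations; edges at node 4 vanish with it.
    print(list(view.edges))  # [(0, 1), (2, 3)]
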
diff --git a/networkx/classes/function.py b/networkx/classes/function.py
index e7c4ca267..f87b7897f 100644
--- a/networkx/classes/function.py
+++ b/networkx/classes/function.py
@@ -1,18 +1,51 @@
 """Functional interface to graph methods and assorted utilities.
 """
+
 from collections import Counter
 from itertools import chain
+
 import networkx as nx
 from networkx.utils import not_implemented_for, pairwise
-__all__ = ['nodes', 'edges', 'degree', 'degree_histogram', 'neighbors',
-    'number_of_nodes', 'number_of_edges', 'density', 'is_directed',
-    'freeze', 'is_frozen', 'subgraph', 'induced_subgraph', 'edge_subgraph',
-    'restricted_view', 'to_directed', 'to_undirected', 'add_star',
-    'add_path', 'add_cycle', 'create_empty_copy', 'set_node_attributes',
-    'get_node_attributes', 'set_edge_attributes', 'get_edge_attributes',
-    'all_neighbors', 'non_neighbors', 'non_edges', 'common_neighbors',
-    'is_weighted', 'is_negatively_weighted', 'is_empty', 'selfloop_edges',
-    'nodes_with_selfloops', 'number_of_selfloops', 'path_weight', 'is_path']
+
+__all__ = [
+    "nodes",
+    "edges",
+    "degree",
+    "degree_histogram",
+    "neighbors",
+    "number_of_nodes",
+    "number_of_edges",
+    "density",
+    "is_directed",
+    "freeze",
+    "is_frozen",
+    "subgraph",
+    "induced_subgraph",
+    "edge_subgraph",
+    "restricted_view",
+    "to_directed",
+    "to_undirected",
+    "add_star",
+    "add_path",
+    "add_cycle",
+    "create_empty_copy",
+    "set_node_attributes",
+    "get_node_attributes",
+    "set_edge_attributes",
+    "get_edge_attributes",
+    "all_neighbors",
+    "non_neighbors",
+    "non_edges",
+    "common_neighbors",
+    "is_weighted",
+    "is_negatively_weighted",
+    "is_empty",
+    "selfloop_edges",
+    "nodes_with_selfloops",
+    "number_of_selfloops",
+    "path_weight",
+    "is_path",
+]


 def nodes(G):
@@ -20,7 +53,7 @@ def nodes(G):

     This function wraps the :func:`G.nodes <networkx.Graph.nodes>` property.
     """
-    pass
+    return G.nodes()


 def edges(G, nbunch=None):
@@ -32,7 +65,7 @@ def edges(G, nbunch=None):

     This function wraps the :func:`G.edges <networkx.Graph.edges>` property.
     """
-    pass
+    return G.edges(nbunch)


 def degree(G, nbunch=None, weight=None):
@@ -41,7 +74,7 @@ def degree(G, nbunch=None, weight=None):

     This function wraps the :func:`G.degree <networkx.Graph.degree>` property.
     """
-    pass
+    return G.degree(nbunch, weight)


 def neighbors(G, n):
@@ -49,7 +82,7 @@ def neighbors(G, n):

     This function wraps the :func:`G.neighbors <networkx.Graph.neighbors>` function.
     """
-    pass
+    return G.neighbors(n)


 def number_of_nodes(G):
@@ -57,7 +90,7 @@ def number_of_nodes(G):

     This function wraps the :func:`G.number_of_nodes <networkx.Graph.number_of_nodes>` function.
     """
-    pass
+    return G.number_of_nodes()


 def number_of_edges(G):
@@ -65,23 +98,23 @@ def number_of_edges(G):

     This function wraps the :func:`G.number_of_edges <networkx.Graph.number_of_edges>` function.
     """
-    pass
+    return G.number_of_edges()


 def density(G):
-    """Returns the density of a graph.
+    r"""Returns the density of a graph.

     The density for undirected graphs is

     .. math::

-       d = \\frac{2m}{n(n-1)},
+       d = \frac{2m}{n(n-1)},

     and for directed graphs is

     .. math::

-       d = \\frac{m}{n(n-1)},
+       d = \frac{m}{n(n-1)},

     where `n` is the number of nodes and `m`  is the number of edges in `G`.

@@ -93,7 +126,14 @@ def density(G):
     Self loops are counted in the total number of edges so graphs with self
     loops can have density higher than 1.
     """
-    pass
+    n = number_of_nodes(G)
+    m = number_of_edges(G)
+    if m == 0 or n <= 1:
+        return 0
+    d = m / (n * (n - 1))
+    if not G.is_directed():
+        d *= 2
+    return d


 def degree_histogram(G):
@@ -115,17 +155,18 @@ def degree_histogram(G):
     Note: the bins are width one, hence len(list) can be large
     (Order(number_of_edges))
     """
-    pass
+    counts = Counter(d for n, d in G.degree())
+    return [counts.get(i, 0) for i in range(max(counts) + 1 if counts else 0)]


 def is_directed(G):
     """Return True if graph is directed."""
-    pass
+    return G.is_directed()


 def frozen(*args, **kwargs):
     """Dummy method for raising errors when trying to modify frozen graphs"""
-    pass
+    raise nx.NetworkXError("Frozen graph can't be modified")


 def freeze(G):
@@ -163,7 +204,19 @@ def freeze(G):
     --------
     is_frozen
     """
-    pass
+    G.add_node = frozen
+    G.add_nodes_from = frozen
+    G.remove_node = frozen
+    G.remove_nodes_from = frozen
+    G.add_edge = frozen
+    G.add_edges_from = frozen
+    G.add_weighted_edges_from = frozen
+    G.remove_edge = frozen
+    G.remove_edges_from = frozen
+    G.clear = frozen
+    G.clear_edges = frozen
+    G.frozen = True
+    return G


 def is_frozen(G):
@@ -178,7 +231,10 @@ def is_frozen(G):
     --------
     freeze
     """
-    pass
+    try:
+        return G.frozen
+    except AttributeError:
+        return False


 def add_star(G_to_add_to, nodes_for_star, **attr):
@@ -206,7 +262,14 @@ def add_star(G_to_add_to, nodes_for_star, **attr):
     >>> nx.add_star(G, [0, 1, 2, 3])
     >>> nx.add_star(G, [10, 11, 12], weight=2)
     """
-    pass
+    nlist = iter(nodes_for_star)
+    try:
+        v = next(nlist)
+    except StopIteration:
+        return
+    G_to_add_to.add_node(v)
+    edges = ((v, n) for n in nlist)
+    G_to_add_to.add_edges_from(edges, **attr)


 def add_path(G_to_add_to, nodes_for_path, **attr):
@@ -232,7 +295,13 @@ def add_path(G_to_add_to, nodes_for_path, **attr):
     >>> nx.add_path(G, [0, 1, 2, 3])
     >>> nx.add_path(G, [10, 11, 12], weight=7)
     """
-    pass
+    nlist = iter(nodes_for_path)
+    try:
+        first_node = next(nlist)
+    except StopIteration:
+        return
+    G_to_add_to.add_node(first_node)
+    G_to_add_to.add_edges_from(pairwise(chain((first_node,), nlist)), **attr)


 def add_cycle(G_to_add_to, nodes_for_cycle, **attr):
@@ -258,7 +327,15 @@ def add_cycle(G_to_add_to, nodes_for_cycle, **attr):
     >>> nx.add_cycle(G, [0, 1, 2, 3])
     >>> nx.add_cycle(G, [10, 11, 12], weight=7)
     """
-    pass
+    nlist = iter(nodes_for_cycle)
+    try:
+        first_node = next(nlist)
+    except StopIteration:
+        return
+    G_to_add_to.add_node(first_node)
+    G_to_add_to.add_edges_from(
+        pairwise(chain((first_node,), nlist), cyclic=True), **attr
+    )


 def subgraph(G, nbunch):
@@ -281,7 +358,7 @@ def subgraph(G, nbunch):
     -----
     subgraph(G) calls G.subgraph()
     """
-    pass
+    return G.subgraph(nbunch)


 def induced_subgraph(G, nbunch):
@@ -326,7 +403,8 @@ def induced_subgraph(G, nbunch):
     >>> list(H.nodes)
     [0, 1, 3]
     """
-    pass
+    induced_nodes = nx.filters.show_nodes(G.nbunch_iter(nbunch))
+    return nx.subgraph_view(G, filter_node=induced_nodes)


 def edge_subgraph(G, edges):
@@ -369,7 +447,23 @@ def edge_subgraph(G, edges):
     >>> list(H.edges)
     [(0, 1), (3, 4)]
     """
-    pass
+    nxf = nx.filters
+    edges = set(edges)
+    nodes = set()
+    for e in edges:
+        nodes.update(e[:2])
+    induced_nodes = nxf.show_nodes(nodes)
+    if G.is_multigraph():
+        if G.is_directed():
+            induced_edges = nxf.show_multidiedges(edges)
+        else:
+            induced_edges = nxf.show_multiedges(edges)
+    else:
+        if G.is_directed():
+            induced_edges = nxf.show_diedges(edges)
+        else:
+            induced_edges = nxf.show_edges(edges)
+    return nx.subgraph_view(G, filter_node=induced_nodes, filter_edge=induced_edges)


 def restricted_view(G, nodes, edges):
@@ -413,7 +507,19 @@ def restricted_view(G, nodes, edges):
     >>> list(H.edges)
     [(2, 3)]
     """
-    pass
+    nxf = nx.filters
+    hide_nodes = nxf.hide_nodes(nodes)
+    if G.is_multigraph():
+        if G.is_directed():
+            hide_edges = nxf.hide_multidiedges(edges)
+        else:
+            hide_edges = nxf.hide_multiedges(edges)
+    else:
+        if G.is_directed():
+            hide_edges = nxf.hide_diedges(edges)
+        else:
+            hide_edges = nxf.hide_edges(edges)
+    return nx.subgraph_view(G, filter_node=hide_nodes, filter_edge=hide_edges)


 def to_directed(graph):
@@ -423,7 +529,7 @@ def to_directed(graph):
     Note that graph.to_directed defaults to `as_view=False`
     while this function always provides a view.
     """
-    pass
+    return graph.to_directed(as_view=True)


 def to_undirected(graph):
@@ -433,7 +539,7 @@ def to_undirected(graph):
     Note that graph.to_undirected defaults to `as_view=False`
     while this function always provides a view.
     """
-    pass
+    return graph.to_undirected(as_view=True)


 def create_empty_copy(G, with_data=True):
@@ -452,7 +558,11 @@ def create_empty_copy(G, with_data=True):
     empty_graph

     """
-    pass
+    H = G.__class__()
+    H.add_nodes_from(G.nodes(data=with_data))
+    if with_data:
+        H.graph.update(G.graph)
+    return H


 def set_node_attributes(G, values, name=None):
@@ -536,7 +646,24 @@ def set_node_attributes(G, values, name=None):
         False

     """
-    pass
+    # Set node attributes based on type of `values`
+    if name is not None:  # `values` must not be a dict of dict
+        try:  # `values` is a dict
+            for n, v in values.items():
+                try:
+                    G.nodes[n][name] = values[n]
+                except KeyError:
+                    pass
+        except AttributeError:  # `values` is a constant
+            for n in G:
+                G.nodes[n][name] = values
+    else:  # `values` must be dict of dict
+        for n, d in values.items():
+            try:
+                G.nodes[n].update(d)
+            except KeyError:
+                pass
+    nx._clear_cache(G)


 def get_node_attributes(G, name, default=None):
@@ -570,7 +697,9 @@ def get_node_attributes(G, name, default=None):
     >>> color[4]
     'yellow'
     """
-    pass
+    if default is not None:
+        return {n: d.get(name, default) for n, d in G.nodes.items()}
+    return {n: d[name] for n, d in G.nodes.items() if name in d}


 def set_edge_attributes(G, values, name=None):
@@ -674,7 +803,41 @@ def set_edge_attributes(G, values, name=None):
         {(0, 1): 7}

     """
-    pass
+    if name is not None:
+        # `values` does not contain attribute names
+        try:
+            # if `values` is a dict using `.items()` => {edge: value}
+            if G.is_multigraph():
+                for (u, v, key), value in values.items():
+                    try:
+                        G._adj[u][v][key][name] = value
+                    except KeyError:
+                        pass
+            else:
+                for (u, v), value in values.items():
+                    try:
+                        G._adj[u][v][name] = value
+                    except KeyError:
+                        pass
+        except AttributeError:
+            # treat `values` as a constant
+            for u, v, data in G.edges(data=True):
+                data[name] = values
+    else:
+        # `values` consists of dict-of-dict {edge: {attr: value}} shape
+        if G.is_multigraph():
+            for (u, v, key), d in values.items():
+                try:
+                    G._adj[u][v][key].update(d)
+                except KeyError:
+                    pass
+        else:
+            for (u, v), d in values.items():
+                try:
+                    G._adj[u][v].update(d)
+                except KeyError:
+                    pass
+    nx._clear_cache(G)


 def get_edge_attributes(G, name, default=None):
@@ -710,7 +873,13 @@ def get_edge_attributes(G, name, default=None):
     >>> color[(3, 4)]
     'yellow'
     """
-    pass
+    if G.is_multigraph():
+        edges = G.edges(keys=True, data=True)
+    else:
+        edges = G.edges(data=True)
+    if default is not None:
+        return {x[:-1]: x[-1].get(name, default) for x in edges}
+    return {x[:-1]: x[-1][name] for x in edges if name in x[-1]}


 def all_neighbors(graph, node):
@@ -731,7 +900,11 @@ def all_neighbors(graph, node):
     neighbors : iterator
         Iterator of neighbors
     """
-    pass
+    if graph.is_directed():
+        values = chain(graph.predecessors(node), graph.successors(node))
+    else:
+        values = graph.neighbors(node)
+    return values


 def non_neighbors(graph, node):
@@ -750,7 +923,7 @@ def non_neighbors(graph, node):
     non_neighbors : set
         Set of nodes in the graph that are not neighbors of the node.
     """
-    pass
+    return graph._adj.keys() - graph._adj[node].keys() - {node}


 def non_edges(graph):
@@ -766,10 +939,19 @@ def non_edges(graph):
     non_edges : iterator
         Iterator of edges that are not in the graph.
     """
-    pass
-
-
-@not_implemented_for('directed')
+    if graph.is_directed():
+        for u in graph:
+            for v in non_neighbors(graph, u):
+                yield (u, v)
+    else:
+        nodes = set(graph)
+        while nodes:
+            u = nodes.pop()
+            for v in nodes - set(graph[u]):
+                yield (u, v)
+
+
+@not_implemented_for("directed")
 def common_neighbors(G, u, v):
     """Returns the common neighbors of two nodes in a graph.

@@ -797,10 +979,15 @@ def common_neighbors(G, u, v):
     >>> sorted(nx.common_neighbors(G, 0, 1))
     [2, 3, 4]
     """
-    pass
+    if u not in G:
+        raise nx.NetworkXError("u is not in the graph.")
+    if v not in G:
+        raise nx.NetworkXError("v is not in the graph.")

+    return G._adj[u].keys() & G._adj[v].keys() - {u, v}

-def is_weighted(G, edge=None, weight='weight'):
+
+def is_weighted(G, edge=None, weight="weight"):
     """Returns True if `G` has weighted edges.

     Parameters
@@ -839,11 +1026,22 @@ def is_weighted(G, edge=None, weight='weight'):
     True

     """
-    pass
+    if edge is not None:
+        data = G.get_edge_data(*edge)
+        if data is None:
+            msg = f"Edge {edge!r} does not exist."
+            raise nx.NetworkXError(msg)
+        return weight in data
+
+    if is_empty(G):
+        # Special handling required since: all([]) == True
+        return False

+    return all(weight in data for u, v, data in G.edges(data=True))

-@nx._dispatchable(edge_attrs='weight')
-def is_negatively_weighted(G, edge=None, weight='weight'):
+
+@nx._dispatchable(edge_attrs="weight")
+def is_negatively_weighted(G, edge=None, weight="weight"):
     """Returns True if `G` has negatively weighted edges.

     Parameters
@@ -886,7 +1084,14 @@ def is_negatively_weighted(G, edge=None, weight='weight'):
     True

     """
-    pass
+    if edge is not None:
+        data = G.get_edge_data(*edge)
+        if data is None:
+            msg = f"Edge {edge!r} does not exist."
+            raise nx.NetworkXError(msg)
+        return weight in data and data[weight] < 0
+
+    return any(weight in data and data[weight] < 0 for u, v, data in G.edges(data=True))


 def is_empty(G):
@@ -909,7 +1114,7 @@ def is_empty(G):
     is the number of nodes in the graph.

     """
-    pass
+    return not any(G._adj.values())


 def nodes_with_selfloops(G):
@@ -936,7 +1141,7 @@ def nodes_with_selfloops(G):
     [1]

     """
-    pass
+    return (n for n, nbrs in G._adj.items() if n in nbrs)


 def selfloop_edges(G, data=False, keys=False, default=None):
@@ -981,7 +1186,64 @@ def selfloop_edges(G, data=False, keys=False, default=None):
     >>> list(nx.selfloop_edges(G, keys=True, data=True))
     [(1, 1, 0, {})]
     """
-    pass
+    if data is True:
+        if G.is_multigraph():
+            if keys is True:
+                return (
+                    (n, n, k, d)
+                    for n, nbrs in G._adj.items()
+                    if n in nbrs
+                    for k, d in nbrs[n].items()
+                )
+            else:
+                return (
+                    (n, n, d)
+                    for n, nbrs in G._adj.items()
+                    if n in nbrs
+                    for d in nbrs[n].values()
+                )
+        else:
+            return ((n, n, nbrs[n]) for n, nbrs in G._adj.items() if n in nbrs)
+    elif data is not False:
+        if G.is_multigraph():
+            if keys is True:
+                return (
+                    (n, n, k, d.get(data, default))
+                    for n, nbrs in G._adj.items()
+                    if n in nbrs
+                    for k, d in nbrs[n].items()
+                )
+            else:
+                return (
+                    (n, n, d.get(data, default))
+                    for n, nbrs in G._adj.items()
+                    if n in nbrs
+                    for d in nbrs[n].values()
+                )
+        else:
+            return (
+                (n, n, nbrs[n].get(data, default))
+                for n, nbrs in G._adj.items()
+                if n in nbrs
+            )
+    else:
+        if G.is_multigraph():
+            if keys is True:
+                return (
+                    (n, n, k)
+                    for n, nbrs in G._adj.items()
+                    if n in nbrs
+                    for k in nbrs[n]
+                )
+            else:
+                return (
+                    (n, n)
+                    for n, nbrs in G._adj.items()
+                    if n in nbrs
+                    for i in range(len(nbrs[n]))  # for easy edge removal (#4068)
+                )
+        else:
+            return ((n, n) for n, nbrs in G._adj.items() if n in nbrs)


 def number_of_selfloops(G):
@@ -1006,7 +1268,7 @@ def number_of_selfloops(G):
     >>> nx.number_of_selfloops(G)
     1
     """
-    pass
+    return sum(1 for _ in nx.selfloop_edges(G))


 def is_path(G, path):
@@ -1029,7 +1291,10 @@ def is_path(G, path):
         True if `path` is a valid path in `G`

     """
-    pass
+    try:
+        return all(nbr in G._adj[node] for node, nbr in nx.utils.pairwise(path))
+    except (KeyError, TypeError):
+        return False


 def path_weight(G, path, weight):
@@ -1057,4 +1322,14 @@ def path_weight(G, path, weight):
     NetworkXNoPath
         If the specified edge does not exist.
     """
-    pass
+    multigraph = G.is_multigraph()
+    cost = 0
+
+    if not nx.is_path(G, path):
+        raise nx.NetworkXNoPath("path does not exist")
+    for node, nbr in nx.utils.pairwise(path):
+        if multigraph:
+            cost += min(v[weight] for v in G._adj[node][nbr].values())
+        else:
+            cost += G._adj[node][nbr][weight]
+    return cost
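
A short tour of the functional helpers restored above (illustrative only):

    import networkx as nx

    G = nx.path_graph(4)
    nx.set_edge_attributes(G, 5, "cost")      # constant broadcast to every edge
    print(nx.get_edge_attributes(G, "cost"))  # {(0, 1): 5, (1, 2): 5, (2, 3): 5}
    print(nx.path_weight(G, [0, 1, 2, 3], "cost"))  # 5 + 5 + 5 = 15

    nx.freeze(G)            # mutators now raise through frozen()
    print(nx.is_frozen(G))  # True
    try:
        G.add_edge(7, 8)
    except nx.NetworkXError as err:
        print(err)          # Frozen graph can't be modified
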
diff --git a/networkx/classes/graph.py b/networkx/classes/graph.py
index e41c191f7..bf628ed62 100644
--- a/networkx/classes/graph.py
+++ b/networkx/classes/graph.py
@@ -9,12 +9,14 @@ For directed graphs see DiGraph and MultiDiGraph.
 """
 from copy import deepcopy
 from functools import cached_property
+
 import networkx as nx
 from networkx import convert
 from networkx.classes.coreviews import AdjacencyView
 from networkx.classes.reportviews import DegreeView, EdgeView, NodeView
 from networkx.exception import NetworkXError
-__all__ = ['Graph']
+
+__all__ = ["Graph"]


 class _CachedPropertyResetterAdj:
@@ -34,9 +36,9 @@ class _CachedPropertyResetterAdj:

     def __set__(self, obj, value):
         od = obj.__dict__
-        od['_adj'] = value
-        if 'adj' in od:
-            del od['adj']
+        od["_adj"] = value
+        if "adj" in od:
+            del od["adj"]


 class _CachedPropertyResetterNode:
@@ -56,9 +58,9 @@ class _CachedPropertyResetterNode:

     def __set__(self, obj, value):
         od = obj.__dict__
-        od['_node'] = value
-        if 'nodes' in od:
-            del od['nodes']
+        od["_node"] = value
+        if "nodes" in od:
+            del od["nodes"]


 class Graph:
@@ -300,8 +302,10 @@ class Graph:
     >>> G[2][1] is G[2][2]
     True
     """
+
     _adj = _CachedPropertyResetterAdj()
     _node = _CachedPropertyResetterNode()
+
     node_dict_factory = dict
     node_attr_dict_factory = dict
     adjlist_outer_dict_factory = dict
@@ -315,7 +319,7 @@ class Graph:
         If you subclass the base classes, use this to designate
         what directed class to use for `to_directed()` copies.
         """
-        pass
+        return nx.DiGraph

     def to_undirected_class(self):
         """Returns the class to use for empty undirected copies.
@@ -323,7 +327,7 @@ class Graph:
         If you subclass the base classes, use this to designate
         what directed class to use for `to_directed()` copies.
         """
-        pass
+        return Graph

     def __init__(self, incoming_graph_data=None, **attr):
         """Initialize a graph with edges, name, or graph attributes.
@@ -358,12 +362,14 @@ class Graph:
         {'day': 'Friday'}

         """
-        self.graph = self.graph_attr_dict_factory()
-        self._node = self.node_dict_factory()
-        self._adj = self.adjlist_outer_dict_factory()
+        self.graph = self.graph_attr_dict_factory()  # dictionary for graph attributes
+        self._node = self.node_dict_factory()  # empty node attribute dict
+        self._adj = self.adjlist_outer_dict_factory()  # empty adjacency dict
         self.__networkx_cache__ = {}
+        # attempt to load graph with data
         if incoming_graph_data is not None:
             convert.to_networkx_graph(incoming_graph_data, create_using=self)
+        # load graph attributes (must be after convert)
         self.graph.update(attr)

     @cached_property
@@ -383,7 +389,7 @@ class Graph:

         For directed graphs, `G.adj` holds outgoing (successor) info.
         """
-        pass
+        return AdjacencyView(self._adj)

     @property
     def name(self):
@@ -393,7 +399,12 @@ class Graph:
         keyed by the string `"name"` as well as an attribute (technically
         a property) `G.name`. This is entirely user controlled.
         """
-        pass
+        return self.graph.get("name", "")
+
+    @name.setter
+    def name(self, s):
+        self.graph["name"] = s
+        nx._clear_cache(self)

     def __str__(self):
         """Returns a short summary of the graph.
@@ -415,10 +426,13 @@ class Graph:
         'Graph with 3 nodes and 2 edges'

         """
-        return ''.join([type(self).__name__, f' named {self.name!r}' if
-            self.name else '',
-            f' with {self.number_of_nodes()} nodes and {self.number_of_edges()} edges'
-            ])
+        return "".join(
+            [
+                type(self).__name__,
+                f" named {self.name!r}" if self.name else "",
+                f" with {self.number_of_nodes()} nodes and {self.number_of_edges()} edges",
+            ]
+        )

     def __iter__(self):
         """Iterate over the nodes. Use: 'for n in G'.
@@ -539,7 +553,15 @@ class Graph:
         NetworkX Graphs, though one should be careful that the hash
         doesn't change on mutables.
         """
-        pass
+        if node_for_adding not in self._node:
+            if node_for_adding is None:
+                raise ValueError("None cannot be a node")
+            self._adj[node_for_adding] = self.adjlist_inner_dict_factory()
+            attr_dict = self._node[node_for_adding] = self.node_attr_dict_factory()
+            attr_dict.update(attr)
+        else:  # update attr even if node already exists
+            self._node[node_for_adding].update(attr)
+        nx._clear_cache(self)

     def add_nodes_from(self, nodes_for_adding, **attr):
         """Add multiple nodes.
@@ -602,7 +624,22 @@ class Graph:
         >>> # correct way
         >>> G.add_nodes_from(list(n + 1 for n in G.nodes))
         """
-        pass
+        for n in nodes_for_adding:
+            try:
+                newnode = n not in self._node
+                newdict = attr
+            except TypeError:
+                n, ndict = n
+                newnode = n not in self._node
+                newdict = attr.copy()
+                newdict.update(ndict)
+            if newnode:
+                if n is None:
+                    raise ValueError("None cannot be a node")
+                self._adj[n] = self.adjlist_inner_dict_factory()
+                self._node[n] = self.node_attr_dict_factory()
+            self._node[n].update(newdict)
+        nx._clear_cache(self)

     def remove_node(self, n):
         """Remove node n.
@@ -634,7 +671,16 @@ class Graph:
         []

         """
-        pass
+        adj = self._adj
+        try:
+            nbrs = list(adj[n])  # list handles self-loops (allows mutation)
+            del self._node[n]
+        except KeyError as err:  # NetworkXError if n not in self
+            raise NetworkXError(f"The node {n} is not in the graph.") from err
+        for u in nbrs:
+            del adj[u][n]  # remove all edges n-u in graph
+        del adj[n]  # now remove node
+        nx._clear_cache(self)

     def remove_nodes_from(self, nodes):
         """Remove multiple nodes.
@@ -678,7 +724,16 @@ class Graph:
         >>> # this command will work, since the dictionary underlying the graph is not modified
         >>> G.remove_nodes_from(list(n for n in G.nodes if n < 2))
         """
-        pass
+        adj = self._adj
+        for n in nodes:
+            try:
+                del self._node[n]
+                for u in list(adj[n]):  # list handles self-loops
+                    del adj[u][n]  # (allows mutation of dict in loop)
+                del adj[n]
+            except KeyError:
+                pass
+        nx._clear_cache(self)

     @cached_property
     def nodes(self):
@@ -771,7 +826,7 @@ class Graph:
             {0: 1, 1: 2, 2: 3}

         """
-        pass
+        return NodeView(self)

     def number_of_nodes(self):
         """Returns the number of nodes in the graph.
@@ -792,7 +847,7 @@ class Graph:
         >>> G.number_of_nodes()
         3
         """
-        pass
+        return len(self._node)

     def order(self):
         """Returns the number of nodes in the graph.
@@ -813,7 +868,7 @@ class Graph:
         >>> G.order()
         3
         """
-        pass
+        return len(self._node)

     def has_node(self, n):
         """Returns True if the graph contains the node n.
@@ -836,7 +891,10 @@ class Graph:
         True

         """
-        pass
+        try:
+            return n in self._node
+        except TypeError:
+            return False

     def add_edge(self, u_of_edge, v_of_edge, **attr):
         """Add an edge between u and v.
@@ -888,7 +946,24 @@ class Graph:
         >>> G[1][2].update({0: 5})
         >>> G.edges[1, 2].update({0: 5})
         """
-        pass
+        u, v = u_of_edge, v_of_edge
+        # add nodes
+        if u not in self._node:
+            if u is None:
+                raise ValueError("None cannot be a node")
+            self._adj[u] = self.adjlist_inner_dict_factory()
+            self._node[u] = self.node_attr_dict_factory()
+        if v not in self._node:
+            if v is None:
+                raise ValueError("None cannot be a node")
+            self._adj[v] = self.adjlist_inner_dict_factory()
+            self._node[v] = self.node_attr_dict_factory()
+        # add the edge
+        datadict = self._adj[u].get(v, self.edge_attr_dict_factory())
+        datadict.update(attr)
+        self._adj[u][v] = datadict
+        self._adj[v][u] = datadict
+        nx._clear_cache(self)

     def add_edges_from(self, ebunch_to_add, **attr):
         """Add all the edges in ebunch_to_add.
@@ -945,9 +1020,33 @@ class Graph:
         >>> # correct way - note that there will be no self-edge for node 5
         >>> G.add_edges_from(list((5, n) for n in G.nodes))
         """
-        pass
-
-    def add_weighted_edges_from(self, ebunch_to_add, weight='weight', **attr):
+        for e in ebunch_to_add:
+            ne = len(e)
+            if ne == 3:
+                u, v, dd = e
+            elif ne == 2:
+                u, v = e
+                dd = {}  # doesn't need edge_attr_dict_factory
+            else:
+                raise NetworkXError(f"Edge tuple {e} must be a 2-tuple or 3-tuple.")
+            if u not in self._node:
+                if u is None:
+                    raise ValueError("None cannot be a node")
+                self._adj[u] = self.adjlist_inner_dict_factory()
+                self._node[u] = self.node_attr_dict_factory()
+            if v not in self._node:
+                if v is None:
+                    raise ValueError("None cannot be a node")
+                self._adj[v] = self.adjlist_inner_dict_factory()
+                self._node[v] = self.node_attr_dict_factory()
+            datadict = self._adj[u].get(v, self.edge_attr_dict_factory())
+            datadict.update(attr)
+            datadict.update(dd)
+            self._adj[u][v] = datadict
+            self._adj[v][u] = datadict
+        nx._clear_cache(self)
+
+    def add_weighted_edges_from(self, ebunch_to_add, weight="weight", **attr):
         """Add weighted edges in `ebunch_to_add` with specified weight attr

         Parameters
@@ -995,7 +1094,8 @@ class Graph:
         >>> # correct way - note that there will be no self-edge for node 5
         >>> G.add_weighted_edges_from(list((5, n, weight) for n in G.nodes))
         """
-        pass
+        self.add_edges_from(((u, v, {weight: d}) for u, v, d in ebunch_to_add), **attr)
+        nx._clear_cache(self)

     def remove_edge(self, u, v):
         """Remove the edge between u and v.
@@ -1023,7 +1123,13 @@ class Graph:
         >>> e = (2, 3, {"weight": 7})  # an edge with attribute data
         >>> G.remove_edge(*e[:2])  # select first part of edge tuple
         """
-        pass
+        try:
+            del self._adj[u][v]
+            if u != v:  # self-loop needs only one entry removed
+                del self._adj[v][u]
+        except KeyError as err:
+            raise NetworkXError(f"The edge {u}-{v} is not in the graph") from err
+        nx._clear_cache(self)

     def remove_edges_from(self, ebunch):
         """Remove all edges specified in ebunch.
@@ -1051,7 +1157,14 @@ class Graph:
         >>> ebunch = [(1, 2), (2, 3)]
         >>> G.remove_edges_from(ebunch)
         """
-        pass
+        adj = self._adj
+        for e in ebunch:
+            u, v = e[:2]  # ignore edge data if present
+            if u in adj and v in adj[u]:
+                del adj[u][v]
+                if u != v:  # self loop needs only one entry removed
+                    del adj[v][u]
+        nx._clear_cache(self)

     def update(self, edges=None, nodes=None):
         """Update the graph using nodes/edges/graphs as input.
@@ -1140,7 +1253,26 @@ class Graph:
         add_edges_from: add multiple edges to a graph
         add_nodes_from: add multiple nodes to a graph
         """
-        pass
+        if edges is not None:
+            if nodes is not None:
+                self.add_nodes_from(nodes)
+                self.add_edges_from(edges)
+            else:
+                # check if edges is a Graph object
+                try:
+                    graph_nodes = edges.nodes
+                    graph_edges = edges.edges
+                except AttributeError:
+                    # edges is not Graph-like
+                    self.add_edges_from(edges)
+                else:  # edges is Graph-like
+                    self.add_nodes_from(graph_nodes.data())
+                    self.add_edges_from(graph_edges.data())
+                    self.graph.update(edges.graph)
+        elif nodes is not None:
+            self.add_nodes_from(nodes)
+        else:
+            raise NetworkXError("update needs nodes or edges input")

     def has_edge(self, u, v):
         """Returns True if the edge (u, v) is in the graph.
@@ -1178,7 +1310,10 @@ class Graph:
         True

         """
-        pass
+        try:
+            return v in self._adj[u]
+        except KeyError:
+            return False

     def neighbors(self, n):
         """Returns an iterator over all neighbors of node n.
@@ -1218,7 +1353,10 @@ class Graph:
         >>> [n for n in G[0]]
         [1]
         """
-        pass
+        try:
+            return iter(self._adj[n])
+        except KeyError as err:
+            raise NetworkXError(f"The node {n} is not in the graph.") from err

     @cached_property
     def edges(self):
@@ -1275,7 +1413,7 @@ class Graph:
         >>> G.edges(0)  # only edges from node 0
         EdgeDataView([(0, 1)])
         """
-        pass
+        return EdgeView(self)

     def get_edge_data(self, u, v, default=None):
         """Returns the attribute dictionary associated with edge (u, v).
@@ -1318,7 +1456,10 @@ class Graph:
         >>> G.get_edge_data("a", "b", default=0)  # edge not in graph, return 0
         0
         """
-        pass
+        try:
+            return self._adj[u][v]
+        except KeyError:
+            return default

     def adjacency(self):
         """Returns an iterator over (node, adjacency dict) tuples for all nodes.
@@ -1338,7 +1479,7 @@ class Graph:
         [(0, {1: {}}), (1, {0: {}, 2: {}}), (2, {1: {}, 3: {}}), (3, {2: {}})]

         """
-        pass
+        return iter(self._adj.items())

     @cached_property
     def degree(self):
@@ -1376,7 +1517,7 @@ class Graph:
         >>> list(G.degree([0, 1, 2]))
         [(0, 1), (1, 2), (2, 2)]
         """
-        pass
+        return DegreeView(self)

     def clear(self):
         """Remove all nodes and edges from the graph.
@@ -1393,7 +1534,10 @@ class Graph:
         []

         """
-        pass
+        self._adj.clear()
+        self._node.clear()
+        self.graph.clear()
+        nx._clear_cache(self)

     def clear_edges(self):
         """Remove all edges from the graph without altering nodes.
@@ -1407,15 +1551,17 @@ class Graph:
         >>> list(G.edges)
         []
         """
-        pass
+        for nbr_dict in self._adj.values():
+            nbr_dict.clear()
+        nx._clear_cache(self)

     def is_multigraph(self):
         """Returns True if graph is a multigraph, False otherwise."""
-        pass
+        return False

     def is_directed(self):
         """Returns True if graph is directed, False otherwise."""
-        pass
+        return False

     def copy(self, as_view=False):
         """Returns a copy of the graph.
@@ -1494,7 +1640,17 @@ class Graph:
         >>> H = G.copy()

         """
-        pass
+        if as_view is True:
+            return nx.graphviews.generic_graph_view(self)
+        G = self.__class__()
+        G.graph.update(self.graph)
+        G.add_nodes_from((n, d.copy()) for n, d in self._node.items())
+        G.add_edges_from(
+            (u, v, datadict.copy())
+            for u, nbrs in self._adj.items()
+            for v, datadict in nbrs.items()
+        )
+        return G

     def to_directed(self, as_view=False):
         """Returns a directed representation of the graph.
@@ -1538,7 +1694,19 @@ class Graph:
         >>> list(H.edges)
         [(0, 1)]
         """
-        pass
+        graph_class = self.to_directed_class()
+        if as_view is True:
+            return nx.graphviews.generic_graph_view(self, graph_class)
+        # deepcopy when not a view
+        G = graph_class()
+        G.graph.update(deepcopy(self.graph))
+        G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items())
+        G.add_edges_from(
+            (u, v, deepcopy(data))
+            for u, nbrs in self._adj.items()
+            for v, data in nbrs.items()
+        )
+        return G

     def to_undirected(self, as_view=False):
         """Returns an undirected copy of the graph.
@@ -1583,7 +1751,19 @@ class Graph:
         >>> list(G2.edges)
         [(0, 1)]
         """
-        pass
+        graph_class = self.to_undirected_class()
+        if as_view is True:
+            return nx.graphviews.generic_graph_view(self, graph_class)
+        # deepcopy when not a view
+        G = graph_class()
+        G.graph.update(deepcopy(self.graph))
+        G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items())
+        G.add_edges_from(
+            (u, v, deepcopy(d))
+            for u, nbrs in self._adj.items()
+            for v, d in nbrs.items()
+        )
+        return G

     def subgraph(self, nodes):
         """Returns a SubGraph view of the subgraph induced on `nodes`.
@@ -1650,7 +1830,14 @@ class Graph:
         >>> list(H.edges)
         [(0, 1), (1, 2)]
         """
-        pass
+        induced_nodes = nx.filters.show_nodes(self.nbunch_iter(nodes))
+        # if already a subgraph, don't make a chain
+        subgraph = nx.subgraph_view
+        if hasattr(self, "_NODE_OK"):
+            return subgraph(
+                self._graph, filter_node=induced_nodes, filter_edge=self._EDGE_OK
+            )
+        return subgraph(self, filter_node=induced_nodes)

     def edge_subgraph(self, edges):
         """Returns the subgraph induced by the specified edges.
@@ -1690,7 +1877,7 @@ class Graph:
         [(0, 1), (3, 4)]

         """
-        pass
+        return nx.edge_subgraph(self, edges)

     def size(self, weight=None):
         """Returns the number of edges or total of all edge weights.
@@ -1728,7 +1915,12 @@ class Graph:
         >>> G.size(weight="weight")
         6.0
         """
-        pass
+        s = sum(d for v, d in self.degree(weight=weight))
+        # If `weight` is None, the sum of the degrees is guaranteed to be
+        # even, so we can perform integer division and hence return an
+        # integer. Otherwise, the sum of the weighted degrees is not
+        # guaranteed to be an integer, so we perform "real" division.
+        return s // 2 if weight is None else s / 2

     def number_of_edges(self, u=None, v=None):
         """Returns the number of edges between two nodes.
@@ -1776,7 +1968,11 @@ class Graph:
         1

         """
-        pass
+        if u is None:
+            return int(self.size())
+        if v in self._adj[u]:
+            return 1
+        return 0

     def nbunch_iter(self, nbunch=None):
         """Returns an iterator over nodes contained in nbunch that are
@@ -1818,4 +2014,30 @@ class Graph:
         or None, a :exc:`NetworkXError` is raised.  Also, if any object in
         nbunch is not hashable, a :exc:`NetworkXError` is raised.
         """
-        pass
+        if nbunch is None:  # include all nodes via iterator
+            bunch = iter(self._adj)
+        elif nbunch in self:  # if nbunch is a single node
+            bunch = iter([nbunch])
+        else:  # if nbunch is a sequence of nodes
+
+            def bunch_iter(nlist, adj):
+                try:
+                    for n in nlist:
+                        if n in adj:
+                            yield n
+                except TypeError as err:
+                    exc, message = err, err.args[0]
+                    # capture error for non-sequence/iterator nbunch.
+                    if "iter" in message:
+                        exc = NetworkXError(
+                            "nbunch is not a node or a sequence of nodes."
+                        )
+                    # capture error for unhashable node.
+                    if "hashable" in message:
+                        exc = NetworkXError(
+                            f"Node {n} in sequence nbunch is not a valid node."
+                        )
+                    raise exc
+
+            bunch = bunch_iter(nbunch, self._adj)
+        return bunch
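
The `Graph` methods filled in above compose as follows (a hedged, doctest-style sketch; node labels and weights are arbitrary):

    >>> G = nx.Graph(day="Friday")
    >>> G.add_edge(1, 2, weight=4)
    >>> G.adj[1][2] is G.adj[2][1]  # undirected edges share one attribute dict
    True
    >>> G.number_of_edges(), G.size(weight="weight")
    (1, 4.0)
    >>> sorted(G.nbunch_iter([2, 3, 1]))  # nodes absent from G are skipped
    [1, 2]
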
diff --git a/networkx/classes/graphviews.py b/networkx/classes/graphviews.py
index d47d41c1b..275bbd71c 100644
--- a/networkx/classes/graphviews.py
+++ b/networkx/classes/graphviews.py
@@ -24,11 +24,18 @@ with induced subgraphs.
 Often it is easiest to use .copy() to avoid chains.
 """
 import networkx as nx
-from networkx.classes.coreviews import FilterAdjacency, FilterAtlas, FilterMultiAdjacency, UnionAdjacency, UnionMultiAdjacency
+from networkx.classes.coreviews import (
+    FilterAdjacency,
+    FilterAtlas,
+    FilterMultiAdjacency,
+    UnionAdjacency,
+    UnionMultiAdjacency,
+)
 from networkx.classes.filters import no_filter
 from networkx.exception import NetworkXError
 from networkx.utils import deprecate_positional_args, not_implemented_for
-__all__ = ['generic_graph_view', 'subgraph_view', 'reverse_view']
+
+__all__ = ["generic_graph_view", "subgraph_view", "reverse_view"]


 def generic_graph_view(G, create_using=None):
@@ -93,10 +100,39 @@ def generic_graph_view(G, create_using=None):
     >>> type(viewDG)
     <class 'networkx.classes.digraph.DiGraph'>
     """
-    pass
-
-
-@deprecate_positional_args(version='3.4')
+    if create_using is None:
+        newG = G.__class__()
+    else:
+        newG = nx.empty_graph(0, create_using)
+    if G.is_multigraph() != newG.is_multigraph():
+        raise NetworkXError("Multigraph for G must agree with create_using")
+    newG = nx.freeze(newG)
+
+    # create view by assigning attributes from G
+    newG._graph = G
+    newG.graph = G.graph
+
+    newG._node = G._node
+    if newG.is_directed():
+        if G.is_directed():
+            newG._succ = G._succ
+            newG._pred = G._pred
+            # newG._adj is synced with _succ
+        else:
+            newG._succ = G._adj
+            newG._pred = G._adj
+            # newG._adj is synced with _succ
+    elif G.is_directed():
+        if G.is_multigraph():
+            newG._adj = UnionMultiAdjacency(G._succ, G._pred)
+        else:
+            newG._adj = UnionAdjacency(G._succ, G._pred)
+    else:
+        newG._adj = G._adj
+    return newG
+
+
+@deprecate_positional_args(version="3.4")
 def subgraph_view(G, *, filter_node=no_filter, filter_edge=no_filter):
     """View of `G` applying a filter on nodes and edges.

@@ -168,10 +204,37 @@ def subgraph_view(G, *, filter_node=no_filter, filter_edge=no_filter):
     >>> view.edges()
     EdgeView([(0, 1), (1, 2), (2, 3)])
     """
-    pass
+    newG = nx.freeze(G.__class__())
+    newG._NODE_OK = filter_node
+    newG._EDGE_OK = filter_edge
+
+    # create view by assigning attributes from G
+    newG._graph = G
+    newG.graph = G.graph
+
+    newG._node = FilterAtlas(G._node, filter_node)
+    if G.is_multigraph():
+        Adj = FilterMultiAdjacency
+
+        def reverse_edge(u, v, k=None):
+            return filter_edge(v, u, k)
+
+    else:
+        Adj = FilterAdjacency
+
+        def reverse_edge(u, v, k=None):
+            return filter_edge(v, u)
+
+    if G.is_directed():
+        newG._succ = Adj(G._succ, filter_node, filter_edge)
+        newG._pred = Adj(G._pred, filter_node, reverse_edge)
+        # newG._adj is synced with _succ
+    else:
+        newG._adj = Adj(G._adj, filter_node, filter_edge)
+    return newG


-@not_implemented_for('undirected')
+@not_implemented_for("undirected")
 def reverse_view(G):
     """View of `G` with edge directions reversed

@@ -200,4 +263,7 @@ def reverse_view(G):
     >>> view.edges()
     OutEdgeView([(2, 1), (3, 2)])
     """
-    pass
+    newG = generic_graph_view(G)
+    newG._succ, newG._pred = G._pred, G._succ
+    # newG._adj is synced with _succ
+    return newG
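
The three view constructors above share the same freeze-and-alias pattern: the view owns no data, it re-exposes `G`'s dicts (possibly filtered, or with `_succ`/`_pred` swapped) and is frozen so mutating calls raise `NetworkXError`. A doctest-style sketch (illustrative only):

    >>> DG = nx.path_graph(4, create_using=nx.DiGraph)
    >>> view = nx.subgraph_view(DG, filter_node=lambda n: n != 3)
    >>> list(view.edges)
    [(0, 1), (1, 2)]
    >>> list(nx.reverse_view(DG).edges)
    [(1, 0), (2, 1), (3, 2)]
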
diff --git a/networkx/classes/multidigraph.py b/networkx/classes/multidigraph.py
index 3ae6b64bc..ad048cd5a 100644
--- a/networkx/classes/multidigraph.py
+++ b/networkx/classes/multidigraph.py
@@ -1,14 +1,22 @@
 """Base class for MultiDiGraph."""
 from copy import deepcopy
 from functools import cached_property
+
 import networkx as nx
 from networkx import convert
 from networkx.classes.coreviews import MultiAdjacencyView
 from networkx.classes.digraph import DiGraph
 from networkx.classes.multigraph import MultiGraph
-from networkx.classes.reportviews import DiMultiDegreeView, InMultiDegreeView, InMultiEdgeView, OutMultiDegreeView, OutMultiEdgeView
+from networkx.classes.reportviews import (
+    DiMultiDegreeView,
+    InMultiDegreeView,
+    InMultiEdgeView,
+    OutMultiDegreeView,
+    OutMultiEdgeView,
+)
 from networkx.exception import NetworkXError
-__all__ = ['MultiDiGraph']
+
+__all__ = ["MultiDiGraph"]


 class MultiDiGraph(MultiGraph, DiGraph):
@@ -283,10 +291,14 @@ class MultiDiGraph(MultiGraph, DiGraph):
     >>> G[2][1] is G[2][2]
     True
     """
+
+    # node_dict_factory = dict    # already assigned in Graph
+    # adjlist_outer_dict_factory = dict
+    # adjlist_inner_dict_factory = dict
     edge_key_dict_factory = dict
+    # edge_attr_dict_factory = dict

-    def __init__(self, incoming_graph_data=None, multigraph_input=None, **attr
-        ):
+    def __init__(self, incoming_graph_data=None, multigraph_input=None, **attr):
         """Initialize a graph with edges, name, or graph attributes.

         Parameters
@@ -332,19 +344,19 @@ class MultiDiGraph(MultiGraph, DiGraph):
         {'day': 'Friday'}

         """
-        if isinstance(incoming_graph_data, dict
-            ) and multigraph_input is not False:
+        # multigraph_input can be None/True/False. So check "is not False"
+        if isinstance(incoming_graph_data, dict) and multigraph_input is not False:
             DiGraph.__init__(self)
             try:
-                convert.from_dict_of_dicts(incoming_graph_data,
-                    create_using=self, multigraph_input=True)
+                convert.from_dict_of_dicts(
+                    incoming_graph_data, create_using=self, multigraph_input=True
+                )
                 self.graph.update(attr)
             except Exception as err:
                 if multigraph_input is True:
                     raise nx.NetworkXError(
-                        f"""converting multigraph_input raised:
-{type(err)}: {err}"""
-                        )
+                        f"converting multigraph_input raised:\n{type(err)}: {err}"
+                    )
                 DiGraph.__init__(self, incoming_graph_data, **attr)
         else:
             DiGraph.__init__(self, incoming_graph_data, **attr)
@@ -366,7 +378,7 @@ class MultiDiGraph(MultiGraph, DiGraph):

         For directed graphs, `G.adj` holds outgoing (successor) info.
         """
-        pass
+        return MultiAdjacencyView(self._succ)

     @cached_property
     def succ(self):
@@ -385,7 +397,7 @@ class MultiDiGraph(MultiGraph, DiGraph):

         For directed graphs, `G.succ` is identical to `G.adj`.
         """
-        pass
+        return MultiAdjacencyView(self._succ)

     @cached_property
     def pred(self):
@@ -399,7 +411,7 @@ class MultiDiGraph(MultiGraph, DiGraph):
         Iterating over G.pred behaves like a dict. Useful idioms include
         `for nbr, datadict in G.pred[n].items():`.
         """
-        pass
+        return MultiAdjacencyView(self._pred)

     def add_edge(self, u_for_edge, v_for_edge, key=None, **attr):
         """Add an edge between u and v.
@@ -467,7 +479,37 @@ class MultiDiGraph(MultiGraph, DiGraph):
         >>> G[1][2][0].update({0: 5})
         >>> G.edges[1, 2, 0].update({0: 5})
         """
-        pass
+        u, v = u_for_edge, v_for_edge
+        # add nodes
+        if u not in self._succ:
+            if u is None:
+                raise ValueError("None cannot be a node")
+            self._succ[u] = self.adjlist_inner_dict_factory()
+            self._pred[u] = self.adjlist_inner_dict_factory()
+            self._node[u] = self.node_attr_dict_factory()
+        if v not in self._succ:
+            if v is None:
+                raise ValueError("None cannot be a node")
+            self._succ[v] = self.adjlist_inner_dict_factory()
+            self._pred[v] = self.adjlist_inner_dict_factory()
+            self._node[v] = self.node_attr_dict_factory()
+        if key is None:
+            key = self.new_edge_key(u, v)
+        if v in self._succ[u]:
+            keydict = self._adj[u][v]
+            datadict = keydict.get(key, self.edge_attr_dict_factory())
+            datadict.update(attr)
+            keydict[key] = datadict
+        else:
+            # selfloops work this way without special treatment
+            datadict = self.edge_attr_dict_factory()
+            datadict.update(attr)
+            keydict = self.edge_key_dict_factory()
+            keydict[key] = datadict
+            self._succ[u][v] = keydict
+            self._pred[v][u] = keydict
+        nx._clear_cache(self)
+        return key

     def remove_edge(self, u, v, key=None):
         """Remove an edge between u and v.
@@ -525,7 +567,24 @@ class MultiDiGraph(MultiGraph, DiGraph):
         OutMultiEdgeView([(1, 2, 'second')])

         """
-        pass
+        try:
+            d = self._adj[u][v]
+        except KeyError as err:
+            raise NetworkXError(f"The edge {u}-{v} is not in the graph.") from err
+        # remove the edge with specified data
+        if key is None:
+            d.popitem()
+        else:
+            try:
+                del d[key]
+            except KeyError as err:
+                msg = f"The edge {u}-{v} with key {key} is not in the graph."
+                raise NetworkXError(msg) from err
+        if len(d) == 0:
+            # remove the key entries if last edge
+            del self._succ[u][v]
+            del self._pred[v][u]
+        nx._clear_cache(self)

     @cached_property
     def edges(self):
@@ -606,7 +665,13 @@ class MultiDiGraph(MultiGraph, DiGraph):
         --------
         in_edges, out_edges
         """
-        pass
+        return OutMultiEdgeView(self)
+
+    # alias out_edges to edges
+    @cached_property
+    def out_edges(self):
+        return OutMultiEdgeView(self)
+
     out_edges.__doc__ = edges.__doc__

     @cached_property
@@ -641,7 +706,7 @@ class MultiDiGraph(MultiGraph, DiGraph):
         --------
         edges
         """
-        pass
+        return InMultiEdgeView(self)

     @cached_property
     def degree(self):
@@ -689,7 +754,7 @@ class MultiDiGraph(MultiGraph, DiGraph):
         [(0, 2), (1, 3), (2, 2)]

         """
-        pass
+        return DiMultiDegreeView(self)

     @cached_property
     def in_degree(self):
@@ -740,7 +805,7 @@ class MultiDiGraph(MultiGraph, DiGraph):
         [(0, 0), (1, 2), (2, 1)]

         """
-        pass
+        return InMultiDegreeView(self)

     @cached_property
     def out_degree(self):
@@ -790,15 +855,15 @@ class MultiDiGraph(MultiGraph, DiGraph):
         [(0, 2), (1, 1), (2, 1)]

         """
-        pass
+        return OutMultiDegreeView(self)

     def is_multigraph(self):
         """Returns True if graph is a multigraph, False otherwise."""
-        pass
+        return True

     def is_directed(self):
         """Returns True if graph is directed, False otherwise."""
-        pass
+        return True

     def to_undirected(self, reciprocal=False, as_view=False):
         """Returns an undirected representation of the digraph.
@@ -851,7 +916,29 @@ class MultiDiGraph(MultiGraph, DiGraph):
         >>> list(G2.edges)
         [(0, 1)]
         """
-        pass
+        graph_class = self.to_undirected_class()
+        if as_view is True:
+            return nx.graphviews.generic_graph_view(self, graph_class)
+        # deepcopy when not a view
+        G = graph_class()
+        G.graph.update(deepcopy(self.graph))
+        G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items())
+        if reciprocal is True:
+            G.add_edges_from(
+                (u, v, key, deepcopy(data))
+                for u, nbrs in self._adj.items()
+                for v, keydict in nbrs.items()
+                for key, data in keydict.items()
+                if v in self._pred[u] and key in self._pred[u][v]
+            )
+        else:
+            G.add_edges_from(
+                (u, v, key, deepcopy(data))
+                for u, nbrs in self._adj.items()
+                for v, keydict in nbrs.items()
+                for key, data in keydict.items()
+            )
+        return G

     def reverse(self, copy=True):
         """Returns the reverse of the graph.
@@ -866,4 +953,13 @@ class MultiDiGraph(MultiGraph, DiGraph):
             If False, the reverse graph is created using a view of
             the original graph.
         """
-        pass
+        if copy:
+            H = self.__class__()
+            H.graph.update(deepcopy(self.graph))
+            H.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items())
+            H.add_edges_from(
+                (v, u, k, deepcopy(d))
+                for u, v, k, d in self.edges(keys=True, data=True)
+            )
+            return H
+        return nx.reverse_view(self)
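
Key allocation, degree reporting, and `reverse` in `MultiDiGraph` can be exercised together (a doctest-style sketch; values are arbitrary):

    >>> G = nx.MultiDiGraph()
    >>> G.add_edge(1, 2, weight=3)
    0
    >>> G.add_edge(1, 2)  # a parallel edge gets the next unused key
    1
    >>> G.in_degree(2)
    2
    >>> list(G.reverse().edges(keys=True))
    [(2, 1, 0), (2, 1, 1)]
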
diff --git a/networkx/classes/multigraph.py b/networkx/classes/multigraph.py
index 5fe16870d..d1b263265 100644
--- a/networkx/classes/multigraph.py
+++ b/networkx/classes/multigraph.py
@@ -1,12 +1,14 @@
 """Base class for MultiGraph."""
 from copy import deepcopy
 from functools import cached_property
+
 import networkx as nx
 from networkx import NetworkXError, convert
 from networkx.classes.coreviews import MultiAdjacencyView
 from networkx.classes.graph import Graph
 from networkx.classes.reportviews import MultiDegreeView, MultiEdgeView
-__all__ = ['MultiGraph']
+
+__all__ = ["MultiGraph"]


 class MultiGraph(Graph):
@@ -281,7 +283,12 @@ class MultiGraph(Graph):
     >>> G[2][1] is G[2][2]
     True
     """
+
+    # node_dict_factory = dict    # already assigned in Graph
+    # adjlist_outer_dict_factory = dict
+    # adjlist_inner_dict_factory = dict
     edge_key_dict_factory = dict
+    # edge_attr_dict_factory = dict

     def to_directed_class(self):
         """Returns the class to use for empty directed copies.
@@ -289,7 +296,7 @@ class MultiGraph(Graph):
         If you subclass the base classes, use this to designate
         what directed class to use for `to_directed()` copies.
         """
-        pass
+        return nx.MultiDiGraph

     def to_undirected_class(self):
         """Returns the class to use for empty undirected copies.
@@ -297,10 +304,9 @@ class MultiGraph(Graph):
         If you subclass the base classes, use this to designate
         what undirected class to use for `to_undirected()` copies.
         """
-        pass
+        return MultiGraph

-    def __init__(self, incoming_graph_data=None, multigraph_input=None, **attr
-        ):
+    def __init__(self, incoming_graph_data=None, multigraph_input=None, **attr):
         """Initialize a graph with edges, name, or graph attributes.

         Parameters
@@ -346,19 +352,19 @@ class MultiGraph(Graph):
         {'day': 'Friday'}

         """
-        if isinstance(incoming_graph_data, dict
-            ) and multigraph_input is not False:
+        # multigraph_input can be None/True/False. So check "is not False"
+        if isinstance(incoming_graph_data, dict) and multigraph_input is not False:
             Graph.__init__(self)
             try:
-                convert.from_dict_of_dicts(incoming_graph_data,
-                    create_using=self, multigraph_input=True)
+                convert.from_dict_of_dicts(
+                    incoming_graph_data, create_using=self, multigraph_input=True
+                )
                 self.graph.update(attr)
             except Exception as err:
                 if multigraph_input is True:
                     raise nx.NetworkXError(
-                        f"""converting multigraph_input raised:
-{type(err)}: {err}"""
-                        )
+                        f"converting multigraph_input raised:\n{type(err)}: {err}"
+                    )
                 Graph.__init__(self, incoming_graph_data, **attr)
         else:
             Graph.__init__(self, incoming_graph_data, **attr)
@@ -390,7 +396,7 @@ class MultiGraph(Graph):

         For directed graphs, `G.adj` holds outgoing (successor) info.
         """
-        pass
+        return MultiAdjacencyView(self._adj)

     def new_edge_key(self, u, v):
         """Returns an unused key for edges between nodes `u` and `v`.
@@ -412,7 +418,14 @@ class MultiGraph(Graph):
         -------
         key : int
         """
-        pass
+        try:
+            keydict = self._adj[u][v]
+        except KeyError:
+            return 0
+        key = len(keydict)
+        while key in keydict:
+            key += 1
+        return key

     def add_edge(self, u_for_edge, v_for_edge, key=None, **attr):
         """Add an edge between u and v.
@@ -480,7 +493,35 @@ class MultiGraph(Graph):
         >>> G[1][2][0].update({0: 5})
         >>> G.edges[1, 2, 0].update({0: 5})
         """
-        pass
+        u, v = u_for_edge, v_for_edge
+        # add nodes
+        if u not in self._adj:
+            if u is None:
+                raise ValueError("None cannot be a node")
+            self._adj[u] = self.adjlist_inner_dict_factory()
+            self._node[u] = self.node_attr_dict_factory()
+        if v not in self._adj:
+            if v is None:
+                raise ValueError("None cannot be a node")
+            self._adj[v] = self.adjlist_inner_dict_factory()
+            self._node[v] = self.node_attr_dict_factory()
+        if key is None:
+            key = self.new_edge_key(u, v)
+        if v in self._adj[u]:
+            keydict = self._adj[u][v]
+            datadict = keydict.get(key, self.edge_attr_dict_factory())
+            datadict.update(attr)
+            keydict[key] = datadict
+        else:
+            # selfloops work this way without special treatment
+            datadict = self.edge_attr_dict_factory()
+            datadict.update(attr)
+            keydict = self.edge_key_dict_factory()
+            keydict[key] = datadict
+            self._adj[u][v] = keydict
+            self._adj[v][u] = keydict
+        nx._clear_cache(self)
+        return key

     def add_edges_from(self, ebunch_to_add, **attr):
         """Add all the edges in ebunch_to_add.
@@ -550,7 +591,34 @@ class MultiGraph(Graph):
         >>> # right way - note that there will be no self-edge for node 5
         >>> assigned_keys = G.add_edges_from(list((5, n) for n in G.nodes))
         """
-        pass
+        keylist = []
+        for e in ebunch_to_add:
+            ne = len(e)
+            if ne == 4:
+                u, v, key, dd = e
+            elif ne == 3:
+                u, v, dd = e
+                key = None
+            elif ne == 2:
+                u, v = e
+                dd = {}
+                key = None
+            else:
+                msg = f"Edge tuple {e} must be a 2-tuple, 3-tuple or 4-tuple."
+                raise NetworkXError(msg)
+            ddd = {}
+            ddd.update(attr)
+            try:
+                ddd.update(dd)
+            except (TypeError, ValueError):
+                if ne != 3:
+                    raise
+                key = dd  # ne == 3 with 3rd value not dict, must be a key
+            key = self.add_edge(u, v, key)
+            self[u][v][key].update(ddd)
+            keylist.append(key)
+        nx._clear_cache(self)
+        return keylist

     def remove_edge(self, u, v, key=None):
         """Remove an edge between u and v.
@@ -611,7 +679,25 @@ class MultiGraph(Graph):
         MultiEdgeView([(1, 2, 'second')])

         """
-        pass
+        try:
+            d = self._adj[u][v]
+        except KeyError as err:
+            raise NetworkXError(f"The edge {u}-{v} is not in the graph.") from err
+        # remove the edge with specified data
+        if key is None:
+            d.popitem()
+        else:
+            try:
+                del d[key]
+            except KeyError as err:
+                msg = f"The edge {u}-{v} with key {key} is not in the graph."
+                raise NetworkXError(msg) from err
+        if len(d) == 0:
+            # remove the key entries if last edge
+            del self._adj[u][v]
+            if u != v:  # check for selfloop
+                del self._adj[v][u]
+        nx._clear_cache(self)

     def remove_edges_from(self, ebunch):
         """Remove all edges specified in ebunch.
@@ -665,7 +751,12 @@ class MultiGraph(Graph):
         MultiEdgeView([(0, 1, 'x'), (0, 1, 'y')])

         """
-        pass
+        for e in ebunch:
+            try:
+                self.remove_edge(*e[:3])
+            except NetworkXError:
+                pass
+        nx._clear_cache(self)

     def has_edge(self, u, v, key=None):
         """Returns True if the graph has an edge between nodes u and v.
@@ -719,7 +810,13 @@ class MultiGraph(Graph):
         True

         """
-        pass
+        try:
+            if key is None:
+                return v in self._adj[u]
+            else:
+                return key in self._adj[u][v]
+        except KeyError:
+            return False

     @cached_property
     def edges(self):
@@ -795,7 +892,7 @@ class MultiGraph(Graph):
         >>> G.edges(0)
         MultiEdgeDataView([(0, 1)])
         """
-        pass
+        return MultiEdgeView(self)

     def get_edge_data(self, u, v, key=None, default=None):
         """Returns the attribute dictionary associated with edge (u, v,
@@ -866,7 +963,13 @@ class MultiGraph(Graph):
         >>> G.get_edge_data(1, 0, 0)  # specific key gives back
         {'weight': 5}
         """
-        pass
+        try:
+            if key is None:
+                return self._adj[u][v]
+            else:
+                return self._adj[u][v][key]
+        except KeyError:
+            return default

     @cached_property
     def degree(self):
@@ -906,15 +1009,15 @@ class MultiGraph(Graph):
         [(0, 1), (1, 2)]

         """
-        pass
+        return MultiDegreeView(self)

     def is_multigraph(self):
         """Returns True if graph is a multigraph, False otherwise."""
-        pass
+        return True

     def is_directed(self):
         """Returns True if graph is directed, False otherwise."""
-        pass
+        return False

     def copy(self, as_view=False):
         """Returns a copy of the graph.
@@ -993,7 +1096,18 @@ class MultiGraph(Graph):
         >>> H = G.copy()

         """
-        pass
+        if as_view is True:
+            return nx.graphviews.generic_graph_view(self)
+        G = self.__class__()
+        G.graph.update(self.graph)
+        G.add_nodes_from((n, d.copy()) for n, d in self._node.items())
+        G.add_edges_from(
+            (u, v, key, datadict.copy())
+            for u, nbrs in self._adj.items()
+            for v, keydict in nbrs.items()
+            for key, datadict in keydict.items()
+        )
+        return G

     def to_directed(self, as_view=False):
         """Returns a directed representation of the graph.
@@ -1041,7 +1155,20 @@ class MultiGraph(Graph):
         >>> list(H.edges)
         [(0, 1, 0)]
         """
-        pass
+        graph_class = self.to_directed_class()
+        if as_view is True:
+            return nx.graphviews.generic_graph_view(self, graph_class)
+        # deepcopy when not a view
+        G = graph_class()
+        G.graph.update(deepcopy(self.graph))
+        G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items())
+        G.add_edges_from(
+            (u, v, key, deepcopy(datadict))
+            for u, nbrs in self.adj.items()
+            for v, keydict in nbrs.items()
+            for key, datadict in keydict.items()
+        )
+        return G

     def to_undirected(self, as_view=False):
         """Returns an undirected copy of the graph.
@@ -1081,7 +1208,20 @@ class MultiGraph(Graph):
         >>> list(G2.edges)
         [(0, 1, 0), (0, 1, 1), (1, 2, 0)]
         """
-        pass
+        graph_class = self.to_undirected_class()
+        if as_view is True:
+            return nx.graphviews.generic_graph_view(self, graph_class)
+        # deepcopy when not a view
+        G = graph_class()
+        G.graph.update(deepcopy(self.graph))
+        G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items())
+        G.add_edges_from(
+            (u, v, key, deepcopy(datadict))
+            for u, nbrs in self._adj.items()
+            for v, keydict in nbrs.items()
+            for key, datadict in keydict.items()
+        )
+        return G

     def number_of_edges(self, u=None, v=None):
         """Returns the number of edges between two nodes.
@@ -1133,4 +1273,10 @@ class MultiGraph(Graph):
             1

         """
-        pass
+        if u is None:
+            return self.size()
+        try:
+            edgedata = self._adj[u][v]
+        except KeyError:
+            return 0  # no such edge
+        return len(edgedata)
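
The `MultiGraph` edge-key handling above accepts 2-, 3-, and 4-tuples in `add_edges_from`; a doctest-style sketch (illustrative only):

    >>> G = nx.MultiGraph()
    >>> keys = G.add_edges_from([(0, 1), (0, 1), (0, 1, "custom", {"w": 2})])
    >>> keys
    [0, 1, 'custom']
    >>> G.number_of_edges(0, 1)
    3
    >>> G.get_edge_data(0, 1, key="custom")
    {'w': 2}
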
diff --git a/networkx/classes/reportviews.py b/networkx/classes/reportviews.py
index 0ef60f043..5f9397f82 100644
--- a/networkx/classes/reportviews.py
+++ b/networkx/classes/reportviews.py
@@ -83,16 +83,36 @@ EdgeDataView
     The argument `nbunch` restricts edges to those incident to nodes in nbunch.
 """
 from collections.abc import Mapping, Set
-import networkx as nx
-__all__ = ['NodeView', 'NodeDataView', 'EdgeView', 'OutEdgeView',
-    'InEdgeView', 'EdgeDataView', 'OutEdgeDataView', 'InEdgeDataView',
-    'MultiEdgeView', 'OutMultiEdgeView', 'InMultiEdgeView',
-    'MultiEdgeDataView', 'OutMultiEdgeDataView', 'InMultiEdgeDataView',
-    'DegreeView', 'DiDegreeView', 'InDegreeView', 'OutDegreeView',
-    'MultiDegreeView', 'DiMultiDegreeView', 'InMultiDegreeView',
-    'OutMultiDegreeView']

+import networkx as nx

+__all__ = [
+    "NodeView",
+    "NodeDataView",
+    "EdgeView",
+    "OutEdgeView",
+    "InEdgeView",
+    "EdgeDataView",
+    "OutEdgeDataView",
+    "InEdgeDataView",
+    "MultiEdgeView",
+    "OutMultiEdgeView",
+    "InMultiEdgeView",
+    "MultiEdgeDataView",
+    "OutMultiEdgeDataView",
+    "InMultiEdgeDataView",
+    "DegreeView",
+    "DiDegreeView",
+    "InDegreeView",
+    "OutDegreeView",
+    "MultiDegreeView",
+    "DiMultiDegreeView",
+    "InMultiDegreeView",
+    "OutMultiDegreeView",
+]
+
+
+# NodeViews
 class NodeView(Mapping, Set):
     """A NodeView class to act as G.nodes for a NetworkX Graph

@@ -146,17 +166,19 @@ class NodeView(Mapping, Set):
     >>> NVdata[2] == NV[2]  # NVdata gets 'color', NV gets datadict
     False
     """
-    __slots__ = '_nodes',
+
+    __slots__ = ("_nodes",)

     def __getstate__(self):
-        return {'_nodes': self._nodes}
+        return {"_nodes": self._nodes}

     def __setstate__(self, state):
-        self._nodes = state['_nodes']
+        self._nodes = state["_nodes"]

     def __init__(self, graph):
         self._nodes = graph._node

+    # Mapping methods
     def __len__(self):
         return len(self._nodes)

@@ -166,13 +188,20 @@ class NodeView(Mapping, Set):
     def __getitem__(self, n):
         if isinstance(n, slice):
             raise nx.NetworkXError(
-                f'{type(self).__name__} does not support slicing, try list(G.nodes)[{n.start}:{n.stop}:{n.step}]'
-                )
+                f"{type(self).__name__} does not support slicing, "
+                f"try list(G.nodes)[{n.start}:{n.stop}:{n.step}]"
+            )
         return self._nodes[n]

+    # Set methods
     def __contains__(self, n):
         return n in self._nodes

+    @classmethod
+    def _from_iterable(cls, it):
+        return set(it)
+
+    # DataView method
     def __call__(self, data=False, default=None):
         if data is False:
             return self
@@ -242,13 +271,15 @@ class NodeView(Mapping, Set):
         >>> G.nodes.data("height")
         NodeDataView({0: None, 1: None, 2: None}, data='height')
         """
-        pass
+        if data is False:
+            return self
+        return NodeDataView(self._nodes, data, default)

     def __str__(self):
         return str(list(self))

     def __repr__(self):
-        return f'{self.__class__.__name__}({tuple(self)})'
+        return f"{self.__class__.__name__}({tuple(self)})"


 class NodeDataView(Set):
@@ -268,22 +299,32 @@ class NodeDataView(Set):
     data : bool or string (default=False)
     default : object (default=None)
     """
-    __slots__ = '_nodes', '_data', '_default'
+
+    __slots__ = ("_nodes", "_data", "_default")

     def __getstate__(self):
-        return {'_nodes': self._nodes, '_data': self._data, '_default':
-            self._default}
+        return {"_nodes": self._nodes, "_data": self._data, "_default": self._default}

     def __setstate__(self, state):
-        self._nodes = state['_nodes']
-        self._data = state['_data']
-        self._default = state['_default']
+        self._nodes = state["_nodes"]
+        self._data = state["_data"]
+        self._default = state["_default"]

     def __init__(self, nodedict, data=False, default=None):
         self._nodes = nodedict
         self._data = data
         self._default = default

+    @classmethod
+    def _from_iterable(cls, it):
+        try:
+            return set(it)
+        except TypeError as err:
+            if "unhashable" in str(err):
+                msg = " : Could be b/c data=True or your values are unhashable"
+                raise TypeError(str(err) + msg) from err
+            raise
+
     def __len__(self):
         return len(self._nodes)

@@ -293,8 +334,10 @@ class NodeDataView(Set):
             return iter(self._nodes)
         if data is True:
             return iter(self._nodes.items())
-        return ((n, dd[data] if data in dd else self._default) for n, dd in
-            self._nodes.items())
+        return (
+            (n, dd[data] if data in dd else self._default)
+            for n, dd in self._nodes.items()
+        )

     def __contains__(self, n):
         try:
@@ -313,8 +356,9 @@ class NodeDataView(Set):
     def __getitem__(self, n):
         if isinstance(n, slice):
             raise nx.NetworkXError(
-                f'{type(self).__name__} does not support slicing, try list(G.nodes.data())[{n.start}:{n.stop}:{n.step}]'
-                )
+                f"{type(self).__name__} does not support slicing, "
+                f"try list(G.nodes.data())[{n.start}:{n.stop}:{n.step}]"
+            )
         ddict = self._nodes[n]
         data = self._data
         if data is False or data is True:
@@ -327,12 +371,13 @@ class NodeDataView(Set):
     def __repr__(self):
         name = self.__class__.__name__
         if self._data is False:
-            return f'{name}({tuple(self)})'
+            return f"{name}({tuple(self)})"
         if self._data is True:
-            return f'{name}({dict(self)})'
-        return f'{name}({dict(self)}, data={self._data!r})'
+            return f"{name}({dict(self)})"
+        return f"{name}({dict(self)}, data={self._data!r})"


+# DegreeViews
 class DiDegreeView:
     """A View class for degree of nodes in a NetworkX Graph

@@ -373,10 +418,9 @@ class DiDegreeView:

     def __init__(self, G, nbunch=None, weight=None):
         self._graph = G
-        self._succ = G._succ if hasattr(G, '_succ') else G._adj
-        self._pred = G._pred if hasattr(G, '_pred') else G._adj
-        self._nodes = self._succ if nbunch is None else list(G.nbunch_iter(
-            nbunch))
+        self._succ = G._succ if hasattr(G, "_succ") else G._adj
+        self._pred = G._pred if hasattr(G, "_pred") else G._adj
+        self._nodes = self._succ if nbunch is None else list(G.nbunch_iter(nbunch))
         self._weight = weight

     def __call__(self, nbunch=None, weight=None):
@@ -399,8 +443,9 @@ class DiDegreeView:
         preds = self._pred[n]
         if weight is None:
             return len(succs) + len(preds)
-        return sum(dd.get(weight, 1) for dd in succs.values()) + sum(dd.get
-            (weight, 1) for dd in preds.values())
+        return sum(dd.get(weight, 1) for dd in succs.values()) + sum(
+            dd.get(weight, 1) for dd in preds.values()
+        )

     def __iter__(self):
         weight = self._weight
@@ -408,14 +453,15 @@ class DiDegreeView:
             for n in self._nodes:
                 succs = self._succ[n]
                 preds = self._pred[n]
-                yield n, len(succs) + len(preds)
+                yield (n, len(succs) + len(preds))
         else:
             for n in self._nodes:
                 succs = self._succ[n]
                 preds = self._pred[n]
                 deg = sum(dd.get(weight, 1) for dd in succs.values()) + sum(
-                    dd.get(weight, 1) for dd in preds.values())
-                yield n, deg
+                    dd.get(weight, 1) for dd in preds.values()
+                )
+                yield (n, deg)

     def __len__(self):
         return len(self._nodes)
@@ -424,7 +470,7 @@ class DiDegreeView:
         return str(list(self))

     def __repr__(self):
-        return f'{self.__class__.__name__}({dict(self)})'
+        return f"{self.__class__.__name__}({dict(self)})"


 class DegreeView(DiDegreeView):
@@ -476,21 +522,23 @@ class DegreeView(DiDegreeView):
         nbrs = self._succ[n]
         if weight is None:
             return len(nbrs) + (n in nbrs)
-        return sum(dd.get(weight, 1) for dd in nbrs.values()) + (n in nbrs and
-            nbrs[n].get(weight, 1))
+        return sum(dd.get(weight, 1) for dd in nbrs.values()) + (
+            n in nbrs and nbrs[n].get(weight, 1)
+        )

     def __iter__(self):
         weight = self._weight
         if weight is None:
             for n in self._nodes:
                 nbrs = self._succ[n]
-                yield n, len(nbrs) + (n in nbrs)
+                yield (n, len(nbrs) + (n in nbrs))
         else:
             for n in self._nodes:
                 nbrs = self._succ[n]
-                deg = sum(dd.get(weight, 1) for dd in nbrs.values()) + (n in
-                    nbrs and nbrs[n].get(weight, 1))
-                yield n, deg
+                deg = sum(dd.get(weight, 1) for dd in nbrs.values()) + (
+                    n in nbrs and nbrs[n].get(weight, 1)
+                )
+                yield (n, deg)


 class OutDegreeView(DiDegreeView):
@@ -508,12 +556,12 @@ class OutDegreeView(DiDegreeView):
         if weight is None:
             for n in self._nodes:
                 succs = self._succ[n]
-                yield n, len(succs)
+                yield (n, len(succs))
         else:
             for n in self._nodes:
                 succs = self._succ[n]
                 deg = sum(dd.get(weight, 1) for dd in succs.values())
-                yield n, deg
+                yield (n, deg)


 class InDegreeView(DiDegreeView):
@@ -531,12 +579,12 @@ class InDegreeView(DiDegreeView):
         if weight is None:
             for n in self._nodes:
                 preds = self._pred[n]
-                yield n, len(preds)
+                yield (n, len(preds))
         else:
             for n in self._nodes:
                 preds = self._pred[n]
                 deg = sum(dd.get(weight, 1) for dd in preds.values())
-                yield n, deg
+                yield (n, deg)


 class MultiDegreeView(DiDegreeView):
@@ -546,10 +594,13 @@ class MultiDegreeView(DiDegreeView):
         weight = self._weight
         nbrs = self._succ[n]
         if weight is None:
-            return sum(len(keys) for keys in nbrs.values()) + (n in nbrs and
-                len(nbrs[n]))
-        deg = sum(d.get(weight, 1) for key_dict in nbrs.values() for d in
-            key_dict.values())
+            return sum(len(keys) for keys in nbrs.values()) + (
+                n in nbrs and len(nbrs[n])
+            )
+        # edge weighted graph - degree is sum of nbr edge weights
+        deg = sum(
+            d.get(weight, 1) for key_dict in nbrs.values() for d in key_dict.values()
+        )
         if n in nbrs:
             deg += sum(d.get(weight, 1) for d in nbrs[n].values())
         return deg
@@ -559,17 +610,21 @@ class MultiDegreeView(DiDegreeView):
         if weight is None:
             for n in self._nodes:
                 nbrs = self._succ[n]
-                deg = sum(len(keys) for keys in nbrs.values()) + (n in nbrs and
-                    len(nbrs[n]))
-                yield n, deg
+                deg = sum(len(keys) for keys in nbrs.values()) + (
+                    n in nbrs and len(nbrs[n])
+                )
+                yield (n, deg)
         else:
             for n in self._nodes:
                 nbrs = self._succ[n]
-                deg = sum(d.get(weight, 1) for key_dict in nbrs.values() for
-                    d in key_dict.values())
+                deg = sum(
+                    d.get(weight, 1)
+                    for key_dict in nbrs.values()
+                    for d in key_dict.values()
+                )
                 if n in nbrs:
                     deg += sum(d.get(weight, 1) for d in nbrs[n].values())
-                yield n, deg
+                yield (n, deg)


 class DiMultiDegreeView(DiDegreeView):
@@ -580,11 +635,15 @@ class DiMultiDegreeView(DiDegreeView):
         succs = self._succ[n]
         preds = self._pred[n]
         if weight is None:
-            return sum(len(keys) for keys in succs.values()) + sum(len(keys
-                ) for keys in preds.values())
-        deg = sum(d.get(weight, 1) for key_dict in succs.values() for d in
-            key_dict.values()) + sum(d.get(weight, 1) for key_dict in preds
-            .values() for d in key_dict.values())
+            return sum(len(keys) for keys in succs.values()) + sum(
+                len(keys) for keys in preds.values()
+            )
+        # edge weighted graph - degree is sum of nbr edge weights
+        deg = sum(
+            d.get(weight, 1) for key_dict in succs.values() for d in key_dict.values()
+        ) + sum(
+            d.get(weight, 1) for key_dict in preds.values() for d in key_dict.values()
+        )
         return deg

     def __iter__(self):
@@ -593,17 +652,24 @@ class DiMultiDegreeView(DiDegreeView):
             for n in self._nodes:
                 succs = self._succ[n]
                 preds = self._pred[n]
-                deg = sum(len(keys) for keys in succs.values()) + sum(len(
-                    keys) for keys in preds.values())
-                yield n, deg
+                deg = sum(len(keys) for keys in succs.values()) + sum(
+                    len(keys) for keys in preds.values()
+                )
+                yield (n, deg)
         else:
             for n in self._nodes:
                 succs = self._succ[n]
                 preds = self._pred[n]
-                deg = sum(d.get(weight, 1) for key_dict in succs.values() for
-                    d in key_dict.values()) + sum(d.get(weight, 1) for
-                    key_dict in preds.values() for d in key_dict.values())
-                yield n, deg
+                deg = sum(
+                    d.get(weight, 1)
+                    for key_dict in succs.values()
+                    for d in key_dict.values()
+                ) + sum(
+                    d.get(weight, 1)
+                    for key_dict in preds.values()
+                    for d in key_dict.values()
+                )
+                yield (n, deg)


 class InMultiDegreeView(DiDegreeView):
@@ -614,8 +680,10 @@ class InMultiDegreeView(DiDegreeView):
         nbrs = self._pred[n]
         if weight is None:
             return sum(len(data) for data in nbrs.values())
-        return sum(d.get(weight, 1) for key_dict in nbrs.values() for d in
-            key_dict.values())
+        # edge weighted graph - degree is sum of nbr edge weights
+        return sum(
+            d.get(weight, 1) for key_dict in nbrs.values() for d in key_dict.values()
+        )

     def __iter__(self):
         weight = self._weight
@@ -623,13 +691,16 @@ class InMultiDegreeView(DiDegreeView):
             for n in self._nodes:
                 nbrs = self._pred[n]
                 deg = sum(len(data) for data in nbrs.values())
-                yield n, deg
+                yield (n, deg)
         else:
             for n in self._nodes:
                 nbrs = self._pred[n]
-                deg = sum(d.get(weight, 1) for key_dict in nbrs.values() for
-                    d in key_dict.values())
-                yield n, deg
+                deg = sum(
+                    d.get(weight, 1)
+                    for key_dict in nbrs.values()
+                    for d in key_dict.values()
+                )
+                yield (n, deg)


 class OutMultiDegreeView(DiDegreeView):
@@ -640,8 +711,10 @@ class OutMultiDegreeView(DiDegreeView):
         nbrs = self._succ[n]
         if weight is None:
             return sum(len(data) for data in nbrs.values())
-        return sum(d.get(weight, 1) for key_dict in nbrs.values() for d in
-            key_dict.values())
+        # edge weighted graph - degree is sum of nbr edge weights
+        return sum(
+            d.get(weight, 1) for key_dict in nbrs.values() for d in key_dict.values()
+        )

     def __iter__(self):
         weight = self._weight
@@ -649,23 +722,39 @@ class OutMultiDegreeView(DiDegreeView):
             for n in self._nodes:
                 nbrs = self._succ[n]
                 deg = sum(len(data) for data in nbrs.values())
-                yield n, deg
+                yield (n, deg)
         else:
             for n in self._nodes:
                 nbrs = self._succ[n]
-                deg = sum(d.get(weight, 1) for key_dict in nbrs.values() for
-                    d in key_dict.values())
-                yield n, deg
+                deg = sum(
+                    d.get(weight, 1)
+                    for key_dict in nbrs.values()
+                    for d in key_dict.values()
+                )
+                yield (n, deg)


+# EdgeDataViews
 class OutEdgeDataView:
     """EdgeDataView for outward edges of DiGraph; See EdgeDataView"""
-    __slots__ = ('_viewer', '_nbunch', '_data', '_default', '_adjdict',
-        '_nodes_nbrs', '_report')
+
+    __slots__ = (
+        "_viewer",
+        "_nbunch",
+        "_data",
+        "_default",
+        "_adjdict",
+        "_nodes_nbrs",
+        "_report",
+    )

     def __getstate__(self):
-        return {'viewer': self._viewer, 'nbunch': self._nbunch, 'data':
-            self._data, 'default': self._default}
+        return {
+            "viewer": self._viewer,
+            "nbunch": self._nbunch,
+            "data": self._data,
+            "default": self._default,
+        }

     def __setstate__(self, state):
         self.__init__(**state)
@@ -676,30 +765,38 @@ class OutEdgeDataView:
         if nbunch is None:
             self._nodes_nbrs = adjdict.items
         else:
+            # dict retains order of nodes but acts like a set
             nbunch = dict.fromkeys(viewer._graph.nbunch_iter(nbunch))
-            self._nodes_nbrs = lambda : [(n, adjdict[n]) for n in nbunch]
+            self._nodes_nbrs = lambda: [(n, adjdict[n]) for n in nbunch]
         self._nbunch = nbunch
         self._data = data
         self._default = default
+        # Set _report based on data and default
         if data is True:
             self._report = lambda n, nbr, dd: (n, nbr, dd)
         elif data is False:
             self._report = lambda n, nbr, dd: (n, nbr)
-        else:
-            self._report = lambda n, nbr, dd: (n, nbr, dd[data]
-                ) if data in dd else (n, nbr, default)
+        else:  # data is attribute name
+            self._report = (
+                lambda n, nbr, dd: (n, nbr, dd[data])
+                if data in dd
+                else (n, nbr, default)
+            )

     def __len__(self):
         return sum(len(nbrs) for n, nbrs in self._nodes_nbrs())

     def __iter__(self):
-        return (self._report(n, nbr, dd) for n, nbrs in self._nodes_nbrs() for
-            nbr, dd in nbrs.items())
+        return (
+            self._report(n, nbr, dd)
+            for n, nbrs in self._nodes_nbrs()
+            for nbr, dd in nbrs.items()
+        )

     def __contains__(self, e):
         u, v = e[:2]
         if self._nbunch is not None and u not in self._nbunch:
-            return False
+            return False  # this edge doesn't start in nbunch
         try:
             ddict = self._adjdict[u][v]
         except KeyError:
@@ -710,7 +807,7 @@ class OutEdgeDataView:
         return str(list(self))

     def __repr__(self):
-        return f'{self.__class__.__name__}({list(self)})'
+        return f"{self.__class__.__name__}({list(self)})"


 class EdgeDataView(OutEdgeDataView):
@@ -741,6 +838,7 @@ class EdgeDataView(OutEdgeDataView):
     [(0, 1, 'biz'), (1, 2, 'bar')]
     >>> assert (0, 1, "biz") in G.edges(data="foo", default="biz")
     """
+
     __slots__ = ()

     def __len__(self):
@@ -757,9 +855,8 @@ class EdgeDataView(OutEdgeDataView):

     def __contains__(self, e):
         u, v = e[:2]
-        if (self._nbunch is not None and u not in self._nbunch and v not in
-            self._nbunch):
-            return False
+        if self._nbunch is not None and u not in self._nbunch and v not in self._nbunch:
+            return False  # this edge doesn't start and it doesn't end in nbunch
         try:
             ddict = self._adjdict[u][v]
         except KeyError:
@@ -769,16 +866,20 @@ class EdgeDataView(OutEdgeDataView):

 class InEdgeDataView(OutEdgeDataView):
     """An EdgeDataView class for outward edges of DiGraph; See EdgeDataView"""
+
     __slots__ = ()

     def __iter__(self):
-        return (self._report(nbr, n, dd) for n, nbrs in self._nodes_nbrs() for
-            nbr, dd in nbrs.items())
+        return (
+            self._report(nbr, n, dd)
+            for n, nbrs in self._nodes_nbrs()
+            for nbr, dd in nbrs.items()
+        )

     def __contains__(self, e):
         u, v = e[:2]
         if self._nbunch is not None and v not in self._nbunch:
-            return False
+            return False  # this edge doesn't end in nbunch
         try:
             ddict = self._adjdict[v][u]
         except KeyError:
@@ -788,28 +889,35 @@ class InEdgeDataView(OutEdgeDataView):

 class OutMultiEdgeDataView(OutEdgeDataView):
     """An EdgeDataView for outward edges of MultiDiGraph; See EdgeDataView"""
-    __slots__ = 'keys',
+
+    __slots__ = ("keys",)

     def __getstate__(self):
-        return {'viewer': self._viewer, 'nbunch': self._nbunch, 'keys':
-            self.keys, 'data': self._data, 'default': self._default}
+        return {
+            "viewer": self._viewer,
+            "nbunch": self._nbunch,
+            "keys": self.keys,
+            "data": self._data,
+            "default": self._default,
+        }

     def __setstate__(self, state):
         self.__init__(**state)

-    def __init__(self, viewer, nbunch=None, data=False, *, default=None,
-        keys=False):
+    def __init__(self, viewer, nbunch=None, data=False, *, default=None, keys=False):
         self._viewer = viewer
         adjdict = self._adjdict = viewer._adjdict
         self.keys = keys
         if nbunch is None:
             self._nodes_nbrs = adjdict.items
         else:
+            # dict retains order of nodes but acts like a set
             nbunch = dict.fromkeys(viewer._graph.nbunch_iter(nbunch))
-            self._nodes_nbrs = lambda : [(n, adjdict[n]) for n in nbunch]
+            self._nodes_nbrs = lambda: [(n, adjdict[n]) for n in nbunch]
         self._nbunch = nbunch
         self._data = data
         self._default = default
+        # Set _report based on data and default
         if data is True:
             if keys is True:
                 self._report = lambda n, nbr, k, dd: (n, nbr, k, dd)
@@ -820,24 +928,35 @@ class OutMultiEdgeDataView(OutEdgeDataView):
                 self._report = lambda n, nbr, k, dd: (n, nbr, k)
             else:
                 self._report = lambda n, nbr, k, dd: (n, nbr)
-        elif keys is True:
-            self._report = lambda n, nbr, k, dd: (n, nbr, k, dd[data]
-                ) if data in dd else (n, nbr, k, default)
-        else:
-            self._report = lambda n, nbr, k, dd: (n, nbr, dd[data]
-                ) if data in dd else (n, nbr, default)
+        else:  # data is attribute name
+            if keys is True:
+                self._report = (
+                    lambda n, nbr, k, dd: (n, nbr, k, dd[data])
+                    if data in dd
+                    else (n, nbr, k, default)
+                )
+            else:
+                self._report = (
+                    lambda n, nbr, k, dd: (n, nbr, dd[data])
+                    if data in dd
+                    else (n, nbr, default)
+                )

     def __len__(self):
         return sum(1 for e in self)

     def __iter__(self):
-        return (self._report(n, nbr, k, dd) for n, nbrs in self._nodes_nbrs
-            () for nbr, kd in nbrs.items() for k, dd in kd.items())
+        return (
+            self._report(n, nbr, k, dd)
+            for n, nbrs in self._nodes_nbrs()
+            for nbr, kd in nbrs.items()
+            for k, dd in kd.items()
+        )

     def __contains__(self, e):
         u, v = e[:2]
         if self._nbunch is not None and u not in self._nbunch:
-            return False
+            return False  # this edge doesn't start in nbunch
         try:
             kdict = self._adjdict[u][v]
         except KeyError:
@@ -854,6 +973,7 @@ class OutMultiEdgeDataView(OutEdgeDataView):

 class MultiEdgeDataView(OutMultiEdgeDataView):
     """An EdgeDataView class for edges of MultiGraph; See EdgeDataView"""
+
     __slots__ = ()

     def __iter__(self):
@@ -868,9 +988,8 @@ class MultiEdgeDataView(OutMultiEdgeDataView):

     def __contains__(self, e):
         u, v = e[:2]
-        if (self._nbunch is not None and u not in self._nbunch and v not in
-            self._nbunch):
-            return False
+        if self._nbunch is not None and u not in self._nbunch and v not in self._nbunch:
+            return False  # this edge doesn't start and doesn't end in nbunch
         try:
             kdict = self._adjdict[u][v]
         except KeyError:
@@ -890,16 +1009,21 @@ class MultiEdgeDataView(OutMultiEdgeDataView):

 class InMultiEdgeDataView(OutMultiEdgeDataView):
     """An EdgeDataView for inward edges of MultiDiGraph; See EdgeDataView"""
+
     __slots__ = ()

     def __iter__(self):
-        return (self._report(nbr, n, k, dd) for n, nbrs in self._nodes_nbrs
-            () for nbr, kd in nbrs.items() for k, dd in kd.items())
+        return (
+            self._report(nbr, n, k, dd)
+            for n, nbrs in self._nodes_nbrs()
+            for nbr, kd in nbrs.items()
+            for k, dd in kd.items()
+        )

     def __contains__(self, e):
         u, v = e[:2]
         if self._nbunch is not None and v not in self._nbunch:
-            return False
+            return False  # this edge doesn't end in nbunch
         try:
             kdict = self._adjdict[v][u]
         except KeyError:
@@ -911,31 +1035,39 @@ class InMultiEdgeDataView(OutMultiEdgeDataView):
         return any(e == self._report(u, v, k, dd) for k, dd in kdict.items())


+# EdgeViews have set operations and no data reported
 class OutEdgeView(Set, Mapping):
     """A EdgeView class for outward edges of a DiGraph"""
-    __slots__ = '_adjdict', '_graph', '_nodes_nbrs'
+
+    __slots__ = ("_adjdict", "_graph", "_nodes_nbrs")

     def __getstate__(self):
-        return {'_graph': self._graph, '_adjdict': self._adjdict}
+        return {"_graph": self._graph, "_adjdict": self._adjdict}

     def __setstate__(self, state):
-        self._graph = state['_graph']
-        self._adjdict = state['_adjdict']
+        self._graph = state["_graph"]
+        self._adjdict = state["_adjdict"]
         self._nodes_nbrs = self._adjdict.items
+
+    @classmethod
+    def _from_iterable(cls, it):
+        return set(it)
+
     dataview = OutEdgeDataView

     def __init__(self, G):
         self._graph = G
-        self._adjdict = G._succ if hasattr(G, 'succ') else G._adj
+        self._adjdict = G._succ if hasattr(G, "succ") else G._adj
         self._nodes_nbrs = self._adjdict.items

+    # Set methods
     def __len__(self):
         return sum(len(nbrs) for n, nbrs in self._nodes_nbrs())

     def __iter__(self):
         for n, nbrs in self._nodes_nbrs():
             for nbr in nbrs:
-                yield n, nbr
+                yield (n, nbr)

     def __contains__(self, e):
         try:
@@ -944,17 +1076,20 @@ class OutEdgeView(Set, Mapping):
         except KeyError:
             return False

+    # Mapping Methods
     def __getitem__(self, e):
         if isinstance(e, slice):
             raise nx.NetworkXError(
-                f'{type(self).__name__} does not support slicing, try list(G.edges)[{e.start}:{e.stop}:{e.step}]'
-                )
+                f"{type(self).__name__} does not support slicing, "
+                f"try list(G.edges)[{e.start}:{e.stop}:{e.step}]"
+            )
         u, v = e
         try:
             return self._adjdict[u][v]
-        except KeyError as ex:
-            raise KeyError(f'The edge {e} is not in the graph.')
+        except KeyError as ex:  # Customize msg to indicate exception origin
+            raise KeyError(f"The edge {e} is not in the graph.") from ex

+    # EdgeDataView methods
     def __call__(self, nbunch=None, data=False, *, default=None):
         if nbunch is None and data is False:
             return self
@@ -1036,13 +1171,16 @@ class OutEdgeView(Set, Mapping):
         >>> G.edges.data("speed")
         EdgeDataView([(0, 1, None), (0, 2, None), (1, 2, None)])
         """
-        pass
+        if nbunch is None and data is False:
+            return self
+        return self.dataview(self, nbunch, data, default=default)

+    # String Methods
     def __str__(self):
         return str(list(self))

     def __repr__(self):
-        return f'{self.__class__.__name__}({list(self)})'
+        return f"{self.__class__.__name__}({list(self)})"


 class EdgeView(OutEdgeView):
@@ -1115,7 +1253,9 @@ class EdgeView(OutEdgeView):
     (2, 3, 0)
     (2, 3, 1)
     """
+
     __slots__ = ()
+
     dataview = EdgeDataView

     def __len__(self):
@@ -1127,7 +1267,7 @@ class EdgeView(OutEdgeView):
         for n, nbrs in self._nodes_nbrs():
             for nbr in list(nbrs):
                 if nbr not in seen:
-                    yield n, nbr
+                    yield (n, nbr)
             seen[n] = 1
         del seen

@@ -1141,23 +1281,25 @@ class EdgeView(OutEdgeView):

 class InEdgeView(OutEdgeView):
     """A EdgeView class for inward edges of a DiGraph"""
+
     __slots__ = ()

     def __setstate__(self, state):
-        self._graph = state['_graph']
-        self._adjdict = state['_adjdict']
+        self._graph = state["_graph"]
+        self._adjdict = state["_adjdict"]
         self._nodes_nbrs = self._adjdict.items
+
     dataview = InEdgeDataView

     def __init__(self, G):
         self._graph = G
-        self._adjdict = G._pred if hasattr(G, 'pred') else G._adj
+        self._adjdict = G._pred if hasattr(G, "pred") else G._adj
         self._nodes_nbrs = self._adjdict.items

     def __iter__(self):
         for n, nbrs in self._nodes_nbrs():
             for nbr in nbrs:
-                yield nbr, n
+                yield (nbr, n)

     def __contains__(self, e):
         try:
@@ -1169,26 +1311,30 @@ class InEdgeView(OutEdgeView):
     def __getitem__(self, e):
         if isinstance(e, slice):
             raise nx.NetworkXError(
-                f'{type(self).__name__} does not support slicing, try list(G.in_edges)[{e.start}:{e.stop}:{e.step}]'
-                )
+                f"{type(self).__name__} does not support slicing, "
+                f"try list(G.in_edges)[{e.start}:{e.stop}:{e.step}]"
+            )
         u, v = e
         return self._adjdict[v][u]


 class OutMultiEdgeView(OutEdgeView):
     """A EdgeView class for outward edges of a MultiDiGraph"""
+
     __slots__ = ()
+
     dataview = OutMultiEdgeDataView

     def __len__(self):
-        return sum(len(kdict) for n, nbrs in self._nodes_nbrs() for nbr,
-            kdict in nbrs.items())
+        return sum(
+            len(kdict) for n, nbrs in self._nodes_nbrs() for nbr, kdict in nbrs.items()
+        )

     def __iter__(self):
         for n, nbrs in self._nodes_nbrs():
             for nbr, kdict in nbrs.items():
                 for key in kdict:
-                    yield n, nbr, key
+                    yield (n, nbr, key)

     def __contains__(self, e):
         N = len(e)
@@ -1198,7 +1344,7 @@ class OutMultiEdgeView(OutEdgeView):
             u, v = e
             k = 0
         else:
-            raise ValueError('MultiEdge must have length 2 or 3')
+            raise ValueError("MultiEdge must have length 2 or 3")
         try:
             return k in self._adjdict[u][v]
         except KeyError:
@@ -1207,8 +1353,9 @@ class OutMultiEdgeView(OutEdgeView):
     def __getitem__(self, e):
         if isinstance(e, slice):
             raise nx.NetworkXError(
-                f'{type(self).__name__} does not support slicing, try list(G.edges)[{e.start}:{e.stop}:{e.step}]'
-                )
+                f"{type(self).__name__} does not support slicing, "
+                f"try list(G.edges)[{e.start}:{e.stop}:{e.step}]"
+            )
         u, v, k = e
         return self._adjdict[u][v][k]

@@ -1217,10 +1364,17 @@ class OutMultiEdgeView(OutEdgeView):
             return self
         return self.dataview(self, nbunch, data, default=default, keys=keys)

+    def data(self, data=True, default=None, nbunch=None, keys=False):
+        if nbunch is None and data is False and keys is True:
+            return self
+        return self.dataview(self, nbunch, data, default=default, keys=keys)
+

 class MultiEdgeView(OutMultiEdgeView):
     """A EdgeView class for edges of a MultiGraph"""
+
     __slots__ = ()
+
     dataview = MultiEdgeDataView

     def __len__(self):
@@ -1232,31 +1386,33 @@ class MultiEdgeView(OutMultiEdgeView):
             for nbr, kd in nbrs.items():
                 if nbr not in seen:
                     for k, dd in kd.items():
-                        yield n, nbr, k
+                        yield (n, nbr, k)
             seen[n] = 1
         del seen


 class InMultiEdgeView(OutMultiEdgeView):
     """A EdgeView class for inward edges of a MultiDiGraph"""
+
     __slots__ = ()

     def __setstate__(self, state):
-        self._graph = state['_graph']
-        self._adjdict = state['_adjdict']
+        self._graph = state["_graph"]
+        self._adjdict = state["_adjdict"]
         self._nodes_nbrs = self._adjdict.items
+
     dataview = InMultiEdgeDataView

     def __init__(self, G):
         self._graph = G
-        self._adjdict = G._pred if hasattr(G, 'pred') else G._adj
+        self._adjdict = G._pred if hasattr(G, "pred") else G._adj
         self._nodes_nbrs = self._adjdict.items

     def __iter__(self):
         for n, nbrs in self._nodes_nbrs():
             for nbr, kdict in nbrs.items():
                 for key in kdict:
-                    yield nbr, n, key
+                    yield (nbr, n, key)

     def __contains__(self, e):
         N = len(e)
@@ -1266,7 +1422,7 @@ class InMultiEdgeView(OutMultiEdgeView):
             u, v = e
             k = 0
         else:
-            raise ValueError('MultiEdge must have length 2 or 3')
+            raise ValueError("MultiEdge must have length 2 or 3")
         try:
             return k in self._adjdict[v][u]
         except KeyError:
@@ -1275,7 +1431,8 @@ class InMultiEdgeView(OutMultiEdgeView):
     def __getitem__(self, e):
         if isinstance(e, slice):
             raise nx.NetworkXError(
-                f'{type(self).__name__} does not support slicing, try list(G.in_edges)[{e.start}:{e.stop}:{e.step}]'
-                )
+                f"{type(self).__name__} does not support slicing, "
+                f"try list(G.in_edges)[{e.start}:{e.stop}:{e.step}]"
+            )
         u, v, k = e
         return self._adjdict[v][u][k]
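
A quick illustrative session (added for clarity, not part of the patch) showing the
Mapping and Set behavior these view classes provide; outputs are what the code above
should produce:

>>> import networkx as nx
>>> G = nx.MultiDiGraph()
>>> G.add_edge("a", "b", weight=3)
0
>>> G.edges["a", "b", 0]  # Mapping access through OutMultiEdgeView
{'weight': 3}
>>> ("a", "b") in G.edges  # 2-tuples are checked against key 0
True
>>> list(G.in_edges)  # InMultiEdgeView yields (nbr, n, key)
[('a', 'b', 0)]
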
diff --git a/networkx/convert.py b/networkx/convert.py
index 9d6a1aa8a..7cc8fe401 100644
--- a/networkx/convert.py
+++ b/networkx/convert.py
@@ -17,9 +17,18 @@ nx_agraph, nx_pydot
 """
 import warnings
 from collections.abc import Collection, Generator, Iterator
+
 import networkx as nx
-__all__ = ['to_networkx_graph', 'from_dict_of_dicts', 'to_dict_of_dicts',
-    'from_dict_of_lists', 'to_dict_of_lists', 'from_edgelist', 'to_edgelist']
+
+__all__ = [
+    "to_networkx_graph",
+    "from_dict_of_dicts",
+    "to_dict_of_dicts",
+    "from_dict_of_lists",
+    "to_dict_of_lists",
+    "from_edgelist",
+    "to_edgelist",
+]


 def to_networkx_graph(data, create_using=None, multigraph_input=False):
@@ -61,7 +70,110 @@ def to_networkx_graph(data, create_using=None, multigraph_input=False):
         a multigraph from a multigraph.

     """
-    pass
+    # NX graph
+    if hasattr(data, "adj"):
+        try:
+            result = from_dict_of_dicts(
+                data.adj,
+                create_using=create_using,
+                multigraph_input=data.is_multigraph(),
+            )
+            # data.graph should be dict-like
+            result.graph.update(data.graph)
+            # data.nodes should be dict-like
+            # result.add_nodes_from(data.nodes.items()) would also work, but
+            # a custom node_attr_dict_factory could make that behave
+            # unexpectedly, so update each node's attr dict in place instead
+            for n, dd in data.nodes.items():
+                result._node[n].update(dd)
+            return result
+        except Exception as err:
+            raise nx.NetworkXError("Input is not a correct NetworkX graph.") from err
+
+    # pygraphviz  agraph
+    if hasattr(data, "is_strict"):
+        try:
+            return nx.nx_agraph.from_agraph(data, create_using=create_using)
+        except Exception as err:
+            raise nx.NetworkXError("Input is not a correct pygraphviz graph.") from err
+
+    # dict of dicts/lists
+    if isinstance(data, dict):
+        try:
+            return from_dict_of_dicts(
+                data, create_using=create_using, multigraph_input=multigraph_input
+            )
+        except Exception as err1:
+            if multigraph_input is True:
+                raise nx.NetworkXError(
+                    f"converting multigraph_input raised:\n{type(err1)}: {err1}"
+                )
+            try:
+                return from_dict_of_lists(data, create_using=create_using)
+            except Exception as err2:
+                raise TypeError("Input is not known type.") from err2
+
+    # Pandas DataFrame
+    try:
+        import pandas as pd
+
+        if isinstance(data, pd.DataFrame):
+            if data.shape[0] == data.shape[1]:
+                try:
+                    return nx.from_pandas_adjacency(data, create_using=create_using)
+                except Exception as err:
+                    msg = "Input is not a correct Pandas DataFrame adjacency matrix."
+                    raise nx.NetworkXError(msg) from err
+            else:
+                try:
+                    return nx.from_pandas_edgelist(
+                        data, edge_attr=True, create_using=create_using
+                    )
+                except Exception as err:
+                    msg = "Input is not a correct Pandas DataFrame edge-list."
+                    raise nx.NetworkXError(msg) from err
+    except ImportError:
+        warnings.warn("pandas not found, skipping conversion test.", ImportWarning)
+
+    # numpy array
+    try:
+        import numpy as np
+
+        if isinstance(data, np.ndarray):
+            try:
+                return nx.from_numpy_array(data, create_using=create_using)
+            except Exception as err:
+                raise nx.NetworkXError(
+                    f"Failed to interpret array as an adjacency matrix."
+                ) from err
+    except ImportError:
+        warnings.warn("numpy not found, skipping conversion test.", ImportWarning)
+
+    # scipy sparse array - any format
+    try:
+        import scipy
+
+        if hasattr(data, "format"):
+            try:
+                return nx.from_scipy_sparse_array(data, create_using=create_using)
+            except Exception as err:
+                raise nx.NetworkXError(
+                    "Input is not a correct scipy sparse array type."
+                ) from err
+    except ImportError:
+        warnings.warn("scipy not found, skipping conversion test.", ImportWarning)
+
+    # Note: most general check - should remain last in order of execution
+    # Includes containers (e.g. list, set, dict, etc.), generators, and
+    # iterators (e.g. itertools.chain) of edges
+
+    if isinstance(data, Collection | Generator | Iterator):
+        try:
+            return from_edgelist(data, create_using=create_using)
+        except Exception as err:
+            raise nx.NetworkXError("Input is not a valid edge list") from err
+
+    raise nx.NetworkXError("Input is not a known data type for conversion.")


 @nx._dispatchable
@@ -81,7 +193,13 @@ def to_dict_of_lists(G, nodelist=None):
     Completely ignores edge data for MultiGraph and MultiDiGraph.

     """
-    pass
+    if nodelist is None:
+        nodelist = G
+
+    d = {}
+    for n in nodelist:
+        d[n] = [nbr for nbr in G.neighbors(n) if nbr in nodelist]
+    return d
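
For reference, a minimal doctest-style sketch of the restored body (illustrative):

>>> import networkx as nx
>>> nx.to_dict_of_lists(nx.path_graph(3))
{0: [1], 1: [0, 2], 2: [1]}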


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -106,7 +224,23 @@ def from_dict_of_lists(d, create_using=None):
     >>> G = nx.Graph(dol)  # use Graph constructor

     """
-    pass
+    G = nx.empty_graph(0, create_using)
+    G.add_nodes_from(d)
+    if G.is_multigraph() and not G.is_directed():
+        # a dict_of_lists can't show multiedges.  BUT for undirected graphs,
+        # each edge shows up twice in the dict_of_lists.
+        # So we need to treat this case separately.
+        seen = {}
+        for node, nbrlist in d.items():
+            for nbr in nbrlist:
+                if nbr not in seen:
+                    G.add_edge(node, nbr)
+            seen[node] = 1  # don't allow reverse edge to show up
+    else:
+        G.add_edges_from(
+            ((node, nbr) for node, nbrlist in d.items() for nbr in nbrlist)
+        )
+    return G
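
The `seen` bookkeeping is what keeps an undirected multigraph from doubling every
edge; a small sketch of the expected behavior (not part of the patch):

>>> import networkx as nx
>>> dol = {0: [1], 1: [0]}  # the single edge appears in both directions
>>> nx.from_dict_of_lists(dol, create_using=nx.MultiGraph).number_of_edges()
1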


 def to_dict_of_dicts(G, nodelist=None, edge_data=None):
@@ -206,7 +340,26 @@ def to_dict_of_dicts(G, nodelist=None, edge_data=None):
     >>> d
     {0: {1: 10}, 1: {0: 10}}
     """
-    pass
+    dod = {}
+    if nodelist is None:
+        if edge_data is None:
+            for u, nbrdict in G.adjacency():
+                dod[u] = nbrdict.copy()
+        else:  # edge_data is not None
+            for u, nbrdict in G.adjacency():
+                dod[u] = dod.fromkeys(nbrdict, edge_data)
+    else:  # nodelist is not None
+        if edge_data is None:
+            for u in nodelist:
+                dod[u] = {v: data for v, data in G[u].items() if v in nodelist}
+        else:  # nodelist and edge_data are not None
+            for u in nodelist:
+                dod[u] = {v: edge_data for v in G[u] if v in nodelist}
+    return dod
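
An illustrative doctest for the two branches above (outputs as expected from the
restored code; not lines from the patch):

>>> import networkx as nx
>>> G = nx.Graph([(0, 1, {"weight": 7})])
>>> nx.to_dict_of_dicts(G)
{0: {1: {'weight': 7}}, 1: {0: {'weight': 7}}}
>>> nx.to_dict_of_dicts(G, edge_data=1)  # constant replaces the attr dicts
{0: {1: 1}, 1: {0: 1}}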


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -238,7 +391,62 @@ def from_dict_of_dicts(d, create_using=None, multigraph_input=False):
     >>> G = nx.Graph(dod)  # use Graph constructor

     """
-    pass
+    G = nx.empty_graph(0, create_using)
+    G.add_nodes_from(d)
+    # does dict d represent a MultiGraph or MultiDiGraph?
+    if multigraph_input:
+        if G.is_directed():
+            if G.is_multigraph():
+                G.add_edges_from(
+                    (u, v, key, data)
+                    for u, nbrs in d.items()
+                    for v, datadict in nbrs.items()
+                    for key, data in datadict.items()
+                )
+            else:
+                G.add_edges_from(
+                    (u, v, data)
+                    for u, nbrs in d.items()
+                    for v, datadict in nbrs.items()
+                    for key, data in datadict.items()
+                )
+        else:  # Undirected
+            if G.is_multigraph():
+                seen = set()  # don't add both directions of undirected graph
+                for u, nbrs in d.items():
+                    for v, datadict in nbrs.items():
+                        if (u, v) not in seen:
+                            G.add_edges_from(
+                                (u, v, key, data) for key, data in datadict.items()
+                            )
+                            seen.add((v, u))
+            else:
+                seen = set()  # don't add both directions of undirected graph
+                for u, nbrs in d.items():
+                    for v, datadict in nbrs.items():
+                        if (u, v) not in seen:
+                            G.add_edges_from(
+                                (u, v, data) for key, data in datadict.items()
+                            )
+                            seen.add((v, u))
+
+    else:  # not a multigraph to multigraph transfer
+        if G.is_multigraph() and not G.is_directed():
+            # d can have both representations u-v, v-u in dict.  Only add one.
+            # We don't need this check for digraphs since we add both directions,
+            # or for Graph() since it is done implicitly (parallel edges not allowed)
+            seen = set()
+            for u, nbrs in d.items():
+                for v, data in nbrs.items():
+                    if (u, v) not in seen:
+                        G.add_edge(u, v, key=0)
+                        G[u][v][0].update(data)
+                    seen.add((v, u))
+        else:
+            G.add_edges_from(
+                ((u, v, data) for u, nbrs in d.items() for v, data in nbrs.items())
+            )
+    return G
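
A brief sketch of the undirected-multigraph dedup path (illustrative, not from the
patch):

>>> import networkx as nx
>>> dod = {0: {1: {"weight": 1}}, 1: {0: {"weight": 1}}}
>>> G = nx.from_dict_of_dicts(dod, create_using=nx.MultiGraph)
>>> list(G.edges(keys=True))  # u-v and v-u collapse to one keyed edge
[(0, 1, 0)]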


 @nx._dispatchable(preserve_edge_attrs=True)
@@ -254,7 +462,9 @@ def to_edgelist(G, nodelist=None):
        Use only nodes specified in nodelist

     """
-    pass
+    if nodelist is None:
+        return G.edges(data=True)
+    return G.edges(nodelist, data=True)


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -279,4 +489,6 @@ def from_edgelist(edgelist, create_using=None):
     >>> G = nx.Graph(edgelist)  # use Graph constructor

     """
-    pass
+    G = nx.empty_graph(0, create_using)
+    G.add_edges_from(edgelist)
+    return G
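
Illustrative check of the restored `from_edgelist` (added for clarity; outputs assume
a plain run of these calls):

>>> import networkx as nx
>>> edgelist = [(0, 1), (1, 2), (1, 2)]
>>> nx.from_edgelist(edgelist).number_of_edges()  # Graph collapses duplicates
2
>>> nx.from_edgelist(edgelist, create_using=nx.MultiGraph).number_of_edges()
3
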
diff --git a/networkx/convert_matrix.py b/networkx/convert_matrix.py
index a3278bf21..6165ac18e 100644
--- a/networkx/convert_matrix.py
+++ b/networkx/convert_matrix.py
@@ -24,18 +24,35 @@ See Also
 --------
 nx_agraph, nx_pydot
 """
+
 import itertools
 from collections import defaultdict
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['from_pandas_adjacency', 'to_pandas_adjacency',
-    'from_pandas_edgelist', 'to_pandas_edgelist', 'from_scipy_sparse_array',
-    'to_scipy_sparse_array', 'from_numpy_array', 'to_numpy_array']
-

-@nx._dispatchable(edge_attrs='weight')
-def to_pandas_adjacency(G, nodelist=None, dtype=None, order=None,
-    multigraph_weight=sum, weight='weight', nonedge=0.0):
+__all__ = [
+    "from_pandas_adjacency",
+    "to_pandas_adjacency",
+    "from_pandas_edgelist",
+    "to_pandas_edgelist",
+    "from_scipy_sparse_array",
+    "to_scipy_sparse_array",
+    "from_numpy_array",
+    "to_numpy_array",
+]
+
+
+@nx._dispatchable(edge_attrs="weight")
+def to_pandas_adjacency(
+    G,
+    nodelist=None,
+    dtype=None,
+    order=None,
+    multigraph_weight=sum,
+    weight="weight",
+    nonedge=0.0,
+):
     """Returns the graph adjacency matrix as a Pandas DataFrame.

     Parameters
@@ -118,12 +135,25 @@ def to_pandas_adjacency(G, nodelist=None, dtype=None, order=None,
     2  0  0  4

     """
-    pass
+    import pandas as pd
+
+    M = to_numpy_array(
+        G,
+        nodelist=nodelist,
+        dtype=dtype,
+        order=order,
+        multigraph_weight=multigraph_weight,
+        weight=weight,
+        nonedge=nonedge,
+    )
+    if nodelist is None:
+        nodelist = list(G)
+    return pd.DataFrame(data=M, index=nodelist, columns=nodelist)
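
A minimal sketch of the expected output, assuming pandas is installed (not part of
the patch):

>>> import networkx as nx
>>> nx.to_pandas_adjacency(nx.path_graph(2), dtype=int)
   0  1
0  0  1
1  1  0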


 @nx._dispatchable(graphs=None, returns_graph=True)
 def from_pandas_adjacency(df, create_using=None):
-    """Returns a graph from Pandas DataFrame.
+    r"""Returns a graph from Pandas DataFrame.

     The Pandas DataFrame is interpreted as an adjacency matrix for the graph.

@@ -175,12 +205,30 @@ def from_pandas_adjacency(df, create_using=None):
     >>> print(G)
     Graph named 'Graph from pandas adjacency matrix' with 2 nodes and 3 edges
     """
-    pass
+
+    try:
+        df = df[df.index]
+    except Exception as err:
+        missing = list(set(df.index).difference(set(df.columns)))
+        msg = f"{missing} not in columns"
+        raise nx.NetworkXError("Columns must match Indices.", msg) from err
+
+    A = df.values
+    G = from_numpy_array(A, create_using=create_using)
+
+    nx.relabel.relabel_nodes(G, dict(enumerate(df.columns)), copy=False)
+    return G
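
For reference, the relabeling step means node names come from the DataFrame labels;
an illustrative doctest (assumes pandas):

>>> import pandas as pd, networkx as nx
>>> df = pd.DataFrame([[0, 1], [1, 0]], index=["a", "b"], columns=["a", "b"])
>>> list(nx.from_pandas_adjacency(df).edges)
[('a', 'b')]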


 @nx._dispatchable(preserve_edge_attrs=True)
-def to_pandas_edgelist(G, source='source', target='target', nodelist=None,
-    dtype=None, edge_key=None):
+def to_pandas_edgelist(
+    G,
+    source="source",
+    target="target",
+    nodelist=None,
+    dtype=None,
+    edge_key=None,
+):
     """Returns the graph edge list as a Pandas DataFrame.

     Parameters
@@ -234,12 +282,45 @@ def to_pandas_edgelist(G, source='source', target='target', nodelist=None,
     1      A      B     9     1

     """
-    pass
+    import pandas as pd
+
+    if nodelist is None:
+        edgelist = G.edges(data=True)
+    else:
+        edgelist = G.edges(nodelist, data=True)
+    source_nodes = [s for s, _, _ in edgelist]
+    target_nodes = [t for _, t, _ in edgelist]
+
+    all_attrs = set().union(*(d.keys() for _, _, d in edgelist))
+    if source in all_attrs:
+        raise nx.NetworkXError(f"Source name {source!r} is an edge attr name")
+    if target in all_attrs:
+        raise nx.NetworkXError(f"Target name {target!r} is an edge attr name")
+
+    nan = float("nan")
+    edge_attr = {k: [d.get(k, nan) for _, _, d in edgelist] for k in all_attrs}
+
+    if G.is_multigraph() and edge_key is not None:
+        if edge_key in all_attrs:
+            raise nx.NetworkXError(f"Edge key name {edge_key!r} is an edge attr name")
+        edge_keys = [k for _, _, k in G.edges(keys=True)]
+        edgelistdict = {source: source_nodes, target: target_nodes, edge_key: edge_keys}
+    else:
+        edgelistdict = {source: source_nodes, target: target_nodes}
+
+    edgelistdict.update(edge_attr)
+    return pd.DataFrame(edgelistdict, dtype=dtype)
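
A small sketch of the resulting frame layout, with source/target columns first and
edge attrs appended (illustrative, assumes pandas):

>>> import networkx as nx
>>> G = nx.Graph([("a", "b", {"cost": 2})])
>>> nx.to_pandas_edgelist(G)
  source target  cost
0      a      b     2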


 @nx._dispatchable(graphs=None, returns_graph=True)
-def from_pandas_edgelist(df, source='source', target='target', edge_attr=
-    None, create_using=None, edge_key=None):
+def from_pandas_edgelist(
+    df,
+    source="source",
+    target="target",
+    edge_attr=None,
+    create_using=None,
+    edge_key=None,
+):
     """Returns a graph from Pandas DataFrame containing an edge list.

     The Pandas DataFrame should contain at least two columns of node names and
@@ -349,12 +430,62 @@ def from_pandas_edgelist(df, source='source', target='target', edge_attr=


     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
-def to_scipy_sparse_array(G, nodelist=None, dtype=None, weight='weight',
-    format='csr'):
+    g = nx.empty_graph(0, create_using)
+
+    if edge_attr is None:
+        g.add_edges_from(zip(df[source], df[target]))
+        return g
+
+    reserved_columns = [source, target]
+
+    # Additional columns requested
+    attr_col_headings = []
+    attribute_data = []
+    if edge_attr is True:
+        attr_col_headings = [c for c in df.columns if c not in reserved_columns]
+    elif isinstance(edge_attr, list | tuple):
+        attr_col_headings = edge_attr
+    else:
+        attr_col_headings = [edge_attr]
+    if len(attr_col_headings) == 0:
+        raise nx.NetworkXError(
+            f"Invalid edge_attr argument: No columns found with name: {attr_col_headings}"
+        )
+
+    try:
+        attribute_data = zip(*[df[col] for col in attr_col_headings])
+    except (KeyError, TypeError) as err:
+        msg = f"Invalid edge_attr argument: {edge_attr}"
+        raise nx.NetworkXError(msg) from err
+
+    if g.is_multigraph():
+        # => append the edge keys from the df to the bundled data
+        if edge_key is not None:
+            try:
+                multigraph_edge_keys = df[edge_key]
+                attribute_data = zip(attribute_data, multigraph_edge_keys)
+            except (KeyError, TypeError) as err:
+                msg = f"Invalid edge_key argument: {edge_key}"
+                raise nx.NetworkXError(msg) from err
+
+        for s, t, attrs in zip(df[source], df[target], attribute_data):
+            if edge_key is not None:
+                attrs, multigraph_edge_key = attrs
+                key = g.add_edge(s, t, key=multigraph_edge_key)
+            else:
+                key = g.add_edge(s, t)
+
+            g[s][t][key].update(zip(attr_col_headings, attrs))
+    else:
+        for s, t, attrs in zip(df[source], df[target], attribute_data):
+            g.add_edge(s, t)
+            g[s][t].update(zip(attr_col_headings, attrs))
+
+    return g
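
An illustrative round trip through the non-multigraph branch (not from the patch;
`int(...)` just normalizes the pandas scalar for display):

>>> import pandas as pd, networkx as nx
>>> df = pd.DataFrame({"source": [0, 1], "target": [1, 2], "cost": [5, 6]})
>>> G = nx.from_pandas_edgelist(df, edge_attr="cost")
>>> int(G[1][2]["cost"])
6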
+
+
+@nx._dispatchable(edge_attrs="weight")
+def to_scipy_sparse_array(G, nodelist=None, dtype=None, weight="weight", format="csr"):
     """Returns the graph adjacency matrix as a SciPy sparse array.

     Parameters
@@ -434,7 +565,57 @@ def to_scipy_sparse_array(G, nodelist=None, dtype=None, weight='weight',
     .. [1] Scipy Dev. References, "Sparse Matrices",
        https://docs.scipy.org/doc/scipy/reference/sparse.html
     """
-    pass
+    import scipy as sp
+
+    if len(G) == 0:
+        raise nx.NetworkXError("Graph has no nodes or edges")
+
+    if nodelist is None:
+        nodelist = list(G)
+        nlen = len(G)
+    else:
+        nlen = len(nodelist)
+        if nlen == 0:
+            raise nx.NetworkXError("nodelist has no nodes")
+        nodeset = set(G.nbunch_iter(nodelist))
+        if nlen != len(nodeset):
+            for n in nodelist:
+                if n not in G:
+                    raise nx.NetworkXError(f"Node {n} in nodelist is not in G")
+            raise nx.NetworkXError("nodelist contains duplicates.")
+        if nlen < len(G):
+            G = G.subgraph(nodelist)
+
+    index = dict(zip(nodelist, range(nlen)))
+    coefficients = zip(
+        *((index[u], index[v], wt) for u, v, wt in G.edges(data=weight, default=1))
+    )
+    try:
+        row, col, data = coefficients
+    except ValueError:
+        # there is no edge in the subgraph
+        row, col, data = [], [], []
+
+    if G.is_directed():
+        A = sp.sparse.coo_array((data, (row, col)), shape=(nlen, nlen), dtype=dtype)
+    else:
+        # symmetrize matrix
+        d = data + data
+        r = row + col
+        c = col + row
+        # selfloop entries get double counted when symmetrizing
+        # so we subtract the data on the diagonal
+        selfloops = list(nx.selfloop_edges(G, data=weight, default=1))
+        if selfloops:
+            diag_index, diag_data = zip(*((index[u], -wt) for u, v, wt in selfloops))
+            d += diag_data
+            r += diag_index
+            c += diag_index
+        A = sp.sparse.coo_array((d, (r, c)), shape=(nlen, nlen), dtype=dtype)
+    try:
+        return A.asformat(format)
+    except ValueError as err:
+        raise nx.NetworkXError(f"Unknown sparse matrix format: {format}") from err


 def _csr_gen_triples(A):
@@ -442,7 +623,12 @@ def _csr_gen_triples(A):
     an iterable of weighted edge triples.

     """
-    pass
+    import numpy as np
+
+    nrows = A.shape[0]
+    indptr, dst_indices, data = A.indptr, A.indices, A.data
+    src_indices = np.repeat(np.arange(nrows), np.diff(indptr))
+    return zip(src_indices.tolist(), dst_indices.tolist(), data.tolist())


 def _csc_gen_triples(A):
@@ -450,7 +636,12 @@ def _csc_gen_triples(A):
     an iterable of weighted edge triples.

     """
-    pass
+    import numpy as np
+
+    ncols = A.shape[1]
+    indptr, src_indices, data = A.indptr, A.indices, A.data
+    dst_indices = np.repeat(np.arange(ncols), np.diff(indptr))
+    return zip(src_indices.tolist(), dst_indices.tolist(), data.tolist())


 def _coo_gen_triples(A):
@@ -458,7 +649,7 @@ def _coo_gen_triples(A):
     of weighted edge triples.

     """
-    pass
+    return zip(A.row.tolist(), A.col.tolist(), A.data.tolist())


 def _dok_gen_triples(A):
@@ -466,7 +657,9 @@ def _dok_gen_triples(A):
     iterable of weighted edge triples.

     """
-    pass
+    for (r, c), v in A.items():
+        # Use `v.item()` to convert a NumPy scalar to the appropriate Python scalar
+        yield int(r), int(c), v.item()


 def _generate_weighted_edges(A):
@@ -476,12 +669,20 @@ def _generate_weighted_edges(A):
     `A` is a SciPy sparse array (in any format).

     """
-    pass
+    if A.format == "csr":
+        return _csr_gen_triples(A)
+    if A.format == "csc":
+        return _csc_gen_triples(A)
+    if A.format == "dok":
+        return _dok_gen_triples(A)
+    # If A is in any other format (including COO), convert it to COO format.
+    return _coo_gen_triples(A.tocoo())


 @nx._dispatchable(graphs=None, returns_graph=True)
-def from_scipy_sparse_array(A, parallel_edges=False, create_using=None,
-    edge_attribute='weight'):
+def from_scipy_sparse_array(
+    A, parallel_edges=False, create_using=None, edge_attribute="weight"
+):
     """Creates a new graph from an adjacency matrix given as a SciPy sparse
     array.

@@ -544,12 +745,53 @@ def from_scipy_sparse_array(A, parallel_edges=False, create_using=None,
     AtlasView({0: {'weight': 1}, 1: {'weight': 1}})

     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
-def to_numpy_array(G, nodelist=None, dtype=None, order=None,
-    multigraph_weight=sum, weight='weight', nonedge=0.0):
+    G = nx.empty_graph(0, create_using)
+    n, m = A.shape
+    if n != m:
+        raise nx.NetworkXError(f"Adjacency matrix not square: nx,ny={A.shape}")
+    # Make sure we get even the isolated nodes of the graph.
+    G.add_nodes_from(range(n))
+    # Create an iterable over (u, v, w) triples and for each triple, add an
+    # edge from u to v with weight w.
+    triples = _generate_weighted_edges(A)
+    # If the entries in the adjacency matrix are integers, the graph is a
+    # multigraph, and parallel_edges is True, then create parallel edges, each
+    # with weight 1, for each entry in the adjacency matrix. Otherwise, create
+    # one edge for each positive entry in the adjacency matrix and set the
+    # weight of that edge to be the entry in the matrix.
+    if A.dtype.kind in ("i", "u") and G.is_multigraph() and parallel_edges:
+        chain = itertools.chain.from_iterable
+        # The following line is equivalent to:
+        #
+        #     for (u, v) in edges:
+        #         for d in range(A[u, v]):
+        #             G.add_edge(u, v, weight=1)
+        #
+        triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples)
+    # If we are creating an undirected multigraph, only add the edges from the
+    # upper triangle of the matrix. Otherwise, add all the edges. This relies
+    # on the fact that the vertices created in the
+    # `_generate_weighted_edges()` function are actually the row/column
+    # indices for the matrix `A`.
+    #
+    # Without this check, we run into a problem where each edge is added twice
+    # when `G.add_weighted_edges_from()` is invoked below.
+    if G.is_multigraph() and not G.is_directed():
+        triples = ((u, v, d) for u, v, d in triples if u <= v)
+    G.add_weighted_edges_from(triples, weight=edge_attribute)
+    return G
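
A sketch of the `parallel_edges` expansion described in the comment above
(illustrative, assumes scipy):

>>> import scipy.sparse as sp, networkx as nx
>>> A = sp.csr_array([[0, 2], [2, 0]])
>>> G = nx.from_scipy_sparse_array(A, parallel_edges=True, create_using=nx.MultiGraph)
>>> G.number_of_edges()  # integer entry 2 expands into two weight-1 edges
2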
+
+
+@nx._dispatchable(edge_attrs="weight")  # edge attrs may also be obtained from `dtype`
+def to_numpy_array(
+    G,
+    nodelist=None,
+    dtype=None,
+    order=None,
+    multigraph_weight=sum,
+    weight="weight",
+    nonedge=0.0,
+):
     """Returns the graph adjacency matrix as a NumPy array.

     Parameters
@@ -700,12 +942,87 @@ def to_numpy_array(G, nodelist=None, dtype=None, order=None,
            [-1.,  0., -1.,  0.],
            [ 1., -1.,  0., -1.]])
     """
-    pass
+    import numpy as np
+
+    if nodelist is None:
+        nodelist = list(G)
+    nlen = len(nodelist)
+
+    # Input validation
+    nodeset = set(nodelist)
+    if nodeset - set(G):
+        raise nx.NetworkXError(f"Nodes {nodeset - set(G)} in nodelist is not in G")
+    if len(nodeset) < nlen:
+        raise nx.NetworkXError("nodelist contains duplicates.")
+
+    A = np.full((nlen, nlen), fill_value=nonedge, dtype=dtype, order=order)
+
+    # Corner cases: empty nodelist or graph without any edges
+    if nlen == 0 or G.number_of_edges() == 0:
+        return A
+
+    # If dtype is structured and weight is None, use dtype field names as
+    # edge attributes
+    edge_attrs = None  # Only single edge attribute by default
+    if A.dtype.names:
+        if weight is None:
+            edge_attrs = dtype.names
+        else:
+            raise ValueError(
+                "Specifying `weight` not supported for structured dtypes\n."
+                "To create adjacency matrices from structured dtypes, use `weight=None`."
+            )
+
+    # Map nodes to row/col in matrix
+    idx = dict(zip(nodelist, range(nlen)))
+    if len(nodelist) < len(G):
+        G = G.subgraph(nodelist).copy()
+
+    # Collect all edge weights and reduce with `multigraph_weight`
+    if G.is_multigraph():
+        if edge_attrs:
+            raise nx.NetworkXError(
+                "Structured arrays are not supported for MultiGraphs"
+            )
+        d = defaultdict(list)
+        for u, v, wt in G.edges(data=weight, default=1.0):
+            d[(idx[u], idx[v])].append(wt)
+        i, j = np.array(list(d.keys())).T  # indices
+        wts = [multigraph_weight(ws) for ws in d.values()]  # reduced weights
+    else:
+        i, j, wts = [], [], []
+
+        # Special branch: multi-attr adjacency from structured dtypes
+        if edge_attrs:
+            # Extract edges with all data
+            for u, v, data in G.edges(data=True):
+                i.append(idx[u])
+                j.append(idx[v])
+                wts.append(data)
+            # Map each attribute to the appropriate named field in the
+            # structured dtype
+            for attr in edge_attrs:
+                attr_data = [wt.get(attr, 1.0) for wt in wts]
+                A[attr][i, j] = attr_data
+                if not G.is_directed():
+                    A[attr][j, i] = attr_data
+            return A
+
+        for u, v, wt in G.edges(data=weight, default=1.0):
+            i.append(idx[u])
+            j.append(idx[v])
+            wts.append(wt)
+
+    # Set array values with advanced indexing
+    A[i, j] = wts
+    if not G.is_directed():
+        A[j, i] = wts
+
+    return A
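
An illustrative doctest for the multigraph reduction path (not part of the patch):

>>> import networkx as nx
>>> G = nx.MultiGraph([(0, 1, {"weight": 2}), (0, 1, {"weight": 3})])
>>> nx.to_numpy_array(G, multigraph_weight=min)  # parallel edges reduced
array([[0., 2.],
       [2., 0.]])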


 @nx._dispatchable(graphs=None, returns_graph=True)
-def from_numpy_array(A, parallel_edges=False, create_using=None, edge_attr=
-    'weight'):
+def from_numpy_array(A, parallel_edges=False, create_using=None, edge_attr="weight"):
     """Returns a graph from a 2D NumPy array.

     The 2D NumPy array is interpreted as an adjacency matrix for the graph.
@@ -801,4 +1118,85 @@ def from_numpy_array(A, parallel_edges=False, create_using=None, edge_attr=
     1.0

     """
-    pass
+    kind_to_python_type = {
+        "f": float,
+        "i": int,
+        "u": int,
+        "b": bool,
+        "c": complex,
+        "S": str,
+        "U": str,
+        "V": "void",
+    }
+    G = nx.empty_graph(0, create_using)
+    if A.ndim != 2:
+        raise nx.NetworkXError(f"Input array must be 2D, not {A.ndim}")
+    n, m = A.shape
+    if n != m:
+        raise nx.NetworkXError(f"Adjacency matrix not square: nx,ny={A.shape}")
+    dt = A.dtype
+    try:
+        python_type = kind_to_python_type[dt.kind]
+    except Exception as err:
+        raise TypeError(f"Unknown numpy data type: {dt}") from err
+
+    # Make sure we get even the isolated nodes of the graph.
+    G.add_nodes_from(range(n))
+    # Get a list of all the entries in the array with nonzero entries. These
+    # coordinates become edges in the graph. (convert to int from np.int64)
+    edges = ((int(e[0]), int(e[1])) for e in zip(*A.nonzero()))
+    # handle numpy constructed data type
+    if python_type == "void":
+        # Sort the fields by their offset, then by dtype, then by name.
+        fields = sorted(
+            (offset, dtype, name) for name, (dtype, offset) in A.dtype.fields.items()
+        )
+        triples = (
+            (
+                u,
+                v,
+                {}
+                if edge_attr in [False, None]
+                else {
+                    name: kind_to_python_type[dtype.kind](val)
+                    for (_, dtype, name), val in zip(fields, A[u, v])
+                },
+            )
+            for u, v in edges
+        )
+    # If the entries in the adjacency matrix are integers, the graph is a
+    # multigraph, and parallel_edges is True, then create parallel edges, each
+    # with weight 1, for each entry in the adjacency matrix. Otherwise, create
+    # one edge for each positive entry in the adjacency matrix and set the
+    # weight of that edge to be the entry in the matrix.
+    elif python_type is int and G.is_multigraph() and parallel_edges:
+        chain = itertools.chain.from_iterable
+        # The following line is equivalent to:
+        #
+        #     for (u, v) in edges:
+        #         for d in range(A[u, v]):
+        #             G.add_edge(u, v, weight=1)
+        #
+        if edge_attr in [False, None]:
+            triples = chain(((u, v, {}) for d in range(A[u, v])) for (u, v) in edges)
+        else:
+            triples = chain(
+                ((u, v, {edge_attr: 1}) for d in range(A[u, v])) for (u, v) in edges
+            )
+    else:  # basic data type
+        if edge_attr in [False, None]:
+            triples = ((u, v, {}) for u, v in edges)
+        else:
+            triples = ((u, v, {edge_attr: python_type(A[u, v])}) for u, v in edges)
+    # If we are creating an undirected multigraph, only add the edges from the
+    # upper triangle of the matrix. Otherwise, add all the edges. This relies
+    # on the fact that the vertices yielded by `A.nonzero()` above are
+    # actually the row/column indices for the matrix `A`.
+    #
+    # Without this check, we run into a problem where each edge is added twice
+    # when `G.add_edges_from()` is invoked below.
+    if G.is_multigraph() and not G.is_directed():
+        triples = ((u, v, d) for u, v, d in triples if u <= v)
+    G.add_edges_from(triples)
+    return G
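
A small sketch of the `edge_attr` handling above (illustrative):

>>> import numpy as np, networkx as nx
>>> A = np.array([[0, 1], [1, 0]])
>>> nx.from_numpy_array(A)[0][1]
{'weight': 1}
>>> nx.from_numpy_array(A, edge_attr=None)[0][1]  # suppress the weight attr
{}
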
diff --git a/networkx/drawing/layout.py b/networkx/drawing/layout.py
index 0979963e3..abded7a67 100644
--- a/networkx/drawing/layout.py
+++ b/networkx/drawing/layout.py
@@ -17,11 +17,45 @@ Warning: Most layout routines have only been tested in 2-dimensions.
 """
 import networkx as nx
 from networkx.utils import np_random_state
-__all__ = ['bipartite_layout', 'circular_layout', 'kamada_kawai_layout',
-    'random_layout', 'rescale_layout', 'rescale_layout_dict',
-    'shell_layout', 'spring_layout', 'spectral_layout', 'planar_layout',
-    'fruchterman_reingold_layout', 'spiral_layout', 'multipartite_layout',
-    'bfs_layout', 'arf_layout']
+
+__all__ = [
+    "bipartite_layout",
+    "circular_layout",
+    "kamada_kawai_layout",
+    "random_layout",
+    "rescale_layout",
+    "rescale_layout_dict",
+    "shell_layout",
+    "spring_layout",
+    "spectral_layout",
+    "planar_layout",
+    "fruchterman_reingold_layout",
+    "spiral_layout",
+    "multipartite_layout",
+    "bfs_layout",
+    "arf_layout",
+]
+
+
+def _process_params(G, center, dim):
+    # Some boilerplate code.
+    import numpy as np
+
+    if not isinstance(G, nx.Graph):
+        empty_graph = nx.Graph()
+        empty_graph.add_nodes_from(G)
+        G = empty_graph
+
+    if center is None:
+        center = np.zeros(dim)
+    else:
+        center = np.asarray(center)
+
+    if len(center) != dim:
+        msg = "length of center coordinates must match dimension of layout"
+        raise ValueError(msg)
+
+    return G, center


 @np_random_state(3)
@@ -63,10 +97,18 @@ def random_layout(G, center=None, dim=2, seed=None):
     >>> pos = nx.random_layout(G)

     """
-    pass
+    import numpy as np
+
+    G, center = _process_params(G, center, dim)
+    pos = seed.rand(len(G), dim) + center
+    pos = pos.astype(np.float32)
+    pos = dict(zip(G, pos))
+
+    return pos


 def circular_layout(G, scale=1, center=None, dim=2):
+    # dim=2 only
     """Position nodes on a circle.

     Parameters
@@ -107,7 +149,30 @@ def circular_layout(G, scale=1, center=None, dim=2):
     try to minimize edge crossings.

     """
-    pass
+    import numpy as np
+
+    if dim < 2:
+        raise ValueError("cannot handle dimensions < 2")
+
+    G, center = _process_params(G, center, dim)
+
+    paddims = max(0, (dim - 2))
+
+    if len(G) == 0:
+        pos = {}
+    elif len(G) == 1:
+        pos = {nx.utils.arbitrary_element(G): center}
+    else:
+        # Discard the extra angle since it matches 0 radians.
+        theta = np.linspace(0, 1, len(G) + 1)[:-1] * 2 * np.pi
+        theta = theta.astype(np.float32)
+        pos = np.column_stack(
+            [np.cos(theta), np.sin(theta), np.zeros((len(G), paddims))]
+        )
+        pos = rescale_layout(pos, scale=scale) + center
+        pos = dict(zip(G, pos))
+
+    return pos
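
An illustrative check that the rescaled positions land on the unit circle around the
default center (not from the patch):

>>> import numpy as np, networkx as nx
>>> pos = nx.circular_layout(nx.cycle_graph(4))
>>> np.allclose([np.hypot(*p) for p in pos.values()], 1.0)
True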


 def shell_layout(G, nlist=None, rotate=None, scale=1, center=None, dim=2):
@@ -158,11 +223,52 @@ def shell_layout(G, nlist=None, rotate=None, scale=1, center=None, dim=2):
     try to minimize edge crossings.

     """
-    pass
-
-
-def bipartite_layout(G, nodes, align='vertical', scale=1, center=None,
-    aspect_ratio=4 / 3):
+    import numpy as np
+
+    if dim != 2:
+        raise ValueError("can only handle 2 dimensions")
+
+    G, center = _process_params(G, center, dim)
+
+    if len(G) == 0:
+        return {}
+    if len(G) == 1:
+        return {nx.utils.arbitrary_element(G): center}
+
+    if nlist is None:
+        # draw the whole graph in one shell
+        nlist = [list(G)]
+
+    radius_bump = scale / len(nlist)
+
+    if len(nlist[0]) == 1:
+        # single node at center
+        radius = 0.0
+    else:
+        # else start at r=1
+        radius = radius_bump
+
+    if rotate is None:
+        rotate = np.pi / len(nlist)
+    first_theta = rotate
+    npos = {}
+    for nodes in nlist:
+        # Discard the last angle (endpoint=False) since 2*pi matches 0 radians
+        theta = (
+            np.linspace(0, 2 * np.pi, len(nodes), endpoint=False, dtype=np.float32)
+            + first_theta
+        )
+        pos = radius * np.column_stack([np.cos(theta), np.sin(theta)]) + center
+        npos.update(zip(nodes, pos))
+        radius += radius_bump
+        first_theta += rotate
+
+    return npos
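Illustrative call: each sublist of `nlist` becomes one concentric shell, and a single-node first shell sits at the center (radius 0):

    >>> pos = nx.shell_layout(nx.path_graph(4), nlist=[[0], [1, 2, 3]])
    >>> all(abs(float(x)) < 1e-6 for x in pos[0])
    True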
+
+
+def bipartite_layout(
+    G, nodes, align="vertical", scale=1, center=None, aspect_ratio=4 / 3
+):
     """Position nodes in two straight lines.

     Parameters
@@ -203,12 +309,55 @@ def bipartite_layout(G, nodes, align='vertical', scale=1, center=None,
     try to minimize edge crossings.

     """
-    pass
+
+    import numpy as np
+
+    if align not in ("vertical", "horizontal"):
+        msg = "align must be either vertical or horizontal."
+        raise ValueError(msg)
+
+    G, center = _process_params(G, center=center, dim=2)
+    if len(G) == 0:
+        return {}
+
+    height = 1
+    width = aspect_ratio * height
+    offset = (width / 2, height / 2)
+
+    top = dict.fromkeys(nodes)
+    bottom = [v for v in G if v not in top]
+    nodes = list(top) + bottom
+
+    left_xs = np.repeat(0, len(top))
+    right_xs = np.repeat(width, len(bottom))
+    left_ys = np.linspace(0, height, len(top))
+    right_ys = np.linspace(0, height, len(bottom))
+
+    top_pos = np.column_stack([left_xs, left_ys]) - offset
+    bottom_pos = np.column_stack([right_xs, right_ys]) - offset
+
+    pos = np.concatenate([top_pos, bottom_pos])
+    pos = rescale_layout(pos, scale=scale) + center
+    if align == "horizontal":
+        pos = pos[:, ::-1]  # swap x and y coords
+    pos = dict(zip(nodes, pos))
+    return pos
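Illustrative usage; `nodes` fixes which set forms the first line, and both members of that set share one x coordinate in the default vertical orientation:

    >>> G = nx.complete_bipartite_graph(2, 3)
    >>> pos = nx.bipartite_layout(G, nodes=[0, 1])
    >>> bool(pos[0][0] == pos[1][0])
    True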


 @np_random_state(10)
-def spring_layout(G, k=None, pos=None, fixed=None, iterations=50, threshold
-    =0.0001, weight='weight', scale=1, center=None, dim=2, seed=None):
+def spring_layout(
+    G,
+    k=None,
+    pos=None,
+    fixed=None,
+    iterations=50,
+    threshold=1e-4,
+    weight="weight",
+    scale=1,
+    center=None,
+    dim=2,
+    seed=None,
+):
     """Position nodes using Fruchterman-Reingold force-directed algorithm.

     The algorithm simulates a force-directed representation of the network
@@ -290,14 +439,206 @@ def spring_layout(G, k=None, pos=None, fixed=None, iterations=50, threshold
     # The same using longer but equivalent function name
     >>> pos = nx.fruchterman_reingold_layout(G)
     """
-    pass
+    import numpy as np
+
+    G, center = _process_params(G, center, dim)
+
+    if fixed is not None:
+        if pos is None:
+            raise ValueError("nodes are fixed without positions given")
+        for node in fixed:
+            if node not in pos:
+                raise ValueError("nodes are fixed without positions given")
+        nfixed = {node: i for i, node in enumerate(G)}
+        fixed = np.asarray([nfixed[node] for node in fixed if node in nfixed])
+
+    if pos is not None:
+        # Determine size of existing domain to adjust initial positions
+        dom_size = max(coord for pos_tup in pos.values() for coord in pos_tup)
+        if dom_size == 0:
+            dom_size = 1
+        pos_arr = seed.rand(len(G), dim) * dom_size + center
+
+        for i, n in enumerate(G):
+            if n in pos:
+                pos_arr[i] = np.asarray(pos[n])
+    else:
+        pos_arr = None
+        dom_size = 1
+
+    if len(G) == 0:
+        return {}
+    if len(G) == 1:
+        return {nx.utils.arbitrary_element(G.nodes()): center}
+
+    try:
+        # Sparse matrix
+        if len(G) < 500:  # the dense solver is faster for small graphs
+            raise ValueError
+        A = nx.to_scipy_sparse_array(G, weight=weight, dtype="f")
+        if k is None and fixed is not None:
+            # We must adjust k by domain size for layouts not near 1x1
+            nnodes, _ = A.shape
+            k = dom_size / np.sqrt(nnodes)
+        pos = _sparse_fruchterman_reingold(
+            A, k, pos_arr, fixed, iterations, threshold, dim, seed
+        )
+    except ValueError:
+        A = nx.to_numpy_array(G, weight=weight)
+        if k is None and fixed is not None:
+            # We must adjust k by domain size for layouts not near 1x1
+            nnodes, _ = A.shape
+            k = dom_size / np.sqrt(nnodes)
+        pos = _fruchterman_reingold(
+            A, k, pos_arr, fixed, iterations, threshold, dim, seed
+        )
+    if fixed is None and scale is not None:
+        pos = rescale_layout(pos, scale=scale) + center
+    pos = dict(zip(G, pos))
+    return pos
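Illustrative: nodes named in `fixed` keep exactly the coordinates supplied in `pos`, and (per the guard above) rescaling is skipped whenever `fixed` is given:

    >>> G = nx.path_graph(4)
    >>> pos = nx.spring_layout(G, pos={0: (0, 0), 3: (1, 1)}, fixed=[0, 3], seed=7)
    >>> [float(x) for x in pos[0]]
    [0.0, 0.0]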


 fruchterman_reingold_layout = spring_layout


-def kamada_kawai_layout(G, dist=None, pos=None, weight='weight', scale=1,
-    center=None, dim=2):
+@np_random_state(7)
+def _fruchterman_reingold(
+    A, k=None, pos=None, fixed=None, iterations=50, threshold=1e-4, dim=2, seed=None
+):
+    # Position nodes in adjacency matrix A using Fruchterman-Reingold
+    # Entry point for NetworkX graph is fruchterman_reingold_layout()
+    import numpy as np
+
+    try:
+        nnodes, _ = A.shape
+    except AttributeError as err:
+        msg = "fruchterman_reingold() takes an adjacency matrix as input"
+        raise nx.NetworkXError(msg) from err
+
+    if pos is None:
+        # random initial positions
+        pos = np.asarray(seed.rand(nnodes, dim), dtype=A.dtype)
+    else:
+        # make sure positions are of same type as matrix
+        pos = pos.astype(A.dtype)
+
+    # optimal distance between nodes
+    if k is None:
+        k = np.sqrt(1.0 / nnodes)
+    # the initial "temperature" is about 0.1 of the domain size (=1x1)
+    # this is the largest step allowed in the dynamics.
+    # We need to calculate this in case our fixed positions force our domain
+    # to be much bigger than 1x1
+    t = max(max(pos.T[0]) - min(pos.T[0]), max(pos.T[1]) - min(pos.T[1])) * 0.1
+    # simple cooling scheme.
+    # linearly step down by dt on each iteration so last iteration is size dt.
+    dt = t / (iterations + 1)
+    delta = np.zeros((pos.shape[0], pos.shape[0], pos.shape[1]), dtype=A.dtype)
+    # the inscrutable (but fast) version
+    # this is still O(V^2)
+    # could use multilevel methods to speed this up significantly
+    for iteration in range(iterations):
+        # matrix of difference between points
+        delta = pos[:, np.newaxis, :] - pos[np.newaxis, :, :]
+        # distance between points
+        distance = np.linalg.norm(delta, axis=-1)
+        # enforce minimum distance of 0.01
+        np.clip(distance, 0.01, None, out=distance)
+        # displacement "force"
+        displacement = np.einsum(
+            "ijk,ij->ik", delta, (k * k / distance**2 - A * distance / k)
+        )
+        # update positions
+        length = np.linalg.norm(displacement, axis=-1)
+        length = np.where(length < 0.01, 0.1, length)
+        delta_pos = np.einsum("ij,i->ij", displacement, t / length)
+        if fixed is not None:
+            # don't change positions of fixed nodes
+            delta_pos[fixed] = 0.0
+        pos += delta_pos
+        # cool temperature
+        t -= dt
+        if (np.linalg.norm(delta_pos) / nnodes) < threshold:
+            break
+    return pos
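For orientation, the `einsum` above applies the classic Fruchterman-Reingold forces along each difference vector; in our notation (not from the patch), with d_{ij} the pairwise distance:

    F_{ij} = (p_i - p_j)\left(\frac{k^2}{d_{ij}^2} - A_{ij}\,\frac{d_{ij}}{k}\right),
    \qquad d_{ij} = \lVert p_i - p_j \rVert

i.e. a repulsion of magnitude k^2/d_{ij} and an attraction of magnitude A_{ij} d_{ij}^2/k along the unit direction.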
+
+
+@np_random_state(7)
+def _sparse_fruchterman_reingold(
+    A, k=None, pos=None, fixed=None, iterations=50, threshold=1e-4, dim=2, seed=None
+):
+    # Position nodes in adjacency matrix A using Fruchterman-Reingold
+    # Entry point for NetworkX graph is fruchterman_reingold_layout()
+    # Sparse version
+    import numpy as np
+    import scipy as sp
+
+    try:
+        nnodes, _ = A.shape
+    except AttributeError as err:
+        msg = "fruchterman_reingold() takes an adjacency matrix as input"
+        raise nx.NetworkXError(msg) from err
+    # make sure we have a List of Lists (LIL) sparse representation
+    try:
+        A = A.tolil()
+    except AttributeError:
+        A = (sp.sparse.coo_array(A)).tolil()
+
+    if pos is None:
+        # random initial positions
+        pos = np.asarray(seed.rand(nnodes, dim), dtype=A.dtype)
+    else:
+        # make sure positions are of same type as matrix
+        pos = pos.astype(A.dtype)
+
+    # no fixed nodes
+    if fixed is None:
+        fixed = []
+
+    # optimal distance between nodes
+    if k is None:
+        k = np.sqrt(1.0 / nnodes)
+    # the initial "temperature" is about 0.1 of the domain size (=1x1)
+    # this is the largest step allowed in the dynamics.
+    t = max(max(pos.T[0]) - min(pos.T[0]), max(pos.T[1]) - min(pos.T[1])) * 0.1
+    # simple cooling scheme.
+    # linearly step down by dt on each iteration so last iteration is size dt.
+    dt = t / (iterations + 1)
+
+    displacement = np.zeros((dim, nnodes))
+    for iteration in range(iterations):
+        displacement *= 0
+        # loop over rows
+        for i in range(A.shape[0]):
+            if i in fixed:
+                continue
+            # difference between this row's node position and all others
+            delta = (pos[i] - pos).T
+            # distance between points
+            distance = np.sqrt((delta**2).sum(axis=0))
+            # enforce minimum distance of 0.01
+            distance = np.where(distance < 0.01, 0.01, distance)
+            # the adjacency matrix row
+            Ai = A.getrowview(i).toarray()  # TODO: revisit w/ sparse 1D container
+            # displacement "force"
+            displacement[:, i] += (
+                delta * (k * k / distance**2 - Ai * distance / k)
+            ).sum(axis=1)
+        # update positions
+        length = np.sqrt((displacement**2).sum(axis=0))
+        length = np.where(length < 0.01, 0.1, length)
+        delta_pos = (displacement * t / length).T
+        pos += delta_pos
+        # cool temperature
+        t -= dt
+        if (np.linalg.norm(delta_pos) / nnodes) < threshold:
+            break
+    return pos
+
+
+def kamada_kawai_layout(
+    G, dist=None, pos=None, weight="weight", scale=1, center=None, dim=2
+):
     """Position nodes using Kamada-Kawai path-length cost-function.

     Parameters
@@ -338,10 +679,88 @@ def kamada_kawai_layout(G, dist=None, pos=None, weight='weight', scale=1,
     >>> G = nx.path_graph(4)
     >>> pos = nx.kamada_kawai_layout(G)
     """
-    pass
+    import numpy as np
+
+    G, center = _process_params(G, center, dim)
+    nNodes = len(G)
+    if nNodes == 0:
+        return {}
+
+    if dist is None:
+        dist = dict(nx.shortest_path_length(G, weight=weight))
+    dist_mtx = 1e6 * np.ones((nNodes, nNodes))
+    for row, nr in enumerate(G):
+        if nr not in dist:
+            continue
+        rdist = dist[nr]
+        for col, nc in enumerate(G):
+            if nc not in rdist:
+                continue
+            dist_mtx[row][col] = rdist[nc]
+
+    if pos is None:
+        if dim >= 3:
+            pos = random_layout(G, dim=dim)
+        elif dim == 2:
+            pos = circular_layout(G, dim=dim)
+        else:
+            pos = dict(zip(G, np.linspace(0, 1, len(G))))
+    pos_arr = np.array([pos[n] for n in G])
+
+    pos = _kamada_kawai_solve(dist_mtx, pos_arr, dim)
+
+    pos = rescale_layout(pos, scale=scale) + center
+    return dict(zip(G, pos))
+
+
+def _kamada_kawai_solve(dist_mtx, pos_arr, dim):
+    # Anneal node locations based on the Kamada-Kawai cost-function,
+    # using the supplied matrix of preferred inter-node distances,
+    # and starting locations.
+
+    import numpy as np
+    import scipy as sp
+
+    meanwt = 1e-3
+    costargs = (np, 1 / (dist_mtx + np.eye(dist_mtx.shape[0]) * 1e-3), meanwt, dim)
+
+    optresult = sp.optimize.minimize(
+        _kamada_kawai_costfn,
+        pos_arr.ravel(),
+        method="L-BFGS-B",
+        args=costargs,
+        jac=True,
+    )

+    return optresult.x.reshape((-1, dim))

-def spectral_layout(G, weight='weight', scale=1, center=None, dim=2):
+
+def _kamada_kawai_costfn(pos_vec, np, invdist, meanweight, dim):
+    # Cost-function and gradient for Kamada-Kawai layout algorithm
+    nNodes = invdist.shape[0]
+    pos_arr = pos_vec.reshape((nNodes, dim))
+
+    delta = pos_arr[:, np.newaxis, :] - pos_arr[np.newaxis, :, :]
+    nodesep = np.linalg.norm(delta, axis=-1)
+    direction = np.einsum("ijk,ij->ijk", delta, 1 / (nodesep + np.eye(nNodes) * 1e-3))
+
+    offset = nodesep * invdist - 1.0
+    offset[np.diag_indices(nNodes)] = 0
+
+    cost = 0.5 * np.sum(offset**2)
+    grad = np.einsum("ij,ij,ijk->ik", invdist, offset, direction) - np.einsum(
+        "ij,ij,ijk->jk", invdist, offset, direction
+    )
+
+    # Additional parabolic term to encourage mean position to be near origin:
+    sumpos = np.sum(pos_arr, axis=0)
+    cost += 0.5 * meanweight * np.sum(sumpos**2)
+    grad += meanweight * sumpos
+
+    return (cost, grad.ravel())
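Spelled out, the quantity minimized here is (our notation; d_{ij} = 1/invdist_{ij} and \mu = meanweight):

    E(p) = \sum_{i<j}\left(\frac{\lVert p_i - p_j\rVert}{d_{ij}} - 1\right)^{2}
           + \frac{\mu}{2}\,\Bigl\lVert\sum_i p_i\Bigr\rVert^{2}

with the analytic gradient returned alongside the cost so L-BFGS-B avoids finite differencing.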
+
+
+def spectral_layout(G, weight="weight", scale=1, center=None, dim=2):
     """Position nodes using the eigenvectors of the graph Laplacian.

     Using the unnormalized Laplacian, the layout shows possible clusters of
@@ -385,7 +804,87 @@ def spectral_layout(G, weight='weight', scale=1, center=None, dim=2):
     For larger graphs (>500 nodes) this will use the SciPy sparse
     eigenvalue solver (ARPACK).
     """
-    pass
+    # handle some special cases that break the eigensolvers
+    import numpy as np
+
+    G, center = _process_params(G, center, dim)
+
+    if len(G) <= 2:
+        if len(G) == 0:
+            pos = np.array([])
+        elif len(G) == 1:
+            pos = np.array([center])
+        else:
+            pos = np.array([np.zeros(dim), np.array(center) * 2.0])
+        return dict(zip(G, pos))
+    try:
+        # Sparse matrix
+        if len(G) < 500:  # dense solver is faster for small graphs
+            raise ValueError
+        A = nx.to_scipy_sparse_array(G, weight=weight, dtype="d")
+        # Symmetrize directed graphs
+        if G.is_directed():
+            A = A + np.transpose(A)
+        pos = _sparse_spectral(A, dim)
+    except (ImportError, ValueError):
+        # Dense matrix
+        A = nx.to_numpy_array(G, weight=weight)
+        # Symmetrize directed graphs
+        if G.is_directed():
+            A += A.T
+        pos = _spectral(A, dim)
+
+    pos = rescale_layout(pos, scale=scale) + center
+    pos = dict(zip(G, pos))
+    return pos
+
+
+def _spectral(A, dim=2):
+    # Input adjacency matrix A
+    # Uses dense eigenvalue solver from numpy
+    import numpy as np
+
+    try:
+        nnodes, _ = A.shape
+    except AttributeError as err:
+        msg = "spectral() takes an adjacency matrix as input"
+        raise nx.NetworkXError(msg) from err
+
+    # form Laplacian matrix where D is diagonal of degrees
+    D = np.identity(nnodes, dtype=A.dtype) * np.sum(A, axis=1)
+    L = D - A
+
+    eigenvalues, eigenvectors = np.linalg.eig(L)
+    # sort and keep smallest nonzero
+    index = np.argsort(eigenvalues)[1 : dim + 1]  # 0 index is zero eigenvalue
+    return np.real(eigenvectors[:, index])
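For orientation (standard spectral drawing, stated in our notation): with L = D - A and eigenpairs L u_k = \lambda_k u_k, 0 = \lambda_1 \le \lambda_2 \le \cdots, node i is placed at

    \mathrm{pos}(i) = \bigl(u_2(i), \ldots, u_{\mathrm{dim}+1}(i)\bigr)

skipping the constant eigenvector that belongs to \lambda_1 = 0.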
+
+
+def _sparse_spectral(A, dim=2):
+    # Input adjacency matrix A
+    # Uses sparse eigenvalue solver from scipy
+    # Could use multilevel methods here, see Koren "On spectral graph drawing"
+    import numpy as np
+    import scipy as sp
+
+    try:
+        nnodes, _ = A.shape
+    except AttributeError as err:
+        msg = "sparse_spectral() takes an adjacency matrix as input"
+        raise nx.NetworkXError(msg) from err
+
+    # form Laplacian matrix
+    # TODO: Rm csr_array wrapper in favor of spdiags array constructor when available
+    D = sp.sparse.csr_array(sp.sparse.spdiags(A.sum(axis=1), 0, nnodes, nnodes))
+    L = D - A
+
+    k = dim + 1
+    # number of Lanczos vectors for the ARPACK solver. What is the right scaling?
+    ncv = max(2 * k + 1, int(np.sqrt(nnodes)))
+    # return smallest k eigenvalues and eigenvectors
+    eigenvalues, eigenvectors = sp.sparse.linalg.eigsh(L, k, which="SM", ncv=ncv)
+    index = np.argsort(eigenvalues)[1:k]  # 0 index is zero eigenvalue
+    return np.real(eigenvectors[:, index])


 def planar_layout(G, scale=1, center=None, dim=2):
@@ -421,11 +920,31 @@ def planar_layout(G, scale=1, center=None, dim=2):
     >>> G = nx.path_graph(4)
     >>> pos = nx.planar_layout(G)
     """
-    pass
+    import numpy as np
+
+    if dim != 2:
+        raise ValueError("can only handle 2 dimensions")
+
+    G, center = _process_params(G, center, dim)
+
+    if len(G) == 0:
+        return {}

+    if isinstance(G, nx.PlanarEmbedding):
+        embedding = G
+    else:
+        is_planar, embedding = nx.check_planarity(G)
+        if not is_planar:
+            raise nx.NetworkXException("G is not planar.")
+    pos = nx.combinatorial_embedding_to_pos(embedding)
+    node_list = list(embedding)
+    pos = np.vstack([pos[x] for x in node_list])
+    pos = pos.astype(np.float64)
+    pos = rescale_layout(pos, scale=scale) + center
+    return dict(zip(node_list, pos))

-def spiral_layout(G, scale=1, center=None, dim=2, resolution=0.35,
-    equidistant=False):
+
+def spiral_layout(G, scale=1, center=None, dim=2, resolution=0.35, equidistant=False):
     """Position nodes in a spiral layout.

     Parameters
@@ -469,11 +988,42 @@ def spiral_layout(G, scale=1, center=None, dim=2, resolution=0.35,
     This algorithm currently only works in two dimensions.

     """
-    pass
+    import numpy as np
+
+    if dim != 2:
+        raise ValueError("can only handle 2 dimensions")
+
+    G, center = _process_params(G, center, dim)

+    if len(G) == 0:
+        return {}
+    if len(G) == 1:
+        return {nx.utils.arbitrary_element(G): center}

-def multipartite_layout(G, subset_key='subset', align='vertical', scale=1,
-    center=None):
+    pos = []
+    if equidistant:
+        chord = 1
+        step = 0.5
+        theta = resolution
+        theta += chord / (step * theta)
+        for _ in range(len(G)):
+            r = step * theta
+            theta += chord / r
+            pos.append([np.cos(theta) * r, np.sin(theta) * r])
+
+    else:
+        dist = np.arange(len(G), dtype=float)
+        angle = resolution * dist
+        pos = np.transpose(dist * np.array([np.cos(angle), np.sin(angle)]))
+
+    pos = rescale_layout(np.array(pos), scale=scale) + center
+
+    pos = dict(zip(G, pos))
+
+    return pos
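The `equidistant` branch walks an Archimedean spiral r = s\,\theta (here s = 0.5), advancing the angle by one chord length c = 1 divided by the current radius, so consecutive nodes end up roughly a constant distance apart (sketched in our notation):

    r_n = s\,\theta_n,\qquad \theta_{n+1} = \theta_n + \frac{c}{r_n},
    \qquad \lVert p_{n+1} - p_n\rVert \approx c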
+
+
+def multipartite_layout(G, subset_key="subset", align="vertical", scale=1, center=None):
     """Position nodes in layers of straight lines.

     Parameters
@@ -519,11 +1069,67 @@ def multipartite_layout(G, subset_key='subset', align='vertical', scale=1,
     have subset_key data, they will be placed in the corresponding layers.

     """
-    pass
-
-
-def arf_layout(G, pos=None, scaling=1, a=1.1, etol=1e-06, dt=0.001,
-    max_iter=1000):
+    import numpy as np
+
+    if align not in ("vertical", "horizontal"):
+        msg = "align must be either vertical or horizontal."
+        raise ValueError(msg)
+
+    G, center = _process_params(G, center=center, dim=2)
+    if len(G) == 0:
+        return {}
+
+    try:
+        # check if subset_key is dict-like
+        if len(G) != sum(len(nodes) for nodes in subset_key.values()):
+            raise nx.NetworkXError(
+                "all nodes must be in one subset of `subset_key` dict"
+            )
+    except AttributeError:
+        # subset_key is not a dict, hence a string
+        node_to_subset = nx.get_node_attributes(G, subset_key)
+        if len(node_to_subset) != len(G):
+            raise nx.NetworkXError(
+                f"all nodes need a subset_key attribute: {subset_key}"
+            )
+        subset_key = nx.utils.groups(node_to_subset)
+
+    # Sort by layer, if possible
+    try:
+        layers = dict(sorted(subset_key.items()))
+    except TypeError:
+        layers = subset_key
+
+    pos = None
+    nodes = []
+    width = len(layers)
+    for i, layer in enumerate(layers.values()):
+        height = len(layer)
+        xs = np.repeat(i, height)
+        ys = np.arange(0, height, dtype=float)
+        offset = ((width - 1) / 2, (height - 1) / 2)
+        layer_pos = np.column_stack([xs, ys]) - offset
+        if pos is None:
+            pos = layer_pos
+        else:
+            pos = np.concatenate([pos, layer_pos])
+        nodes.extend(layer)
+    pos = rescale_layout(pos, scale=scale) + center
+    if align == "horizontal":
+        pos = pos[:, ::-1]  # swap x and y coords
+    pos = dict(zip(nodes, pos))
+    return pos
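Illustrative: `subset_key` may be a node-attribute name (the default "subset", which `nx.complete_multipartite_graph` sets) or a dict mapping layer keys to node lists, the form `bfs_layout` passes in later in this file:

    >>> G = nx.complete_multipartite_graph(2, 3)
    >>> pos_attr = nx.multipartite_layout(G)
    >>> pos_dict = nx.multipartite_layout(G, subset_key={0: [0, 1], 1: [2, 3, 4]})
    >>> sorted(pos_dict) == sorted(pos_attr)
    True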
+
+
+def arf_layout(
+    G,
+    pos=None,
+    scaling=1,
+    a=1.1,
+    etol=1e-6,
+    dt=1e-3,
+    max_iter=1000,
+):
     """Arf layout for networkx

     The attractive and repulsive forces (arf) layout [1]
@@ -569,7 +1175,62 @@ def arf_layout(G, pos=None, scaling=1, a=1.1, etol=1e-06, dt=0.001,
     >>> pos = nx.arf_layout(G)

     """
-    pass
+    import warnings
+
+    import numpy as np
+
+    if a <= 1:
+        msg = "The parameter a should be larger than 1"
+        raise ValueError(msg)
+
+    pos_tmp = nx.random_layout(G)
+    if pos is None:
+        pos = pos_tmp
+    else:
+        for node in G.nodes():
+            if node not in pos:
+                pos[node] = pos_tmp[node].copy()
+
+    N = len(G)
+    # no nodes, no computation needed
+    if N == 0:
+        return pos
+
+    # spring constants: a (>1) for connected pairs, 1 otherwise, 0 on the diagonal
+    K = np.ones((N, N)) - np.eye(N)
+    node_order = {node: i for i, node in enumerate(G)}
+    for x, y in G.edges():
+        if x != y:
+            idx, jdx = (node_order[i] for i in (x, y))
+            K[idx, jdx] = a
+
+    # vectorize values
+    p = np.asarray(list(pos.values()))
+
+    # equation 10 in [1]
+    rho = scaling * np.sqrt(N)
+
+    # looping variables
+    error = etol + 1
+    n_iter = 0
+    while error > etol:
+        diff = p[:, np.newaxis] - p[np.newaxis]
+        A = np.linalg.norm(diff, axis=-1)[..., np.newaxis]
+        # attractive force minus repulsive force
+        # suppress NaNs from dividing by the zero diagonal;
+        # they do not affect the computation thanks to nansum below
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore")
+            change = K[..., np.newaxis] * diff - rho / A * diff
+        change = np.nansum(change, axis=0)
+        p += change * dt
+
+        error = np.linalg.norm(change, axis=-1).sum()
+        if n_iter > max_iter:
+            break
+        n_iter += 1
+    return dict(zip(G.nodes(), p))
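In our notation (not from the patch), the loop integrates, per node i, with explicit Euler steps of size `dt`:

    \dot p_i = \sum_{j \ne i}\Bigl(K_{ji}\,(p_j - p_i)
               - \rho\,\frac{p_j - p_i}{\lVert p_j - p_i\rVert}\Bigr),
    \qquad \rho = \mathrm{scaling}\cdot\sqrt{N}

where K_{ji} = a along edges and 1 otherwise, and \rho is equation 10 of the cited paper; iteration stops once the summed step length drops below `etol` or `max_iter` is reached.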


 def rescale_layout(pos, scale=1):
@@ -601,7 +1262,15 @@ def rescale_layout(pos, scale=1):
     --------
     rescale_layout_dict
     """
-    pass
+    import numpy as np
+
+    # Find max length over all dimensions
+    pos -= pos.mean(axis=0)
+    lim = np.abs(pos).max()  # max coordinate for all axes
+    # rescale to (-scale, scale) in all directions, preserves aspect
+    if lim > 0:
+        pos *= scale / lim
+    return pos
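Note that this helper mutates `pos` in place (the `-=`/`*=` above). A small illustrative check:

    >>> import numpy as np
    >>> arr = np.array([[0.0, 0.0], [2.0, 2.0]])
    >>> nx.rescale_layout(arr, scale=1)
    array([[-1., -1.],
           [ 1.,  1.]])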


 def rescale_layout_dict(pos, scale=1):
@@ -633,10 +1302,16 @@ def rescale_layout_dict(pos, scale=1):
     --------
     rescale_layout
     """
-    pass
+    import numpy as np
+
+    if not pos:  # empty_graph
+        return {}
+    pos_v = np.array(list(pos.values()))
+    pos_v = rescale_layout(pos_v, scale=scale)
+    return dict(zip(pos, pos_v))


-def bfs_layout(G, start, *, align='vertical', scale=1, center=None):
+def bfs_layout(G, start, *, align="vertical", scale=1, center=None):
     """Position nodes according to breadth-first search algorithm.

     Parameters
@@ -666,4 +1341,18 @@ def bfs_layout(G, start, *, align='vertical', scale=1, center=None):
     try to minimize edge crossings.

     """
-    pass
+    G, center = _process_params(G, center, 2)
+
+    # Compute layers with BFS
+    layers = dict(enumerate(nx.bfs_layers(G, start)))
+
+    if len(G) != sum(len(nodes) for nodes in layers.values()):
+        raise nx.NetworkXError(
+            "bfs_layout didn't include all nodes. Perhaps use input graph:\n"
+            "        G.subgraph(nx.node_connected_component(G, start))"
+        )
+
+    # Compute node positions with multipartite_layout
+    return multipartite_layout(
+        G, subset_key=layers, align=align, scale=scale, center=center
+    )
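Given the delegation above, this is equivalent to building the layer dict by hand (illustrative):

    >>> G = nx.path_graph(4)
    >>> layers = dict(enumerate(nx.bfs_layers(G, 0)))
    >>> manual = nx.multipartite_layout(G, subset_key=layers)
    >>> auto = nx.bfs_layout(G, 0)
    >>> all(bool((manual[n] == auto[n]).all()) for n in G)
    True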
diff --git a/networkx/drawing/nx_agraph.py b/networkx/drawing/nx_agraph.py
index b2be34698..f91031fca 100644
--- a/networkx/drawing/nx_agraph.py
+++ b/networkx/drawing/nx_agraph.py
@@ -19,9 +19,18 @@ See Also
 """
 import os
 import tempfile
+
 import networkx as nx
-__all__ = ['from_agraph', 'to_agraph', 'write_dot', 'read_dot',
-    'graphviz_layout', 'pygraphviz_layout', 'view_pygraphviz']
+
+__all__ = [
+    "from_agraph",
+    "to_agraph",
+    "write_dot",
+    "read_dot",
+    "graphviz_layout",
+    "pygraphviz_layout",
+    "view_pygraphviz",
+]


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -56,7 +65,49 @@ def from_agraph(A, create_using=None):
     attribute or the value 1 if no edge weight attribute is found.

     """
-    pass
+    if create_using is None:
+        if A.is_directed():
+            if A.is_strict():
+                create_using = nx.DiGraph
+            else:
+                create_using = nx.MultiDiGraph
+        else:
+            if A.is_strict():
+                create_using = nx.Graph
+            else:
+                create_using = nx.MultiGraph
+
+    # assign defaults
+    N = nx.empty_graph(0, create_using)
+    if A.name is not None:
+        N.name = A.name
+
+    # add graph attributes
+    N.graph.update(A.graph_attr)
+
+    # add nodes, attributes to N.node_attr
+    for n in A.nodes():
+        str_attr = {str(k): v for k, v in n.attr.items()}
+        N.add_node(str(n), **str_attr)
+
+    # add edges, assign edge data as dictionary of attributes
+    for e in A.edges():
+        u, v = str(e[0]), str(e[1])
+        attr = dict(e.attr)
+        str_attr = {str(k): v for k, v in attr.items()}
+        if not N.is_multigraph():
+            if e.name is not None:
+                str_attr["key"] = e.name
+            N.add_edge(u, v, **str_attr)
+        else:
+            N.add_edge(u, v, key=e.name, **str_attr)
+
+    # add default attributes for graph, nodes, and edges
+    # hang them on N.graph_attr
+    N.graph["graph"] = dict(A.graph_attr)
+    N.graph["node"] = dict(A.node_attr)
+    N.graph["edge"] = dict(A.edge_attr)
+    return N


 def to_agraph(N):
@@ -79,7 +130,55 @@ def to_agraph(N):
     and then updated with the calling arguments if any.

     """
-    pass
+    try:
+        import pygraphviz
+    except ImportError as err:
+        raise ImportError("requires pygraphviz http://pygraphviz.github.io/") from err
+    directed = N.is_directed()
+    strict = nx.number_of_selfloops(N) == 0 and not N.is_multigraph()
+
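+    # NB: the loop below rewrites any "pos" node attribute on the input graph
+    # in place, converting it to the "x,y!" string form Graphviz expects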
+    for node in N:
+        if "pos" in N.nodes[node]:
+            N.nodes[node]["pos"] = "{},{}!".format(
+                N.nodes[node]["pos"][0], N.nodes[node]["pos"][1]
+            )
+
+    A = pygraphviz.AGraph(name=N.name, strict=strict, directed=directed)
+
+    # default graph attributes
+    A.graph_attr.update(N.graph.get("graph", {}))
+    A.node_attr.update(N.graph.get("node", {}))
+    A.edge_attr.update(N.graph.get("edge", {}))
+
+    A.graph_attr.update(
+        (k, v) for k, v in N.graph.items() if k not in ("graph", "node", "edge")
+    )
+
+    # add nodes
+    for n, nodedata in N.nodes(data=True):
+        A.add_node(n)
+        # Add node data
+        a = A.get_node(n)
+        a.attr.update({k: str(v) for k, v in nodedata.items()})
+
+    # loop over edges
+    if N.is_multigraph():
+        for u, v, key, edgedata in N.edges(data=True, keys=True):
+            str_edgedata = {k: str(v) for k, v in edgedata.items() if k != "key"}
+            A.add_edge(u, v, key=str(key))
+            # Add edge data
+            a = A.get_edge(u, v)
+            a.attr.update(str_edgedata)
+
+    else:
+        for u, v, edgedata in N.edges(data=True):
+            str_edgedata = {k: str(v) for k, v in edgedata.items()}
+            A.add_edge(u, v)
+            # Add edge data
+            a = A.get_edge(u, v)
+            a.attr.update(str_edgedata)
+
+    return A
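Illustrative round trip, assuming pygraphviz is installed (hence the doctest skips); note that node names come back as strings:

    >>> A = nx.nx_agraph.to_agraph(nx.path_graph(3))  # doctest: +SKIP
    >>> H = nx.nx_agraph.from_agraph(A)  # doctest: +SKIP
    >>> sorted(H)  # doctest: +SKIP
    ['0', '1', '2']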


 def write_dot(G, path):
@@ -98,10 +197,13 @@ def write_dot(G, path):
     Note that some graphviz layouts are not guaranteed to be deterministic,
     see https://gitlab.com/graphviz/graphviz/-/issues/1767 for more info.
     """
-    pass
+    A = to_agraph(G)
+    A.write(path)
+    A.clear()
+    return


-@nx._dispatchable(name='agraph_read_dot', graphs=None, returns_graph=True)
+@nx._dispatchable(name="agraph_read_dot", graphs=None, returns_graph=True)
 def read_dot(path):
     """Returns a NetworkX graph from a dot file on path.

@@ -110,10 +212,19 @@ def read_dot(path):
     path : file or string
        File name or file handle to read.
     """
-    pass
-
-
-def graphviz_layout(G, prog='neato', root=None, args=''):
+    try:
+        import pygraphviz
+    except ImportError as err:
+        raise ImportError(
+            "read_dot() requires pygraphviz http://pygraphviz.github.io/"
+        ) from err
+    A = pygraphviz.AGraph(file=path)
+    gr = from_agraph(A)
+    A.clear()
+    return gr
+
+
+def graphviz_layout(G, prog="neato", root=None, args=""):
     """Create node positions for G using Graphviz.

     Parameters
@@ -144,10 +255,10 @@ def graphviz_layout(G, prog='neato', root=None, args=''):
     Note that some graphviz layouts are not guaranteed to be deterministic,
     see https://gitlab.com/graphviz/graphviz/-/issues/1767 for more info.
     """
-    pass
+    return pygraphviz_layout(G, prog=prog, root=root, args=args)


-def pygraphviz_layout(G, prog='neato', root=None, args=''):
+def pygraphviz_layout(G, prog="neato", root=None, args=""):
     """Create node positions for G using Graphviz.

     Parameters
@@ -187,12 +298,30 @@ def pygraphviz_layout(G, prog='neato', root=None, args=''):
     Note that some graphviz layouts are not guaranteed to be deterministic,
     see https://gitlab.com/graphviz/graphviz/-/issues/1767 for more info.
     """
-    pass
-
-
-@nx.utils.open_file(5, 'w+b')
-def view_pygraphviz(G, edgelabel=None, prog='dot', args='', suffix='', path
-    =None, show=True):
+    try:
+        import pygraphviz
+    except ImportError as err:
+        raise ImportError("requires pygraphviz http://pygraphviz.github.io/") from err
+    if root is not None:
+        args += f"-Groot={root}"
+    A = to_agraph(G)
+    A.layout(prog=prog, args=args)
+    node_pos = {}
+    for n in G:
+        node = pygraphviz.Node(A, n)
+        try:
+            xs = node.attr["pos"].split(",")
+            node_pos[n] = tuple(float(x) for x in xs)
+        except (KeyError, ValueError, AttributeError):  # no usable "pos" attribute
+            print("no position for node", n)
+            node_pos[n] = (0.0, 0.0)
+    return node_pos
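Illustrative usage, assuming pygraphviz and Graphviz are installed:

    >>> G = nx.petersen_graph()
    >>> pos = nx.nx_agraph.pygraphviz_layout(G, prog="neato")  # doctest: +SKIP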
+
+
+@nx.utils.open_file(5, "w+b")
+def view_pygraphviz(
+    G, edgelabel=None, prog="dot", args="", suffix="", path=None, show=True
+):
     """Views the graph G using the specified layout algorithm.

     Parameters
@@ -237,4 +366,100 @@ def view_pygraphviz(G, edgelabel=None, prog='dot', args='', suffix='', path
     see https://gitlab.com/graphviz/graphviz/-/issues/1767 for more info.

     """
-    pass
+    if not len(G):
+        raise nx.NetworkXException("An empty graph cannot be drawn.")
+
+    # If we are providing default values for graphviz, these must be set
+    # before any nodes or edges are added to the PyGraphviz graph object.
+    # The reason for this is that default values only affect incoming objects.
+    # If you change the default values after the objects have been added,
+    # then they inherit no value and are set only if explicitly set.
+
+    # to_agraph() uses these values.
+    attrs = ["edge", "node", "graph"]
+    for attr in attrs:
+        if attr not in G.graph:
+            G.graph[attr] = {}
+
+    # These are the default values.
+    edge_attrs = {"fontsize": "10"}
+    node_attrs = {
+        "style": "filled",
+        "fillcolor": "#0000FF40",
+        "height": "0.75",
+        "width": "0.75",
+        "shape": "circle",
+    }
+    graph_attrs = {}
+
+    def update_attrs(which, attrs):
+        # Update graph attributes. Return list of those which were added.
+        added = []
+        for k, v in attrs.items():
+            if k not in G.graph[which]:
+                G.graph[which][k] = v
+                added.append(k)
+        return added
+
+    def clean_attrs(which, added):
+        # Remove added attributes
+        for attr in added:
+            del G.graph[which][attr]
+        if not G.graph[which]:
+            del G.graph[which]
+
+    # Update all default values, remembering which keys were newly added
+    added_edge = update_attrs("edge", edge_attrs)
+    added_node = update_attrs("node", node_attrs)
+    added_graph = update_attrs("graph", graph_attrs)
+
+    # Convert to agraph, so we inherit default values
+    A = to_agraph(G)
+
+    # Remove only the default values we added to the original graph.
+    clean_attrs("edge", added_edge)
+    clean_attrs("node", added_node)
+    clean_attrs("graph", added_graph)
+
+    # If the user passed in an edgelabel, we update the labels for all edges.
+    if edgelabel is not None:
+        if not callable(edgelabel):
+
+            def func(data):
+                return "".join(["  ", str(data[edgelabel]), "  "])
+
+        else:
+            func = edgelabel
+
+        # update all the edge labels
+        if G.is_multigraph():
+            for u, v, key, data in G.edges(keys=True, data=True):
+                # PyGraphviz doesn't convert the key to a string. See #339
+                edge = A.get_edge(u, v, str(key))
+                edge.attr["label"] = str(func(data))
+        else:
+            for u, v, data in G.edges(data=True):
+                edge = A.get_edge(u, v)
+                edge.attr["label"] = str(func(data))
+
+    if path is None:
+        ext = "png"
+        if suffix:
+            suffix = f"_{suffix}.{ext}"
+        else:
+            suffix = f".{ext}"
+        path = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
+    else:
+        # Assume the decorator worked and it is a file-object.
+        pass
+
+    # Write graph to file
+    A.draw(path=path, format=None, prog=prog, args=args)
+    path.close()
+
+    # Show graph in a new window (depends on platform configuration)
+    if show:
+        from PIL import Image
+
+        Image.open(path.name).show()
+
+    return path.name, A
diff --git a/networkx/drawing/nx_latex.py b/networkx/drawing/nx_latex.py
index fc0638643..8bc6ba219 100644
--- a/networkx/drawing/nx_latex.py
+++ b/networkx/drawing/nx_latex.py
@@ -1,4 +1,4 @@
-"""
+r"""
 *****
 LaTeX
 *****
@@ -14,7 +14,7 @@ To construct a figure with subfigures for each graph to be shown, provide
 ``to_latex`` or ``write_latex`` a list of graphs, a list of subcaptions,
 and a number of rows of subfigures inside the figure.

-To be able to refer to the figures or subfigures in latex using ``\\\\ref``,
+To be able to refer to the figures or subfigures in latex using ``\\ref``,
 the keyword ``latex_label`` is available for figures and `sub_labels` for
 a list of labels, one for each subfigure.

@@ -84,14 +84,14 @@ If you want **subfigures** each containing one graph, you can input a list of gr
 >>> pos = nx.circular_layout(H3)
 >>> latex_code = nx.to_latex(H3, pos, node_options=node_color, edge_options=edge_width)
 >>> print(latex_code)
-\\documentclass{report}
-\\usepackage{tikz}
-\\usepackage{subcaption}
+\documentclass{report}
+\usepackage{tikz}
+\usepackage{subcaption}
 <BLANKLINE>
-\\begin{document}
-\\begin{figure}
-  \\begin{tikzpicture}
-      \\draw
+\begin{document}
+\begin{figure}
+  \begin{tikzpicture}
+      \draw
         (1.0, 0.0) node[red] (0){0}
         (0.707, 0.707) node[orange] (1){1}
         (-0.0, 1.0) node[blue] (2){2}
@@ -100,18 +100,18 @@ If you want **subfigures** each containing one graph, you can input a list of gr
         (-0.707, -0.707) node (5){5}
         (0.0, -1.0) node (6){6}
         (0.707, -0.707) node (7){7};
-      \\begin{scope}[-]
-        \\draw[line width=1.5] (0) to (1);
-        \\draw[line width=1.5] (1) to (2);
-        \\draw[line width=1.5] (2) to (3);
-        \\draw[line width=1.5] (3) to (4);
-        \\draw[line width=1.5] (4) to (5);
-        \\draw[line width=1.5] (5) to (6);
-        \\draw[line width=1.5] (6) to (7);
-      \\end{scope}
-    \\end{tikzpicture}
-\\end{figure}
-\\end{document}
+      \begin{scope}[-]
+        \draw[line width=1.5] (0) to (1);
+        \draw[line width=1.5] (1) to (2);
+        \draw[line width=1.5] (2) to (3);
+        \draw[line width=1.5] (3) to (4);
+        \draw[line width=1.5] (4) to (5);
+        \draw[line width=1.5] (5) to (6);
+        \draw[line width=1.5] (6) to (7);
+      \end{scope}
+    \end{tikzpicture}
+\end{figure}
+\end{document}

 Notes
 -----
@@ -128,15 +128,29 @@ TikZ options details:   https://tikz.dev/tikz-actions
 """
 import numbers
 import os
-import networkx as nx
-__all__ = ['to_latex_raw', 'to_latex', 'write_latex']

+import networkx as nx

-@nx.utils.not_implemented_for('multigraph')
-def to_latex_raw(G, pos='pos', tikz_options='', default_node_options='',
-    node_options='node_options', node_label='label', default_edge_options=
-    '', edge_options='edge_options', edge_label='label', edge_label_options
-    ='edge_label_options'):
+__all__ = [
+    "to_latex_raw",
+    "to_latex",
+    "write_latex",
+]
+
+
+@nx.utils.not_implemented_for("multigraph")
+def to_latex_raw(
+    G,
+    pos="pos",
+    tikz_options="",
+    default_node_options="",
+    node_options="node_options",
+    node_label="label",
+    default_edge_options="",
+    edge_options="edge_options",
+    edge_label="label",
+    edge_label_options="edge_label_options",
+):
     """Return a string of the LaTeX/TikZ code to draw `G`

     This function produces just the code for the tikzpicture
@@ -195,31 +209,127 @@ def to_latex_raw(G, pos='pos', tikz_options='', default_node_options='',
     to_latex
     write_latex
     """
-    pass
-
+    i4 = "\n    "
+    i8 = "\n        "
+
+    # set up position dict
+    # TODO allow pos to be None and use a nice TikZ default
+    if not isinstance(pos, dict):
+        pos = nx.get_node_attributes(G, pos)
+    if not pos:
+        # circular layout with radius 2
+        pos = {n: f"({round(360.0 * i / len(G), 3)}:2)" for i, n in enumerate(G)}
+    for node in G:
+        if node not in pos:
+            raise nx.NetworkXError(f"node {node} has no specified pos {pos}")
+        posnode = pos[node]
+        if not isinstance(posnode, str):
+            try:
+                posx, posy = posnode
+                pos[node] = f"({round(posx, 3)}, {round(posy, 3)})"
+            except (TypeError, ValueError) as err:
+                msg = f"position pos[{node}] is not a 2-tuple or a string: {posnode}"
+                raise nx.NetworkXError(msg) from err
+
+    # set up all the dicts
+    if not isinstance(node_options, dict):
+        node_options = nx.get_node_attributes(G, node_options)
+    if not isinstance(node_label, dict):
+        node_label = nx.get_node_attributes(G, node_label)
+    if not isinstance(edge_options, dict):
+        edge_options = nx.get_edge_attributes(G, edge_options)
+    if not isinstance(edge_label, dict):
+        edge_label = nx.get_edge_attributes(G, edge_label)
+    if not isinstance(edge_label_options, dict):
+        edge_label_options = nx.get_edge_attributes(G, edge_label_options)
+
+    # process default options (add brackets or not)
+    topts = "" if tikz_options == "" else f"[{tikz_options.strip('[]')}]"
+    defn = "" if default_node_options == "" else f"[{default_node_options.strip('[]')}]"
+    linestyle = f"{'->' if G.is_directed() else '-'}"
+    if default_edge_options == "":
+        defe = "[" + linestyle + "]"
+    elif "-" in default_edge_options:
+        defe = default_edge_options
+    else:
+        defe = f"[{linestyle},{default_edge_options.strip('[]')}]"
+
+    # Construct the string line by line
+    result = "  \\begin{tikzpicture}" + topts
+    result += i4 + "  \\draw" + defn
+    # load the nodes
+    for n in G:
+        # node options goes inside square brackets
+        nopts = f"[{node_options[n].strip('[]')}]" if n in node_options else ""
+        # node text goes inside curly brackets {}
+        ntext = f"{{{node_label[n]}}}" if n in node_label else f"{{{n}}}"
+
+        result += i8 + f"{pos[n]} node{nopts} ({n}){ntext}"
+    result += ";\n"
+
+    # load the edges
+    result += "      \\begin{scope}" + defe
+    for edge in G.edges:
+        u, v = edge[:2]
+        e_opts = f"{edge_options[edge]}".strip("[]") if edge in edge_options else ""
+        # add loop options for selfloops if not present
+        if u == v and "loop" not in e_opts:
+            e_opts = "loop," + e_opts
+        e_opts = f"[{e_opts}]" if e_opts != "" else ""
+        # TODO -- handle bending of multiedges
+
+        els = edge_label_options[edge] if edge in edge_label_options else ""
+        # edge label options goes inside square brackets []
+        els = f"[{els.strip('[]')}]"
+        # edge text is drawn using the TikZ node command inside curly brackets {}
+        e_label = f" node{els} {{{edge_label[edge]}}}" if edge in edge_label else ""
+
+        result += i8 + f"\\draw{e_opts} ({u}) to{e_label} ({v});"
+
+    result += "\n      \\end{scope}\n    \\end{tikzpicture}\n"
+    return result
+
+
+_DOC_WRAPPER_TIKZ = r"""\documentclass{{report}}
+\usepackage{{tikz}}
+\usepackage{{subcaption}}
+
+\begin{{document}}
+{content}
+\end{{document}}"""

-_DOC_WRAPPER_TIKZ = """\\documentclass{{report}}
-\\usepackage{{tikz}}
-\\usepackage{{subcaption}}

-\\begin{{document}}
-{content}
-\\end{{document}}"""
-_FIG_WRAPPER = """\\begin{{figure}}
-{content}{caption}{label}
-\\end{{figure}}"""
-_SUBFIG_WRAPPER = """  \\begin{{subfigure}}{{{size}\\textwidth}}
+_FIG_WRAPPER = r"""\begin{{figure}}
 {content}{caption}{label}
-  \\end{{subfigure}}"""
+\end{{figure}}"""


-def to_latex(Gbunch, pos='pos', tikz_options='', default_node_options='',
-    node_options='node_options', node_label='node_label',
-    default_edge_options='', edge_options='edge_options', edge_label=
-    'edge_label', edge_label_options='edge_label_options', caption='',
-    latex_label='', sub_captions=None, sub_labels=None, n_rows=1,
-    as_document=True, document_wrapper=_DOC_WRAPPER_TIKZ, figure_wrapper=
-    _FIG_WRAPPER, subfigure_wrapper=_SUBFIG_WRAPPER):
+_SUBFIG_WRAPPER = r"""  \begin{{subfigure}}{{{size}\textwidth}}
+{content}{caption}{label}
+  \end{{subfigure}}"""
+
+
+def to_latex(
+    Gbunch,
+    pos="pos",
+    tikz_options="",
+    default_node_options="",
+    node_options="node_options",
+    node_label="node_label",
+    default_edge_options="",
+    edge_options="edge_options",
+    edge_label="edge_label",
+    edge_label_options="edge_label_options",
+    caption="",
+    latex_label="",
+    sub_captions=None,
+    sub_labels=None,
+    n_rows=1,
+    as_document=True,
+    document_wrapper=_DOC_WRAPPER_TIKZ,
+    figure_wrapper=_FIG_WRAPPER,
+    subfigure_wrapper=_SUBFIG_WRAPPER,
+):
     """Return latex code to draw the graph(s) in `Gbunch`

     The TikZ drawing utility in LaTeX is used to draw the graph(s).
@@ -311,10 +421,65 @@ def to_latex(Gbunch, pos='pos', tikz_options='', default_node_options='',
     write_latex
     to_latex_raw
     """
-    pass
-
-
-@nx.utils.open_file(1, mode='w')
+    if hasattr(Gbunch, "adj"):
+        raw = to_latex_raw(
+            Gbunch,
+            pos,
+            tikz_options,
+            default_node_options,
+            node_options,
+            node_label,
+            default_edge_options,
+            edge_options,
+            edge_label,
+            edge_label_options,
+        )
+    else:  # iterator of graphs
+        sbf = subfigure_wrapper
+        size = 1 / n_rows
+
+        N = len(Gbunch)
+        if isinstance(pos, str | dict):
+            pos = [pos] * N
+        if sub_captions is None:
+            sub_captions = [""] * N
+        if sub_labels is None:
+            sub_labels = [""] * N
+        if not (len(Gbunch) == len(pos) == len(sub_captions) == len(sub_labels)):
+            raise nx.NetworkXError(
+                "length of Gbunch, sub_captions and sub_figures must agree"
+            )
+
+        raw = ""
+        for G, pos, subcap, sublbl in zip(Gbunch, pos, sub_captions, sub_labels):
+            subraw = to_latex_raw(
+                G,
+                pos,
+                tikz_options,
+                default_node_options,
+                node_options,
+                node_label,
+                default_edge_options,
+                edge_options,
+                edge_label,
+                edge_label_options,
+            )
+            cap = f"    \\caption{{{subcap}}}" if subcap else ""
+            lbl = f"\\label{{{sublbl}}}" if sublbl else ""
+            raw += sbf.format(size=size, content=subraw, caption=cap, label=lbl)
+            raw += "\n"
+
+    # put raw latex code into a figure environment and optionally into a document
+    raw = raw[:-1]
+    cap = f"\n  \\caption{{{caption}}}" if caption else ""
+    lbl = f"\\label{{{latex_label}}}" if latex_label else ""
+    fig = figure_wrapper.format(content=raw, caption=cap, label=lbl)
+    if as_document:
+        return document_wrapper.format(content=fig)
+    return fig
+
+
+@nx.utils.open_file(1, mode="w")
 def write_latex(Gbunch, path, **options):
     """Write the latex code to draw the graph(s) onto `path`.

@@ -403,4 +568,4 @@ def write_latex(Gbunch, path, **options):
     ========
     to_latex
     """
-    pass
+    path.write(to_latex(Gbunch, **options))
diff --git a/networkx/drawing/nx_pydot.py b/networkx/drawing/nx_pydot.py
index ff51cde9c..92c5f333e 100644
--- a/networkx/drawing/nx_pydot.py
+++ b/networkx/drawing/nx_pydot.py
@@ -20,23 +20,33 @@ See Also
  - DOT Language:  http://www.graphviz.org/doc/info/lang.html
 """
 from locale import getpreferredencoding
+
 import networkx as nx
 from networkx.utils import open_file
-__all__ = ['write_dot', 'read_dot', 'graphviz_layout', 'pydot_layout',
-    'to_pydot', 'from_pydot']
+
+__all__ = [
+    "write_dot",
+    "read_dot",
+    "graphviz_layout",
+    "pydot_layout",
+    "to_pydot",
+    "from_pydot",
+]


-@open_file(1, mode='w')
+@open_file(1, mode="w")
 def write_dot(G, path):
     """Write NetworkX graph G to Graphviz dot format on path.

     Path can be a string or a file handle.
     """
-    pass
+    P = to_pydot(G)
+    path.write(P.to_string())
+    return


-@open_file(0, mode='r')
-@nx._dispatchable(name='pydot_read_dot', graphs=None, returns_graph=True)
+@open_file(0, mode="r")
+@nx._dispatchable(name="pydot_read_dot", graphs=None, returns_graph=True)
 def read_dot(path):
     """Returns a NetworkX :class:`MultiGraph` or :class:`MultiDiGraph` from the
     dot file with the passed path.
@@ -59,7 +69,15 @@ def read_dot(path):
     Use `G = nx.Graph(nx.nx_pydot.read_dot(path))` to return a :class:`Graph` instead of a
     :class:`MultiGraph`.
     """
-    pass
+    import pydot
+
+    data = path.read()
+
+    # List of one or more "pydot.Dot" instances deserialized from this file.
+    P_list = pydot.graph_from_dot_data(data)
+
+    # Convert only the first such instance into a NetworkX graph.
+    return from_pydot(P_list[0])


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -86,7 +104,79 @@ def from_pydot(P):
     >>> G = nx.Graph(nx.nx_pydot.from_pydot(A))

     """
-    pass
+
+    if P.get_strict(None):  # pydot bug: get_strict() shouldn't take argument
+        multiedges = False
+    else:
+        multiedges = True
+
+    if P.get_type() == "graph":  # undirected
+        if multiedges:
+            N = nx.MultiGraph()
+        else:
+            N = nx.Graph()
+    else:
+        if multiedges:
+            N = nx.MultiDiGraph()
+        else:
+            N = nx.DiGraph()
+
+    # assign defaults
+    name = P.get_name().strip('"')
+    if name != "":
+        N.name = name
+
+    # add nodes, attributes to N.node_attr
+    for p in P.get_node_list():
+        n = p.get_name().strip('"')
+        if n in ("node", "graph", "edge"):
+            continue
+        N.add_node(n, **p.get_attributes())
+
+    # add edges
+    for e in P.get_edge_list():
+        u = e.get_source()
+        v = e.get_destination()
+        attr = e.get_attributes()
+        s = []
+        d = []
+
+        if isinstance(u, str):
+            s.append(u.strip('"'))
+        else:
+            for unodes in u["nodes"]:
+                s.append(unodes.strip('"'))
+
+        if isinstance(v, str):
+            d.append(v.strip('"'))
+        else:
+            for vnodes in v["nodes"]:
+                d.append(vnodes.strip('"'))
+
+        for source_node in s:
+            for destination_node in d:
+                N.add_edge(source_node, destination_node, **attr)
+
+    # add default attributes for graph, nodes, edges
+    pattr = P.get_attributes()
+    if pattr:
+        N.graph["graph"] = pattr
+    try:
+        N.graph["node"] = P.get_node_defaults()[0]
+    except (IndexError, TypeError):
+        pass  # N.graph['node']={}
+    try:
+        N.graph["edge"] = P.get_edge_defaults()[0]
+    except (IndexError, TypeError):
+        pass  # N.graph['edge']={}
+    return N
+
+
+def _check_colon_quotes(s):
+    # A quick helper function to check if a string has a colon in it
+    # and if it is quoted properly with double quotes.
+    # refer https://github.com/pydot/pydot/issues/258
+    return ":" in s and (s[0] != '"' or s[-1] != '"')


 def to_pydot(N):
@@ -106,10 +196,100 @@ def to_pydot(N):
     -----

     """
-    pass
-
-
-def graphviz_layout(G, prog='neato', root=None):
+    import pydot
+
+    # set Graphviz graph type
+    if N.is_directed():
+        graph_type = "digraph"
+    else:
+        graph_type = "graph"
+    strict = nx.number_of_selfloops(N) == 0 and not N.is_multigraph()
+
+    name = N.name
+    graph_defaults = N.graph.get("graph", {})
+    if name == "":
+        P = pydot.Dot("", graph_type=graph_type, strict=strict, **graph_defaults)
+    else:
+        P = pydot.Dot(
+            f'"{name}"', graph_type=graph_type, strict=strict, **graph_defaults
+        )
+    try:
+        P.set_node_defaults(**N.graph["node"])
+    except KeyError:
+        pass
+    try:
+        P.set_edge_defaults(**N.graph["edge"])
+    except KeyError:
+        pass
+
+    for n, nodedata in N.nodes(data=True):
+        str_nodedata = {str(k): str(v) for k, v in nodedata.items()}
+        # Explicitly catch nodes with ":" in node names or nodedata.
+        n = str(n)
+        raise_error = _check_colon_quotes(n) or (
+            any(
+                (_check_colon_quotes(k) or _check_colon_quotes(v))
+                for k, v in str_nodedata.items()
+            )
+        )
+        if raise_error:
+            raise ValueError(
+                'Node names and attributes should not contain ":" unless they are '
+                'quoted with "". For example the string \'attribute:data1\' should '
+                'be written as \'"attribute:data1"\'. '
+                'Please refer https://github.com/pydot/pydot/issues/258'
+            )
+        p = pydot.Node(n, **str_nodedata)
+        P.add_node(p)
+
+    if N.is_multigraph():
+        for u, v, key, edgedata in N.edges(data=True, keys=True):
+            str_edgedata = {str(k): str(v) for k, v in edgedata.items() if k != "key"}
+            u, v = str(u), str(v)
+            raise_error = (
+                _check_colon_quotes(u)
+                or _check_colon_quotes(v)
+                or (
+                    any(
+                        (_check_colon_quotes(k) or _check_colon_quotes(val))
+                        for k, val in str_edgedata.items()
+                    )
+                )
+            )
+            if raise_error:
+                raise ValueError(
+                    'Node names and attributes should not contain ":" unless they are '
+                    'quoted with "". For example the string \'attribute:data1\' should '
+                    'be written as \'"attribute:data1"\'. '
+                    'Please refer https://github.com/pydot/pydot/issues/258'
+                )
+            edge = pydot.Edge(u, v, key=str(key), **str_edgedata)
+            P.add_edge(edge)
+
+    else:
+        for u, v, edgedata in N.edges(data=True):
+            str_edgedata = {str(k): str(v) for k, v in edgedata.items()}
+            u, v = str(u), str(v)
+            raise_error = (
+                _check_colon_quotes(u)
+                or _check_colon_quotes(v)
+                or (
+                    any(
+                        (_check_colon_quotes(k) or _check_colon_quotes(val))
+                        for k, val in str_edgedata.items()
+                    )
+                )
+            )
+            if raise_error:
+                raise ValueError(
+                    'Node names and attributes should not contain ":" unless they are '
+                    'quoted with "". For example the string \'attribute:data1\' should '
+                    'be written as \'"attribute:data1"\'. '
+                    'Please refer https://github.com/pydot/pydot/issues/258'
+                )
+            edge = pydot.Edge(u, v, **str_edgedata)
+            P.add_edge(edge)
+    return P
+
+
+def graphviz_layout(G, prog="neato", root=None):
     """Create node positions using Pydot and Graphviz.

     Returns a dictionary of positions keyed by node.
@@ -139,10 +319,10 @@ def graphviz_layout(G, prog='neato', root=None):
     -----
     This is a wrapper for pydot_layout.
     """
-    pass
+    return pydot_layout(G=G, prog=prog, root=root)


-def pydot_layout(G, prog='neato', root=None):
+def pydot_layout(G, prog="neato", root=None):
     """Create node positions using :mod:`pydot` and Graphviz.

     Parameters
@@ -180,4 +360,52 @@ def pydot_layout(G, prog='neato', root=None):
         G_layout = {H.nodes[n]["node_label"]: p for n, p in H_layout.items()}

     """
-    pass
+    import pydot
+
+    P = to_pydot(G)
+    if root is not None:
+        P.set("root", str(root))
+
+    # Bytes encoding a string in the dot language, produced from the passed
+    # graph by the external Graphviz command.
+    D_bytes = P.create_dot(prog=prog)
+
+    # The dot string decoded from these bytes with the preferred locale encoding
+    D = str(D_bytes, encoding=getpreferredencoding())
+
+    if D == "":  # no data returned
+        print(f"Graphviz layout with {prog} failed")
+        print()
+        print("To debug what happened try:")
+        print("P = nx.nx_pydot.to_pydot(G)")
+        print('P.write_dot("file.dot")')
+        print(f"And then run {prog} on file.dot")
+        return
+
+    # List of one or more "pydot.Dot" instances deserialized from this string.
+    Q_list = pydot.graph_from_dot_data(D)
+    assert len(Q_list) == 1
+
+    # The first and only such instance, as guaranteed by the above assertion.
+    Q = Q_list[0]
+
+    node_pos = {}
+    for n in G.nodes():
+        str_n = str(n)
+        # Explicitly catch nodes with ":" in node names or nodedata.
+        if _check_colon_quotes(str_n):
+            raise ValueError(
+                'Node names and node attributes should not contain ":" unless they '
+                'are quoted with "". For example the string \'attribute:data1\' '
+                'should be written as \'"attribute:data1"\'. '
+                'Please refer https://github.com/pydot/pydot/issues/258'
+            )
+        pydot_node = pydot.Node(str_n).get_name()
+        node = Q.get_node(pydot_node)
+
+        if isinstance(node, list):
+            node = node[0]
+        pos = node.get_pos()[1:-1]  # strip leading and trailing double quotes
+        if pos is not None:
+            xx, yy = pos.split(",")
+            node_pos[n] = (float(xx), float(yy))
+    return node_pos
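Illustrative usage, assuming pydot and a Graphviz `dot` executable are available:

    >>> G = nx.complete_graph(4)
    >>> pos = nx.nx_pydot.pydot_layout(G, prog="dot")  # doctest: +SKIP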
diff --git a/networkx/drawing/nx_pylab.py b/networkx/drawing/nx_pylab.py
index 6f49522e0..7c25f63bf 100644
--- a/networkx/drawing/nx_pylab.py
+++ b/networkx/drawing/nx_pylab.py
@@ -19,12 +19,33 @@ See Also
 import collections
 import itertools
 from numbers import Number
+
 import networkx as nx
-from networkx.drawing.layout import circular_layout, kamada_kawai_layout, planar_layout, random_layout, shell_layout, spectral_layout, spring_layout
-__all__ = ['draw', 'draw_networkx', 'draw_networkx_nodes',
-    'draw_networkx_edges', 'draw_networkx_labels',
-    'draw_networkx_edge_labels', 'draw_circular', 'draw_kamada_kawai',
-    'draw_random', 'draw_spectral', 'draw_spring', 'draw_planar', 'draw_shell']
+from networkx.drawing.layout import (
+    circular_layout,
+    kamada_kawai_layout,
+    planar_layout,
+    random_layout,
+    shell_layout,
+    spectral_layout,
+    spring_layout,
+)
+
+__all__ = [
+    "draw",
+    "draw_networkx",
+    "draw_networkx_nodes",
+    "draw_networkx_edges",
+    "draw_networkx_labels",
+    "draw_networkx_edge_labels",
+    "draw_circular",
+    "draw_kamada_kawai",
+    "draw_random",
+    "draw_spectral",
+    "draw_spring",
+    "draw_planar",
+    "draw_shell",
+]


 def draw(G, pos=None, ax=None, **kwds):
@@ -83,11 +104,30 @@ def draw(G, pos=None, ax=None, **kwds):
     Also see the NetworkX drawing examples at
     https://networkx.org/documentation/latest/auto_examples/index.html
     """
-    pass
+    import matplotlib.pyplot as plt
+
+    if ax is None:
+        cf = plt.gcf()
+    else:
+        cf = ax.get_figure()
+    cf.set_facecolor("w")
+    if ax is None:
+        if cf.axes:
+            ax = cf.gca()
+        else:
+            ax = cf.add_axes((0, 0, 1, 1))
+
+    if "with_labels" not in kwds:
+        kwds["with_labels"] = "labels" in kwds
+
+    draw_networkx(G, pos=pos, ax=ax, **kwds)
+    ax.set_axis_off()
+    plt.draw_if_interactive()
+    return


 def draw_networkx(G, pos=None, arrows=None, with_labels=True, **kwds):
-    """Draw the graph G using Matplotlib.
+    r"""Draw the graph G using Matplotlib.

     Draw the graph with Matplotlib with options for node positions,
     labeling, titles, and many other drawing features.
@@ -113,7 +153,7 @@ def draw_networkx(G, pos=None, arrows=None, with_labels=True, **kwds):
         For directed graphs, if True draw arrowheads.
         Note: Arrows will be the same color as edges.

-    arrowstyle : str (default='-\\|>' for directed graphs)
+    arrowstyle : str (default='-\|>' for directed graphs)
         For directed graphs, choose the style of the arrowheads.
         For undirected graphs, defaults to '-'.

@@ -237,13 +277,62 @@ def draw_networkx(G, pos=None, arrows=None, with_labels=True, **kwds):
     draw_networkx_labels
     draw_networkx_edge_labels
     """
-    pass
-
-
-def draw_networkx_nodes(G, pos, nodelist=None, node_size=300, node_color=
-    '#1f78b4', node_shape='o', alpha=None, cmap=None, vmin=None, vmax=None,
-    ax=None, linewidths=None, edgecolors=None, label=None, margins=None,
-    hide_ticks=True):
+    from inspect import signature
+
+    import matplotlib.pyplot as plt
+
+    # Get all valid keywords by inspecting the signatures of draw_networkx_nodes,
+    # draw_networkx_edges, draw_networkx_labels
+
+    valid_node_kwds = signature(draw_networkx_nodes).parameters.keys()
+    valid_edge_kwds = signature(draw_networkx_edges).parameters.keys()
+    valid_label_kwds = signature(draw_networkx_labels).parameters.keys()
+
+    # Create a set with all valid keywords across the three functions and
+    # remove the arguments of this function (draw_networkx)
+    valid_kwds = (valid_node_kwds | valid_edge_kwds | valid_label_kwds) - {
+        "G",
+        "pos",
+        "arrows",
+        "with_labels",
+    }
+
+    if any(k not in valid_kwds for k in kwds):
+        invalid_args = ", ".join([k for k in kwds if k not in valid_kwds])
+        raise ValueError(f"Received invalid argument(s): {invalid_args}")
+
+    node_kwds = {k: v for k, v in kwds.items() if k in valid_node_kwds}
+    edge_kwds = {k: v for k, v in kwds.items() if k in valid_edge_kwds}
+    label_kwds = {k: v for k, v in kwds.items() if k in valid_label_kwds}
+
+    if pos is None:
+        pos = nx.drawing.spring_layout(G)  # default to spring layout
+
+    draw_networkx_nodes(G, pos, **node_kwds)
+    draw_networkx_edges(G, pos, arrows=arrows, **edge_kwds)
+    if with_labels:
+        draw_networkx_labels(G, pos, **label_kwds)
+    plt.draw_if_interactive()
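
The signature-inspection logic above splits incoming keyword arguments among
the node, edge, and label helpers and rejects anything unknown. A small
sketch of that behavior (assumes matplotlib is installed):

    import networkx as nx

    G = nx.cycle_graph(3)
    # node_color goes to draw_networkx_nodes, width to draw_networkx_edges
    nx.draw_networkx(G, node_color="tab:blue", width=2.0)
    try:
        nx.draw_networkx(G, not_a_kwarg=1)  # hypothetical bad keyword
    except ValueError as err:
        print(err)  # Received invalid argument(s): not_a_kwarg
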
+
+
+def draw_networkx_nodes(
+    G,
+    pos,
+    nodelist=None,
+    node_size=300,
+    node_color="#1f78b4",
+    node_shape="o",
+    alpha=None,
+    cmap=None,
+    vmin=None,
+    vmax=None,
+    ax=None,
+    linewidths=None,
+    edgecolors=None,
+    label=None,
+    margins=None,
+    hide_ticks=True,
+):
     """Draw the nodes of the graph G.

     This draws only the nodes of the graph G.
@@ -333,44 +422,157 @@ def draw_networkx_nodes(G, pos, nodelist=None, node_size=300, node_color=
     draw_networkx_labels
     draw_networkx_edge_labels
     """
-    pass
+    from collections.abc import Iterable
+
+    import matplotlib as mpl
+    import matplotlib.collections  # call as mpl.collections
+    import matplotlib.pyplot as plt
+    import numpy as np
+
+    if ax is None:
+        ax = plt.gca()
+
+    if nodelist is None:
+        nodelist = list(G)
+
+    if len(nodelist) == 0:  # empty nodelist, no drawing
+        return mpl.collections.PathCollection(None)
+
+    try:
+        xy = np.asarray([pos[v] for v in nodelist])
+    except KeyError as err:
+        raise nx.NetworkXError(f"Node {err} has no position.") from err
+
+    if isinstance(alpha, Iterable):
+        node_color = apply_alpha(node_color, alpha, nodelist, cmap, vmin, vmax)
+        alpha = None
+
+    node_collection = ax.scatter(
+        xy[:, 0],
+        xy[:, 1],
+        s=node_size,
+        c=node_color,
+        marker=node_shape,
+        cmap=cmap,
+        vmin=vmin,
+        vmax=vmax,
+        alpha=alpha,
+        linewidths=linewidths,
+        edgecolors=edgecolors,
+        label=label,
+    )
+    if hide_ticks:
+        ax.tick_params(
+            axis="both",
+            which="both",
+            bottom=False,
+            left=False,
+            labelbottom=False,
+            labelleft=False,
+        )
+
+    if margins is not None:
+        if isinstance(margins, Iterable):
+            ax.margins(*margins)
+        else:
+            ax.margins(margins)
+
+    node_collection.set_zorder(2)
+    return node_collection
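
As implemented above, draw_networkx_nodes returns the PathCollection from
ax.scatter, and an iterable alpha is folded into the node colors via
apply_alpha (defined near the end of this module). A short sketch, assuming
matplotlib is installed:

    import networkx as nx

    G = nx.star_graph(4)  # 5 nodes
    pos = nx.spring_layout(G, seed=42)
    nodes = nx.draw_networkx_nodes(G, pos, alpha=[0.2, 0.4, 0.6, 0.8, 1.0])
    print(type(nodes).__name__)  # PathCollection
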


 class FancyArrowFactory:
     """Draw arrows with `matplotlib.patches.FancyarrowPatch`"""

-
     class ConnectionStyleFactory:
-
         def __init__(self, connectionstyles, selfloop_height, ax=None):
             import matplotlib as mpl
-            import matplotlib.path
+            import matplotlib.path  # call as mpl.path
             import numpy as np
+
             self.ax = ax
             self.mpl = mpl
             self.np = np
-            self.base_connection_styles = [mpl.patches.ConnectionStyle(cs) for
-                cs in connectionstyles]
+            self.base_connection_styles = [
+                mpl.patches.ConnectionStyle(cs) for cs in connectionstyles
+            ]
             self.n = len(self.base_connection_styles)
             self.selfloop_height = selfloop_height

-    def __init__(self, edge_pos, edgelist, nodelist, edge_indices,
-        node_size, selfloop_height, connectionstyle='arc3', node_shape='o',
-        arrowstyle='-', arrowsize=10, edge_color='k', alpha=None, linewidth
-        =1.0, style='solid', min_source_margin=0, min_target_margin=0, ax=None
-        ):
+        def curved(self, edge_index):
+            return self.base_connection_styles[edge_index % self.n]
+
+        def self_loop(self, edge_index):
+            def self_loop_connection(posA, posB, *args, **kwargs):
+                if not self.np.all(posA == posB):
+                    raise nx.NetworkXError(
+                        "`self_loop` connection style method"
+                        "is only to be used for self-loops"
+                    )
+                # this is called with _screen space_ values
+                # so convert back to data space
+                data_loc = self.ax.transData.inverted().transform(posA)
+                v_shift = 0.1 * self.selfloop_height
+                h_shift = v_shift * 0.5
+                # put the top of the loop first so arrow is not hidden by node
+                # Path codes below: 1 = MOVETO, 4 = CURVE4 (cubic Bezier)
+                path = self.np.asarray(
+                    [
+                        # MOVETO
+                        [0, v_shift],
+                        # CURVE4 (first Bezier segment)
+                        [h_shift, v_shift],
+                        [h_shift, 0],
+                        [0, 0],
+                        # CURVE4 (second Bezier segment)
+                        [-h_shift, 0],
+                        [-h_shift, v_shift],
+                        [0, v_shift],
+                    ]
+                )
+                # Rotate self loop 90 deg. if more than 1
+                # This will allow for maximum of 4 visible self loops
+                if edge_index % 4:
+                    x, y = path.T
+                    for _ in range(edge_index % 4):
+                        x, y = y, -x
+                    path = self.np.array([x, y]).T
+                return self.mpl.path.Path(
+                    self.ax.transData.transform(data_loc + path), [1, 4, 4, 4, 4, 4, 4]
+                )
+
+            return self_loop_connection
+
+    def __init__(
+        self,
+        edge_pos,
+        edgelist,
+        nodelist,
+        edge_indices,
+        node_size,
+        selfloop_height,
+        connectionstyle="arc3",
+        node_shape="o",
+        arrowstyle="-",
+        arrowsize=10,
+        edge_color="k",
+        alpha=None,
+        linewidth=1.0,
+        style="solid",
+        min_source_margin=0,
+        min_target_margin=0,
+        ax=None,
+    ):
         import matplotlib as mpl
-        import matplotlib.patches
+        import matplotlib.patches  # call as mpl.patches
         import matplotlib.pyplot as plt
         import numpy as np
+
         if isinstance(connectionstyle, str):
             connectionstyle = [connectionstyle]
         elif np.iterable(connectionstyle):
             connectionstyle = list(connectionstyle)
         else:
-            msg = (
-                'ConnectionStyleFactory arg `connectionstyle` must be str or iterable'
-                )
+            msg = "ConnectionStyleFactory arg `connectionstyle` must be str or iterable"
             raise nx.NetworkXError(msg)
         self.ax = ax
         self.mpl = mpl
@@ -384,45 +586,45 @@ class FancyArrowFactory:
         self.edge_indices = edge_indices
         self.node_size = node_size
         self.connectionstyle_factory = self.ConnectionStyleFactory(
-            connectionstyle, selfloop_height, ax)
+            connectionstyle, selfloop_height, ax
+        )
         self.arrowstyle = arrowstyle
         self.arrowsize = arrowsize
-        self.arrow_colors = mpl.colors.colorConverter.to_rgba_array(edge_color,
-            alpha)
+        self.arrow_colors = mpl.colors.colorConverter.to_rgba_array(edge_color, alpha)
         self.linewidth = linewidth
         self.style = style
         if isinstance(arrowsize, list) and len(arrowsize) != len(edge_pos):
-            raise ValueError(
-                'arrowsize should have the same length as edgelist')
+            raise ValueError("arrowsize should have the same length as edgelist")

     def __call__(self, i):
         (x1, y1), (x2, y2) = self.edge_pos[i]
-        shrink_source = 0
-        shrink_target = 0
-        if self.np.iterable(self.node_size):
+        shrink_source = 0  # space from source to tail
+        shrink_target = 0  # space from head to target
+        if self.np.iterable(self.node_size):  # many node sizes
             source, target = self.edgelist[i][:2]
             source_node_size = self.node_size[self.nodelist.index(source)]
             target_node_size = self.node_size[self.nodelist.index(target)]
-            shrink_source = self.to_marker_edge(source_node_size, self.
-                node_shape)
-            shrink_target = self.to_marker_edge(target_node_size, self.
-                node_shape)
+            shrink_source = self.to_marker_edge(source_node_size, self.node_shape)
+            shrink_target = self.to_marker_edge(target_node_size, self.node_shape)
         else:
-            shrink_source = self.to_marker_edge(self.node_size, self.node_shape
-                )
+            shrink_source = self.to_marker_edge(self.node_size, self.node_shape)
             shrink_target = shrink_source
         shrink_source = max(shrink_source, self.min_source_margin)
         shrink_target = max(shrink_target, self.min_target_margin)
+
+        # scale factor of arrow head
         if isinstance(self.arrowsize, list):
             mutation_scale = self.arrowsize[i]
         else:
             mutation_scale = self.arrowsize
+
         if len(self.arrow_colors) > i:
             arrow_color = self.arrow_colors[i]
         elif len(self.arrow_colors) == 1:
             arrow_color = self.arrow_colors[0]
-        else:
+        else:  # Cycle through colors
             arrow_color = self.arrow_colors[i % len(self.arrow_colors)]
+
         if self.np.iterable(self.linewidth):
             if len(self.linewidth) > i:
                 linewidth = self.linewidth[i]
@@ -430,33 +632,71 @@ class FancyArrowFactory:
                 linewidth = self.linewidth[i % len(self.linewidth)]
         else:
             linewidth = self.linewidth
-        if self.np.iterable(self.style) and not isinstance(self.style, str
-            ) and not isinstance(self.style, tuple):
+
+        if (
+            self.np.iterable(self.style)
+            and not isinstance(self.style, str)
+            and not isinstance(self.style, tuple)
+        ):
             if len(self.style) > i:
                 linestyle = self.style[i]
-            else:
+            else:  # Cycle through styles
                 linestyle = self.style[i % len(self.style)]
         else:
             linestyle = self.style
+
         if x1 == x2 and y1 == y2:
-            connectionstyle = self.connectionstyle_factory.self_loop(self.
-                edge_indices[i])
+            connectionstyle = self.connectionstyle_factory.self_loop(
+                self.edge_indices[i]
+            )
+        else:
+            connectionstyle = self.connectionstyle_factory.curved(self.edge_indices[i])
+        return self.mpl.patches.FancyArrowPatch(
+            (x1, y1),
+            (x2, y2),
+            arrowstyle=self.arrowstyle,
+            shrinkA=shrink_source,
+            shrinkB=shrink_target,
+            mutation_scale=mutation_scale,
+            color=arrow_color,
+            linewidth=linewidth,
+            connectionstyle=connectionstyle,
+            linestyle=linestyle,
+            zorder=1,  # arrows go behind nodes
+        )
+
+    def to_marker_edge(self, marker_size, marker):
+        if marker in "s^>v<d":  # `large` markers need extra space
+            return self.np.sqrt(2 * marker_size) / 2
         else:
-            connectionstyle = self.connectionstyle_factory.curved(self.
-                edge_indices[i])
-        return self.mpl.patches.FancyArrowPatch((x1, y1), (x2, y2),
-            arrowstyle=self.arrowstyle, shrinkA=shrink_source, shrinkB=
-            shrink_target, mutation_scale=mutation_scale, color=arrow_color,
-            linewidth=linewidth, connectionstyle=connectionstyle, linestyle
-            =linestyle, zorder=1)
-
-
-def draw_networkx_edges(G, pos, edgelist=None, width=1.0, edge_color='k',
-    style='solid', alpha=None, arrowstyle=None, arrowsize=10, edge_cmap=
-    None, edge_vmin=None, edge_vmax=None, ax=None, arrows=None, label=None,
-    node_size=300, nodelist=None, node_shape='o', connectionstyle='arc3',
-    min_source_margin=0, min_target_margin=0, hide_ticks=True):
-    """Draw the edges of the graph G.
+            return self.np.sqrt(marker_size) / 2
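
In to_marker_edge above, marker_size is a matplotlib scatter size, i.e. an
area in points squared, so the returned shrink margin is roughly the marker
radius in points. A quick check of the arithmetic for the default size 300:

    import math

    print(math.sqrt(300) / 2)      # ~8.66 points for round markers
    print(math.sqrt(2 * 300) / 2)  # ~12.25 points for "large" markers
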
+
+
+def draw_networkx_edges(
+    G,
+    pos,
+    edgelist=None,
+    width=1.0,
+    edge_color="k",
+    style="solid",
+    alpha=None,
+    arrowstyle=None,
+    arrowsize=10,
+    edge_cmap=None,
+    edge_vmin=None,
+    edge_vmax=None,
+    ax=None,
+    arrows=None,
+    label=None,
+    node_size=300,
+    nodelist=None,
+    node_shape="o",
+    connectionstyle="arc3",
+    min_source_margin=0,
+    min_target_margin=0,
+    hide_ticks=True,
+):
+    r"""Draw the edges of the graph G.

     This draws only the edges of the graph G.

@@ -516,8 +756,8 @@ def draw_networkx_edges(G, pos, edgelist=None, width=1.0, edge_color='k',

         Note: Arrowheads will be the same color as edges.

-    arrowstyle : str (default='-\\|>' for directed graphs)
-        For directed graphs and `arrows==True` defaults to '-\\|>',
+    arrowstyle : str (default='-\|>' for directed graphs)
+        For directed graphs and `arrows==True` defaults to '-\|>',
         For undirected graphs, defaults to '-'.

         See `matplotlib.patches.ArrowStyle` for more options.
@@ -617,13 +857,210 @@ def draw_networkx_edges(G, pos, edgelist=None, width=1.0, edge_color='k',
     draw_networkx_edge_labels

     """
-    pass
-
-
-def draw_networkx_labels(G, pos, labels=None, font_size=12, font_color='k',
-    font_family='sans-serif', font_weight='normal', alpha=None, bbox=None,
-    horizontalalignment='center', verticalalignment='center', ax=None,
-    clip_on=True, hide_ticks=True):
+    import warnings
+
+    import matplotlib as mpl
+    import matplotlib.collections  # call as mpl.collections
+    import matplotlib.colors  # call as mpl.colors
+    import matplotlib.pyplot as plt
+    import numpy as np
+
+    # The default behavior is to use LineCollection to draw edges for
+    # undirected graphs (for performance reasons) and use FancyArrowPatches
+    # for directed graphs.
+    # The `arrows` keyword can be used to override the default behavior
+    if arrows is None:
+        use_linecollection = not (G.is_directed() or G.is_multigraph())
+    else:
+        if not isinstance(arrows, bool):
+            raise TypeError("Argument `arrows` must be of type bool or None")
+        use_linecollection = not arrows
+
+    if isinstance(connectionstyle, str):
+        connectionstyle = [connectionstyle]
+    elif np.iterable(connectionstyle):
+        connectionstyle = list(connectionstyle)
+    else:
+        msg = "draw_networkx_edges arg `connectionstyle` must be str or iterable"
+        raise nx.NetworkXError(msg)
+
+    # Some kwargs only apply to FancyArrowPatches. Warn users when they use
+    # non-default values for these kwargs when LineCollection is being used
+    # instead of silently ignoring the specified option
+    if use_linecollection:
+        msg = (
+            "\n\nThe {0} keyword argument is not applicable when drawing edges\n"
+            "with LineCollection.\n\n"
+            "To make this warning go away, either specify `arrows=True` to\n"
+            "force FancyArrowPatches or use the default values.\n"
+            "Note that using FancyArrowPatches may be slow for large graphs.\n"
+        )
+        if arrowstyle is not None:
+            warnings.warn(msg.format("arrowstyle"), category=UserWarning, stacklevel=2)
+        if arrowsize != 10:
+            warnings.warn(msg.format("arrowsize"), category=UserWarning, stacklevel=2)
+        if min_source_margin != 0:
+            warnings.warn(
+                msg.format("min_source_margin"), category=UserWarning, stacklevel=2
+            )
+        if min_target_margin != 0:
+            warnings.warn(
+                msg.format("min_target_margin"), category=UserWarning, stacklevel=2
+            )
+        if any(cs != "arc3" for cs in connectionstyle):
+            warnings.warn(
+                msg.format("connectionstyle"), category=UserWarning, stacklevel=2
+            )
+
+    # NOTE: Arrowstyle modification must occur after the warnings section
+    if arrowstyle is None:
+        arrowstyle = "-|>" if G.is_directed() else "-"
+
+    if ax is None:
+        ax = plt.gca()
+
+    if edgelist is None:
+        edgelist = list(G.edges)  # (u, v, k) for multigraph (u, v) otherwise
+
+    if len(edgelist):
+        if G.is_multigraph():
+            key_count = collections.defaultdict(lambda: itertools.count(0))
+            edge_indices = [next(key_count[tuple(e[:2])]) for e in edgelist]
+        else:
+            edge_indices = [0] * len(edgelist)
+    else:  # no edges!
+        return []
+
+    if nodelist is None:
+        nodelist = list(G.nodes())
+
+    # FancyArrowPatch handles color=None different from LineCollection
+    if edge_color is None:
+        edge_color = "k"
+
+    # set edge positions
+    edge_pos = np.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])
+
+    # Check if edge_color is an array of floats and map to edge_cmap.
+    # This is the only case handled differently from matplotlib
+    if (
+        np.iterable(edge_color)
+        and (len(edge_color) == len(edge_pos))
+        and np.all([isinstance(c, Number) for c in edge_color])
+    ):
+        if edge_cmap is not None:
+            assert isinstance(edge_cmap, mpl.colors.Colormap)
+        else:
+            edge_cmap = plt.get_cmap()
+        if edge_vmin is None:
+            edge_vmin = min(edge_color)
+        if edge_vmax is None:
+            edge_vmax = max(edge_color)
+        color_normal = mpl.colors.Normalize(vmin=edge_vmin, vmax=edge_vmax)
+        edge_color = [edge_cmap(color_normal(e)) for e in edge_color]
+
+    # compute initial view
+    minx = np.amin(np.ravel(edge_pos[:, :, 0]))
+    maxx = np.amax(np.ravel(edge_pos[:, :, 0]))
+    miny = np.amin(np.ravel(edge_pos[:, :, 1]))
+    maxy = np.amax(np.ravel(edge_pos[:, :, 1]))
+    w = maxx - minx
+    h = maxy - miny
+
+    # Self-loops are scaled by the view extent, except when the extent
+    # is 0, e.g. for a single node. In that case, fall back to scaling
+    # by the maximum node size
+    selfloop_height = h if h != 0 else 0.005 * np.array(node_size).max()
+    fancy_arrow_factory = FancyArrowFactory(
+        edge_pos,
+        edgelist,
+        nodelist,
+        edge_indices,
+        node_size,
+        selfloop_height,
+        connectionstyle,
+        node_shape,
+        arrowstyle,
+        arrowsize,
+        edge_color,
+        alpha,
+        width,
+        style,
+        min_source_margin,
+        min_target_margin,
+        ax=ax,
+    )
+
+    # Draw the edges
+    if use_linecollection:
+        edge_collection = mpl.collections.LineCollection(
+            edge_pos,
+            colors=edge_color,
+            linewidths=width,
+            antialiaseds=(1,),
+            linestyle=style,
+            alpha=alpha,
+        )
+        edge_collection.set_cmap(edge_cmap)
+        edge_collection.set_clim(edge_vmin, edge_vmax)
+        edge_collection.set_zorder(1)  # edges go behind nodes
+        edge_collection.set_label(label)
+        ax.add_collection(edge_collection)
+        edge_viz_obj = edge_collection
+
+        # Make sure selfloop edges are also drawn
+        # ---------------------------------------
+        selfloops_to_draw = [loop for loop in nx.selfloop_edges(G) if loop in edgelist]
+        if selfloops_to_draw:
+            edgelist_tuple = list(map(tuple, edgelist))
+            arrow_collection = []
+            for loop in selfloops_to_draw:
+                i = edgelist_tuple.index(loop)
+                arrow = fancy_arrow_factory(i)
+                arrow_collection.append(arrow)
+                ax.add_patch(arrow)
+    else:
+        edge_viz_obj = []
+        for i in range(len(edgelist)):
+            arrow = fancy_arrow_factory(i)
+            ax.add_patch(arrow)
+            edge_viz_obj.append(arrow)
+
+    # update view after drawing
+    padx, pady = 0.05 * w, 0.05 * h
+    corners = (minx - padx, miny - pady), (maxx + padx, maxy + pady)
+    ax.update_datalim(corners)
+    ax.autoscale_view()
+
+    if hide_ticks:
+        ax.tick_params(
+            axis="both",
+            which="both",
+            bottom=False,
+            left=False,
+            labelbottom=False,
+            labelleft=False,
+        )
+
+    return edge_viz_obj
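
Per the branching above, multigraphs (and any call with arrows=True) go
through FancyArrowPatches, with parallel edges cycling through the list of
connection styles. A sketch of the return value on a fresh axes (assumes
matplotlib is installed):

    import networkx as nx

    G = nx.MultiGraph([(0, 1), (0, 1), (1, 2)])
    pos = {n: (n, n) for n in G}
    arrows = nx.draw_networkx_edges(
        G, pos, connectionstyle=["arc3,rad=0.1", "arc3,rad=0.2"]
    )
    print(len(arrows))  # one FancyArrowPatch per entry in edgelist (3 here)
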
+
+
+def draw_networkx_labels(
+    G,
+    pos,
+    labels=None,
+    font_size=12,
+    font_color="k",
+    font_family="sans-serif",
+    font_weight="normal",
+    alpha=None,
+    bbox=None,
+    horizontalalignment="center",
+    verticalalignment="center",
+    ax=None,
+    clip_on=True,
+    hide_ticks=True,
+):
     """Draw node labels on the graph G.

     Parameters
@@ -697,14 +1134,70 @@ def draw_networkx_labels(G, pos, labels=None, font_size=12, font_color='k',
     draw_networkx_edges
     draw_networkx_edge_labels
     """
-    pass
-
-
-def draw_networkx_edge_labels(G, pos, edge_labels=None, label_pos=0.5,
-    font_size=10, font_color='k', font_family='sans-serif', font_weight=
-    'normal', alpha=None, bbox=None, horizontalalignment='center',
-    verticalalignment='center', ax=None, rotate=True, clip_on=True,
-    node_size=300, nodelist=None, connectionstyle='arc3', hide_ticks=True):
+    import matplotlib.pyplot as plt
+
+    if ax is None:
+        ax = plt.gca()
+
+    if labels is None:
+        labels = {n: n for n in G.nodes()}
+
+    text_items = {}  # there is no text collection so we'll fake one
+    for n, label in labels.items():
+        (x, y) = pos[n]
+        if not isinstance(label, str):
+            label = str(label)  # this makes "1" and 1 labeled the same
+        t = ax.text(
+            x,
+            y,
+            label,
+            size=font_size,
+            color=font_color,
+            family=font_family,
+            weight=font_weight,
+            alpha=alpha,
+            horizontalalignment=horizontalalignment,
+            verticalalignment=verticalalignment,
+            transform=ax.transData,
+            bbox=bbox,
+            clip_on=clip_on,
+        )
+        text_items[n] = t
+
+    if hide_ticks:
+        ax.tick_params(
+            axis="both",
+            which="both",
+            bottom=False,
+            left=False,
+            labelbottom=False,
+            labelleft=False,
+        )
+
+    return text_items
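
draw_networkx_labels, as restored above, returns a dict of matplotlib Text
objects keyed by node, and only the nodes present in labels are drawn. For
example (assuming matplotlib is installed):

    import networkx as nx

    G = nx.path_graph(3)
    pos = {n: (n, 0) for n in G}
    texts = nx.draw_networkx_labels(G, pos, labels={0: "src", 2: "dst"})
    print(sorted(texts))  # [0, 2]
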
+
+
+def draw_networkx_edge_labels(
+    G,
+    pos,
+    edge_labels=None,
+    label_pos=0.5,
+    font_size=10,
+    font_color="k",
+    font_family="sans-serif",
+    font_weight="normal",
+    alpha=None,
+    bbox=None,
+    horizontalalignment="center",
+    verticalalignment="center",
+    ax=None,
+    rotate=True,
+    clip_on=True,
+    node_size=300,
+    nodelist=None,
+    connectionstyle="arc3",
+    hide_ticks=True,
+):
     """Draw edge labels.

     Parameters
@@ -797,7 +1290,216 @@ def draw_networkx_edge_labels(G, pos, edge_labels=None, label_pos=0.5,
     draw_networkx_edges
     draw_networkx_labels
     """
-    pass
+    import matplotlib as mpl
+    import matplotlib.pyplot as plt
+    import numpy as np
+
+    class CurvedArrowText(mpl.text.Text):
+        def __init__(
+            self,
+            arrow,
+            *args,
+            label_pos=0.5,
+            labels_horizontal=False,
+            ax=None,
+            **kwargs,
+        ):
+            # Bind to FancyArrowPatch
+            self.arrow = arrow
+            # how far along the text should be on the curve,
+            # 0 is at start, 1 is at end etc.
+            self.label_pos = label_pos
+            self.labels_horizontal = labels_horizontal
+            if ax is None:
+                ax = plt.gca()
+            self.ax = ax
+            self.x, self.y, self.angle = self._update_text_pos_angle(arrow)
+
+            # Create text object
+            super().__init__(self.x, self.y, *args, rotation=self.angle, **kwargs)
+            # Bind to axis
+            self.ax.add_artist(self)
+
+        def _get_arrow_path_disp(self, arrow):
+            """
+            This is part of FancyArrowPatch._get_path_in_displaycoord.
+            It omits the second part of the method, where the path is
+            converted to a polygon based on width.
+            The transform is taken from ax, not the object, because the
+            object has not been added to the axes yet and so has no
+            transform.
+            """
+            dpi_cor = arrow._dpi_cor
+            # trans_data = arrow.get_transform()
+            trans_data = self.ax.transData
+            if arrow._posA_posB is not None:
+                posA = arrow._convert_xy_units(arrow._posA_posB[0])
+                posB = arrow._convert_xy_units(arrow._posA_posB[1])
+                (posA, posB) = trans_data.transform((posA, posB))
+                _path = arrow.get_connectionstyle()(
+                    posA,
+                    posB,
+                    patchA=arrow.patchA,
+                    patchB=arrow.patchB,
+                    shrinkA=arrow.shrinkA * dpi_cor,
+                    shrinkB=arrow.shrinkB * dpi_cor,
+                )
+            else:
+                _path = trans_data.transform_path(arrow._path_original)
+            # Return is in display coordinates
+            return _path
+
+        def _update_text_pos_angle(self, arrow):
+            # Fractional label position
+            path_disp = self._get_arrow_path_disp(arrow)
+            (x1, y1), (cx, cy), (x2, y2) = path_disp.vertices
+            # Text position at a proportion t along the line in display coords
+            # default is 0.5 so text appears at the halfway point
+            t = self.label_pos
+            tt = 1 - t
+            x = tt**2 * x1 + 2 * t * tt * cx + t**2 * x2
+            y = tt**2 * y1 + 2 * t * tt * cy + t**2 * y2
+            if self.labels_horizontal:
+                # Horizontal text labels
+                angle = 0
+            else:
+                # Labels parallel to curve
+                change_x = 2 * tt * (cx - x1) + 2 * t * (x2 - cx)
+                change_y = 2 * tt * (cy - y1) + 2 * t * (y2 - cy)
+                angle = (np.arctan2(change_y, change_x) / (2 * np.pi)) * 360
+                # Text is "right way up"
+                if angle > 90:
+                    angle -= 180
+                if angle < -90:
+                    angle += 180
+            (x, y) = self.ax.transData.inverted().transform((x, y))
+            return x, y, angle
+
+        def draw(self, renderer):
+            # recalculate the text position and angle
+            self.x, self.y, self.angle = self._update_text_pos_angle(self.arrow)
+            self.set_position((self.x, self.y))
+            self.set_rotation(self.angle)
+            # redraw text
+            super().draw(renderer)
+
+    # use default box of white with white border
+    if bbox is None:
+        bbox = {"boxstyle": "round", "ec": (1.0, 1.0, 1.0), "fc": (1.0, 1.0, 1.0)}
+
+    if isinstance(connectionstyle, str):
+        connectionstyle = [connectionstyle]
+    elif np.iterable(connectionstyle):
+        connectionstyle = list(connectionstyle)
+    else:
+        raise nx.NetworkXError(
+            "draw_networkx_edges arg `connectionstyle` must be"
+            "string or iterable of strings"
+        )
+
+    if ax is None:
+        ax = plt.gca()
+
+    if edge_labels is None:
+        kwds = {"keys": True} if G.is_multigraph() else {}
+        edge_labels = {tuple(edge): d for *edge, d in G.edges(data=True, **kwds)}
+    # NOTHING TO PLOT
+    if not edge_labels:
+        return {}
+    edgelist, labels = zip(*edge_labels.items())
+
+    if nodelist is None:
+        nodelist = list(G.nodes())
+
+    # set edge positions
+    edge_pos = np.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])
+
+    if G.is_multigraph():
+        key_count = collections.defaultdict(lambda: itertools.count(0))
+        edge_indices = [next(key_count[tuple(e[:2])]) for e in edgelist]
+    else:
+        edge_indices = [0] * len(edgelist)
+
+    # Used to determine self loop mid-point
+    # Note that this will not be accurate if edge_labels are not
+    #   provided for all of the edges that were drawn
+    h = 0
+    if edge_labels:
+        miny = np.amin(np.ravel(edge_pos[:, :, 1]))
+        maxy = np.amax(np.ravel(edge_pos[:, :, 1]))
+        h = maxy - miny
+    selfloop_height = h if h != 0 else 0.005 * np.array(node_size).max()
+    fancy_arrow_factory = FancyArrowFactory(
+        edge_pos,
+        edgelist,
+        nodelist,
+        edge_indices,
+        node_size,
+        selfloop_height,
+        connectionstyle,
+        ax=ax,
+    )
+
+    text_items = {}
+    for i, (edge, label) in enumerate(zip(edgelist, labels)):
+        if not isinstance(label, str):
+            label = str(label)  # this makes "1" and 1 labeled the same
+
+        n1, n2 = edge[:2]
+        arrow = fancy_arrow_factory(i)
+        if n1 == n2:
+            connectionstyle_obj = arrow.get_connectionstyle()
+            posA = ax.transData.transform(pos[n1])
+            path_disp = connectionstyle_obj(posA, posA)
+            path_data = ax.transData.inverted().transform_path(path_disp)
+            x, y = path_data.vertices[0]
+            text_items[edge] = ax.text(
+                x,
+                y,
+                label,
+                size=font_size,
+                color=font_color,
+                family=font_family,
+                weight=font_weight,
+                alpha=alpha,
+                horizontalalignment=horizontalalignment,
+                verticalalignment=verticalalignment,
+                rotation=0,
+                transform=ax.transData,
+                bbox=bbox,
+                zorder=1,
+                clip_on=clip_on,
+            )
+        else:
+            text_items[edge] = CurvedArrowText(
+                arrow,
+                label,
+                size=font_size,
+                color=font_color,
+                family=font_family,
+                weight=font_weight,
+                alpha=alpha,
+                horizontalalignment=horizontalalignment,
+                verticalalignment=verticalalignment,
+                transform=ax.transData,
+                bbox=bbox,
+                zorder=1,
+                clip_on=clip_on,
+                label_pos=label_pos,
+                labels_horizontal=not rotate,
+                ax=ax,
+            )
+
+    if hide_ticks:
+        ax.tick_params(
+            axis="both",
+            which="both",
+            bottom=False,
+            left=False,
+            labelbottom=False,
+            labelleft=False,
+        )
+
+    return text_items
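
For multigraphs, the default edge_labels above are keyed by (u, v, key)
tuples, and labels on curved parallel edges are placed by the CurvedArrowText
helper. A short sketch (assumes matplotlib is installed):

    import networkx as nx

    G = nx.MultiDiGraph()
    G.add_edge(0, 1, weight=0.3)
    G.add_edge(0, 1, weight=0.7)
    pos = {0: (0, 0), 1: (1, 1)}
    labels = nx.draw_networkx_edge_labels(
        G, pos, connectionstyle=["arc3,rad=0.1", "arc3,rad=0.2"]
    )
    print(sorted(labels))  # [(0, 1, 0), (0, 1, 1)]
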


 def draw_circular(G, **kwargs):
@@ -836,7 +1538,7 @@ def draw_circular(G, **kwargs):
     --------
     :func:`~networkx.drawing.layout.circular_layout`
     """
-    pass
+    draw(G, circular_layout(G), **kwargs)


 def draw_kamada_kawai(G, **kwargs):
@@ -876,7 +1578,7 @@ def draw_kamada_kawai(G, **kwargs):
     --------
     :func:`~networkx.drawing.layout.kamada_kawai_layout`
     """
-    pass
+    draw(G, kamada_kawai_layout(G), **kwargs)


 def draw_random(G, **kwargs):
@@ -915,7 +1617,7 @@ def draw_random(G, **kwargs):
     --------
     :func:`~networkx.drawing.layout.random_layout`
     """
-    pass
+    draw(G, random_layout(G), **kwargs)


 def draw_spectral(G, **kwargs):
@@ -957,7 +1659,7 @@ def draw_spectral(G, **kwargs):
     --------
     :func:`~networkx.drawing.layout.spectral_layout`
     """
-    pass
+    draw(G, spectral_layout(G), **kwargs)


 def draw_spring(G, **kwargs):
@@ -1000,7 +1702,7 @@ def draw_spring(G, **kwargs):
     draw
     :func:`~networkx.drawing.layout.spring_layout`
     """
-    pass
+    draw(G, spring_layout(G), **kwargs)


 def draw_shell(G, nlist=None, **kwargs):
@@ -1045,7 +1747,7 @@ def draw_shell(G, nlist=None, **kwargs):
     --------
     :func:`~networkx.drawing.layout.shell_layout`
     """
-    pass
+    draw(G, shell_layout(G, nlist=nlist), **kwargs)


 def draw_planar(G, **kwargs):
@@ -1089,7 +1791,7 @@ def draw_planar(G, **kwargs):
     --------
     :func:`~networkx.drawing.layout.planar_layout`
     """
-    pass
+    draw(G, planar_layout(G), **kwargs)


 def apply_alpha(colors, alpha, elem_list, cmap=None, vmin=None, vmax=None):
@@ -1129,4 +1831,41 @@ def apply_alpha(colors, alpha, elem_list, cmap=None, vmin=None, vmax=None):
         Array containing RGBA format values for each of the node colours.

     """
-    pass
+    from itertools import cycle, islice
+
+    import matplotlib as mpl
+    import matplotlib.cm  # call as mpl.cm
+    import matplotlib.colors  # call as mpl.colors
+    import numpy as np
+
+    # If we have been provided with a list of numbers as long as elem_list,
+    # apply the color mapping.
+    if len(colors) == len(elem_list) and isinstance(colors[0], Number):
+        mapper = mpl.cm.ScalarMappable(cmap=cmap)
+        mapper.set_clim(vmin, vmax)
+        rgba_colors = mapper.to_rgba(colors)
+    # Otherwise, convert colors to matplotlib's RGB using the colorConverter
+    # object.  These are converted to numpy ndarrays to be consistent with the
+    # to_rgba method of ScalarMappable.
+    else:
+        try:
+            rgba_colors = np.array([mpl.colors.colorConverter.to_rgba(colors)])
+        except ValueError:
+            rgba_colors = np.array(
+                [mpl.colors.colorConverter.to_rgba(color) for color in colors]
+            )
+    # Set the final column of the rgba_colors to have the relevant alpha values
+    try:
+        # If alpha is longer than the number of colors, resize to the number of
+        # elements.  Also, if rgba_colors.size (the number of elements of
+        # rgba_colors) is the same as the number of elements, resize the array,
+        # to avoid it being interpreted as a colormap by scatter()
+        if len(alpha) > len(rgba_colors) or rgba_colors.size == len(elem_list):
+            rgba_colors = np.resize(rgba_colors, (len(elem_list), 4))
+            rgba_colors[1:, 0] = rgba_colors[0, 0]
+            rgba_colors[1:, 1] = rgba_colors[0, 1]
+            rgba_colors[1:, 2] = rgba_colors[0, 2]
+        rgba_colors[:, 3] = list(islice(cycle(alpha), len(rgba_colors)))
+    except TypeError:
+        rgba_colors[:, -1] = alpha
+    return rgba_colors
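
A sketch of the alpha-broadcasting behavior implemented above: a single color
is resized to match elem_list, and the alpha sequence is cycled across the
final column (apply_alpha is a module-level helper, not part of __all__):

    import networkx as nx

    rgba = nx.drawing.nx_pylab.apply_alpha("r", [0.5, 1.0], [0, 1, 2])
    print(rgba.shape)  # (3, 4)
    print(rgba[:, 3])  # [0.5 1.  0.5]
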
diff --git a/networkx/exception.py b/networkx/exception.py
index adf73034e..96694cc32 100644
--- a/networkx/exception.py
+++ b/networkx/exception.py
@@ -5,11 +5,23 @@ Exceptions

 Base exceptions and errors for NetworkX.
 """
-__all__ = ['HasACycle', 'NodeNotFound', 'PowerIterationFailedConvergence',
-    'ExceededMaxIterations', 'AmbiguousSolution', 'NetworkXAlgorithmError',
-    'NetworkXException', 'NetworkXError', 'NetworkXNoCycle',
-    'NetworkXNoPath', 'NetworkXNotImplemented', 'NetworkXPointlessConcept',
-    'NetworkXUnbounded', 'NetworkXUnfeasible']
+
+__all__ = [
+    "HasACycle",
+    "NodeNotFound",
+    "PowerIterationFailedConvergence",
+    "ExceededMaxIterations",
+    "AmbiguousSolution",
+    "NetworkXAlgorithmError",
+    "NetworkXException",
+    "NetworkXError",
+    "NetworkXNoCycle",
+    "NetworkXNoPath",
+    "NetworkXNotImplemented",
+    "NetworkXPointlessConcept",
+    "NetworkXUnbounded",
+    "NetworkXUnfeasible",
+]


 class NetworkXException(Exception):
@@ -107,9 +119,7 @@ class PowerIterationFailedConvergence(ExceededMaxIterations):
     """

     def __init__(self, num_iterations, *args, **kw):
-        msg = (
-            f'power iteration failed to converge within {num_iterations} iterations'
-            )
+        msg = f"power iteration failed to converge within {num_iterations} iterations"
         exception_message = msg
         superinit = super().__init__
         superinit(self, exception_message, *args, **kw)
diff --git a/networkx/generators/atlas.py b/networkx/generators/atlas.py
index 708531538..262443628 100644
--- a/networkx/generators/atlas.py
+++ b/networkx/generators/atlas.py
@@ -6,10 +6,52 @@ import importlib.resources
 import os
 import os.path
 from itertools import islice
+
 import networkx as nx
-__all__ = ['graph_atlas', 'graph_atlas_g']
+
+__all__ = ["graph_atlas", "graph_atlas_g"]
+
+#: The total number of graphs in the atlas.
+#:
+#: The graphs are labeled starting from 0 and extending to (but not
+#: including) this number.
 NUM_GRAPHS = 1253
-ATLAS_FILE = importlib.resources.files('networkx.generators') / 'atlas.dat.gz'
+
+#: The path to the data file containing the graph edge lists.
+#:
+#: This is the absolute path of the gzipped text file containing the
+#: edge list for each graph in the atlas. The file contains one entry
+#: per graph in the atlas, in sequential order, starting from graph
+#: number 0 and extending through graph number 1252 (see
+#: :data:`NUM_GRAPHS`). Each entry looks like
+#:
+#: .. sourcecode:: text
+#:
+#:    GRAPH 6
+#:    NODES 3
+#:    0 1
+#:    0 2
+#:
+#: where the first two lines are the graph's index in the atlas and the
+#: number of nodes in the graph, and the remaining lines are the edge
+#: list.
+#:
+#: This file was generated from a Python list of graphs via code like
+#: the following::
+#:
+#:     import gzip
+#:     from networkx.generators.atlas import graph_atlas_g
+#:     from networkx.readwrite.edgelist import write_edgelist
+#:
+#:     with gzip.open('atlas.dat.gz', 'wb') as f:
+#:         for i, G in enumerate(graph_atlas_g()):
+#:             f.write(bytes(f'GRAPH {i}\n', encoding='utf-8'))
+#:             f.write(bytes(f'NODES {len(G)}\n', encoding='utf-8'))
+#:             write_edgelist(G, f, data=False)
+#:
+
+# Path to the atlas file
+ATLAS_FILE = importlib.resources.files("networkx.generators") / "atlas.dat.gz"


 def _generate_graphs():
@@ -19,7 +61,31 @@ def _generate_graphs():
     This function reads the file given in :data:`.ATLAS_FILE`.

     """
-    pass
+    with gzip.open(ATLAS_FILE, "rb") as f:
+        line = f.readline()
+        while line and line.startswith(b"GRAPH"):
+            # The first two lines of each entry tell us the index of the
+            # graph in the list and the number of nodes in the graph.
+            # They look like this:
+            #
+            #     GRAPH 3
+            #     NODES 2
+            #
+            graph_index = int(line[6:].rstrip())
+            line = f.readline()
+            num_nodes = int(line[6:].rstrip())
+            # The remaining lines contain the edge list, until the next
+            # GRAPH line (or until the end of the file).
+            edgelist = []
+            line = f.readline()
+            while line and not line.startswith(b"GRAPH"):
+                edgelist.append(line.rstrip())
+                line = f.readline()
+            G = nx.Graph()
+            G.name = f"G{graph_index}"
+            G.add_nodes_from(range(num_nodes))
+            G.add_edges_from(tuple(map(int, e.split())) for e in edgelist)
+            yield G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -56,7 +122,9 @@ def graph_atlas(i):
            Oxford University Press, 1998.

     """
-    pass
+    if not (0 <= i < NUM_GRAPHS):
+        raise ValueError(f"index must be between 0 and {NUM_GRAPHS}")
+    return next(islice(_generate_graphs(), i, None))
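
Given the atlas file entry quoted earlier (GRAPH 6 with NODES 3 and edges
0-1, 0-2), the restored reader can be exercised directly:

    import networkx as nx

    G = nx.graph_atlas(6)
    print(G.number_of_nodes(), sorted(G.edges()))  # 3 [(0, 1), (0, 2)]
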


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -108,4 +176,4 @@ def graph_atlas_g():
                Oxford University Press, 1998.

     """
-    pass
+    return list(_generate_graphs())
diff --git a/networkx/generators/classic.py b/networkx/generators/classic.py
index 7ebfc0fb5..4278a338d 100644
--- a/networkx/generators/classic.py
+++ b/networkx/generators/classic.py
@@ -9,19 +9,61 @@ as a simple graph. Except for `empty_graph`, all the functions
 in this module return a Graph class (i.e. a simple, undirected graph).

 """
+
 import itertools
 import numbers
+
 import networkx as nx
 from networkx.classes import Graph
 from networkx.exception import NetworkXError
 from networkx.utils import nodes_or_number, pairwise
-__all__ = ['balanced_tree', 'barbell_graph', 'binomial_tree',
-    'complete_graph', 'complete_multipartite_graph',
-    'circular_ladder_graph', 'circulant_graph', 'cycle_graph',
-    'dorogovtsev_goltsev_mendes_graph', 'empty_graph', 'full_rary_tree',
-    'kneser_graph', 'ladder_graph', 'lollipop_graph', 'null_graph',
-    'path_graph', 'star_graph', 'tadpole_graph', 'trivial_graph',
-    'turan_graph', 'wheel_graph']
+
+__all__ = [
+    "balanced_tree",
+    "barbell_graph",
+    "binomial_tree",
+    "complete_graph",
+    "complete_multipartite_graph",
+    "circular_ladder_graph",
+    "circulant_graph",
+    "cycle_graph",
+    "dorogovtsev_goltsev_mendes_graph",
+    "empty_graph",
+    "full_rary_tree",
+    "kneser_graph",
+    "ladder_graph",
+    "lollipop_graph",
+    "null_graph",
+    "path_graph",
+    "star_graph",
+    "tadpole_graph",
+    "trivial_graph",
+    "turan_graph",
+    "wheel_graph",
+]
+
+
+# -------------------------------------------------------------------
+#   Some Classic Graphs
+# -------------------------------------------------------------------
+
+
+def _tree_edges(n, r):
+    if n == 0:
+        return
+    # helper function for trees
+    # yields edges in rooted tree at 0 with n nodes and branching ratio r
+    nodes = iter(range(n))
+    parents = [next(nodes)]  # stack of max length r
+    while parents:
+        source = parents.pop(0)
+        for i in range(r):
+            try:
+                target = next(nodes)
+                parents.append(target)
+                yield source, target
+            except StopIteration:
+                break


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -57,7 +99,9 @@ def full_rary_tree(r, n, create_using=None):
     .. [1] An introduction to data structures and algorithms,
            James Andrew Storer,  Birkhauser Boston 2001, (page 225).
     """
-    pass
+    G = empty_graph(n, create_using)
+    G.add_edges_from(_tree_edges(n, r))
+    return G
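
Since _tree_edges hands out children breadth-first from a queue,
full_rary_tree produces the nodes level by level; for r=2 and n=7 this is
the complete binary tree on 0..6:

    import networkx as nx

    G = nx.full_rary_tree(2, 7)
    print(sorted(G.edges()))
    # [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)]
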


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -89,7 +133,22 @@ def kneser_graph(n, k):
     >>> nx.is_isomorphic(G, nx.petersen_graph())
     True
     """
-    pass
+    if n <= 0:
+        raise NetworkXError("n should be greater than zero")
+    if k <= 0 or k > n:
+        raise NetworkXError("k should be greater than zero and smaller than n")
+
+    G = nx.Graph()
+    # Create all k-subsets of [0, 1, ..., n-1]
+    subsets = list(itertools.combinations(range(n), k))
+
+    if 2 * k > n:
+        G.add_nodes_from(subsets)
+
+    universe = set(range(n))
+    comb = itertools.combinations  # only to make it all fit on one line
+    G.add_edges_from((s, t) for s in subsets for t in comb(universe - set(s), k))
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -128,7 +187,18 @@ def balanced_tree(r, h, create_using=None):
     A balanced tree is also known as a *complete r-ary tree*.

     """
-    pass
+    # The number of nodes in the balanced tree is `1 + r + ... + r^h`,
+    # which is computed by using the closed-form formula for a geometric
+    # sum with ratio `r`. In the special case that `r` is 1, the number
+    # of nodes is simply `h + 1` (since the tree is actually a path
+    # graph).
+    if r == 1:
+        n = h + 1
+    else:
+        # This must be an integer if both `r` and `h` are integers. If
+        # they are not, we force integer division anyway.
+        n = (1 - r ** (h + 1)) // (1 - r)
+    return full_rary_tree(r, n, create_using=create_using)


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -176,7 +246,32 @@ def barbell_graph(m1, m2, create_using=None):
     and Jim Fill's e-text on Random Walks on Graphs.

     """
-    pass
+    if m1 < 2:
+        raise NetworkXError("Invalid graph description, m1 should be >=2")
+    if m2 < 0:
+        raise NetworkXError("Invalid graph description, m2 should be >=0")
+
+    # left barbell
+    G = complete_graph(m1, create_using)
+    if G.is_directed():
+        raise NetworkXError("Directed Graph not supported")
+
+    # connecting path
+    G.add_nodes_from(range(m1, m1 + m2 - 1))
+    if m2 > 1:
+        G.add_edges_from(pairwise(range(m1, m1 + m2)))
+
+    # right barbell
+    G.add_edges_from(
+        (u, v) for u in range(m1 + m2, 2 * m1 + m2) for v in range(u + 1, 2 * m1 + m2)
+    )
+
+    # connect it up
+    G.add_edge(m1 - 1, m1)
+    if m2 > 0:
+        G.add_edge(m1 + m2 - 1, m1 + m2)
+
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -205,7 +300,16 @@ def binomial_tree(n, create_using=None):
         A binomial tree of $2^n$ nodes and $2^n - 1$ edges.

     """
-    pass
+    G = nx.empty_graph(1, create_using)
+
+    N = 1
+    for i in range(n):
+        # Use G.edges() to ensure 2-tuples. G.edges is 3-tuple for MultiGraph
+        edges = [(u + N, v + N) for (u, v) in G.edges()]
+        G.add_edges_from(edges)
+        G.add_edge(0, N)
+        N *= 2
+    return G
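
Each pass of the loop above copies the current tree with labels shifted by N
and links the two roots, so the node count doubles per iteration, giving
2**n nodes and 2**n - 1 edges:

    import networkx as nx

    for n in range(4):
        G = nx.binomial_tree(n)
        print(n, G.number_of_nodes(), G.number_of_edges())
    # 0 1 0 / 1 2 1 / 2 4 3 / 3 8 7
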


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -245,7 +349,15 @@ def complete_graph(n, create_using=None):
     True

     """
-    pass
+    _, nodes = n
+    G = empty_graph(nodes, create_using)
+    if len(nodes) > 1:
+        if G.is_directed():
+            edges = itertools.permutations(nodes, 2)
+        else:
+            edges = itertools.combinations(nodes, 2)
+        G.add_edges_from(edges)
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -262,15 +374,18 @@ def circular_ladder_graph(n, create_using=None):
         >>> nx.draw(nx.circular_ladder_graph(5))

     """
-    pass
+    G = ladder_graph(n, create_using)
+    G.add_edge(0, n - 1)
+    G.add_edge(n, 2 * n - 1)
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
 def circulant_graph(n, offsets, create_using=None):
-    """Returns the circulant graph $Ci_n(x_1, x_2, ..., x_m)$ with $n$ nodes.
+    r"""Returns the circulant graph $Ci_n(x_1, x_2, ..., x_m)$ with $n$ nodes.

     The circulant graph $Ci_n(x_1, ..., x_m)$ consists of $n$ nodes $0, ..., n-1$
-    such that node $i$ is connected to nodes $(i + x) \\mod n$ and $(i - x) \\mod n$
+    such that node $i$ is connected to nodes $(i + x) \mod n$ and $(i - x) \mod n$
     for all $x$ in $x_1, ..., x_m$. Thus $Ci_n(1)$ is a cycle graph.

     .. plot::
@@ -332,7 +447,12 @@ def circulant_graph(n, offsets, create_using=None):
     True

     """
-    pass
+    G = empty_graph(n, create_using)
+    for i in range(n):
+        for j in offsets:
+            G.add_edge(i, (i - j) % n)
+            G.add_edge(i, (i + j) % n)
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -361,7 +481,10 @@ def cycle_graph(n, create_using=None):
     If create_using is directed, the direction is in increasing order.

     """
-    pass
+    _, nodes = n
+    G = empty_graph(nodes, create_using)
+    G.add_edges_from(pairwise(nodes, cyclic=True))
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -406,7 +529,24 @@ def dorogovtsev_goltsev_mendes_graph(n, create_using=None):
         "Pseudofractal scale-free web", Physical Review E 65, 066122, 2002.
         https://arxiv.org/pdf/cond-mat/0112143.pdf
     """
-    pass
+    G = empty_graph(0, create_using)
+    if G.is_directed():
+        raise NetworkXError("Directed Graph not supported")
+    if G.is_multigraph():
+        raise NetworkXError("Multigraph not supported")
+
+    G.add_edge(0, 1)
+    if n == 0:
+        return G
+    new_node = 2  # next node to be added
+    for i in range(1, n + 1):  # iterate over number of generations.
+        last_generation_edges = list(G.edges())
+        number_of_edges_in_last_generation = len(last_generation_edges)
+        for j in range(number_of_edges_in_last_generation):
+            G.add_edge(new_node, last_generation_edges[j][0])
+            G.add_edge(new_node, last_generation_edges[j][1])
+            new_node += 1
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -492,7 +632,20 @@ def empty_graph(n=0, create_using=None, default=Graph):
     See also create_empty_copy(G).

     """
-    pass
+    if create_using is None:
+        G = default()
+    elif isinstance(create_using, type):
+        G = create_using()
+    elif not hasattr(create_using, "adj"):
+        raise TypeError("create_using is not a valid NetworkX graph type or instance")
+    else:
+        # create_using is a NetworkX style Graph
+        create_using.clear()
+        G = create_using
+
+    _, nodes = n
+    G.add_nodes_from(nodes)
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -509,7 +662,13 @@ def ladder_graph(n, create_using=None):
         >>> nx.draw(nx.ladder_graph(5))

     """
-    pass
+    G = empty_graph(2 * n, create_using)
+    if G.is_directed():
+        raise NetworkXError("Directed Graph not supported")
+    G.add_edges_from(pairwise(range(n)))
+    G.add_edges_from(pairwise(range(n, 2 * n)))
+    G.add_edges_from((v, v + n) for v in range(n))
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -550,7 +709,33 @@ def lollipop_graph(m, n, create_using=None):
     Fill's etext on Random Walks on Graphs.)

     """
-    pass
+    m, m_nodes = m
+    M = len(m_nodes)
+    if M < 2:
+        raise NetworkXError("Invalid description: m should indicate at least 2 nodes")
+
+    n, n_nodes = n
+    if isinstance(m, numbers.Integral) and isinstance(n, numbers.Integral):
+        n_nodes = list(range(M, M + n))
+    N = len(n_nodes)
+
+    # the ball
+    G = complete_graph(m_nodes, create_using)
+    if G.is_directed():
+        raise NetworkXError("Directed Graph not supported")
+
+    # the stick
+    G.add_nodes_from(n_nodes)
+    if N > 1:
+        G.add_edges_from(pairwise(n_nodes))
+
+    if len(G) != M + N:
+        raise NetworkXError("Nodes must be distinct in containers m and n")
+
+    # connect ball to stick
+    if M > 0 and N > 0:
+        G.add_edge(m_nodes[-1], n_nodes[0])
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -560,7 +745,8 @@ def null_graph(create_using=None):
     See empty_graph for the use of create_using.

     """
-    pass
+    G = empty_graph(0, create_using)
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -583,7 +769,10 @@ def path_graph(n, create_using=None):
        Graph type to create. If graph instance, then cleared before populated.

     """
-    pass
+    _, nodes = n
+    G = empty_graph(nodes, create_using)
+    G.add_edges_from(pairwise(nodes))
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -612,7 +801,17 @@ def star_graph(n, create_using=None):
     The graph has n+1 nodes for integer n.
     So star_graph(3) is the same as star_graph(range(4)).
     """
-    pass
+    n, nodes = n
+    if isinstance(n, numbers.Integral):
+        nodes.append(int(n))  # there should be n+1 nodes
+    G = empty_graph(nodes, create_using)
+    if G.is_directed():
+        raise NetworkXError("Directed Graph not supported")
+
+    if len(nodes) > 1:
+        hub, *spokes = nodes
+        G.add_edges_from((hub, node) for node in spokes)
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -657,7 +856,24 @@ def tadpole_graph(m, n, create_using=None):
     `m` and/or `n` can be a container of nodes instead of an integer.

     """
-    pass
+    m, m_nodes = m
+    M = len(m_nodes)
+    if M < 2:
+        raise NetworkXError("Invalid description: m should indicate at least 2 nodes")
+
+    n, n_nodes = n
+    if isinstance(m, numbers.Integral) and isinstance(n, numbers.Integral):
+        n_nodes = list(range(M, M + n))
+
+    # the circle
+    G = cycle_graph(m_nodes, create_using)
+    if G.is_directed():
+        raise NetworkXError("Directed Graph not supported")
+
+    # the stick
+    nx.add_path(G, [m_nodes[-1]] + list(n_nodes))
+
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -669,20 +885,21 @@ def trivial_graph(create_using=None):
         >>> nx.draw(nx.trivial_graph(), with_labels=True)

     """
-    pass
+    G = empty_graph(1, create_using)
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
 def turan_graph(n, r):
-    """Return the Turan Graph
+    r"""Return the Turan Graph

     The Turan Graph is a complete multipartite graph on $n$ nodes
     with $r$ disjoint subsets. That is, edges connect each node to
     every node not in its subset.

     Given $n$ and $r$, we create a complete multipartite graph with
-    $r-(n \\mod r)$ partitions of size $n/r$, rounded down, and
-    $n \\mod r$ partitions of size $n/r+1$, rounded down.
+    $r-(n \mod r)$ partitions of size $n/r$, rounded down, and
+    $n \mod r$ partitions of size $n/r+1$, rounded down.

     .. plot::

@@ -701,7 +918,13 @@ def turan_graph(n, r):
     Must satisfy $1 <= r <= n$.
     The graph has $(r-1)(n^2)/(2r)$ edges, rounded down.
     """
-    pass
+
+    if not 1 <= r <= n:
+        raise NetworkXError("Must satisfy 1 <= r <= n")
+
+    partitions = [n // r] * (r - (n % r)) + [n // r + 1] * (n % r)
+    G = complete_multipartite_graph(*partitions)
+    return G
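
A worked instance of the partition arithmetic above: for n=7, r=3 there are
r - (n % r) = 2 parts of size n // r = 2 and n % r = 1 part of size 3, and
the edge count matches floor((r-1) * n**2 / (2*r)) = 16:

    import networkx as nx

    G = nx.turan_graph(7, 3)
    print(G.number_of_nodes(), G.number_of_edges())  # 7 16
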


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -727,7 +950,17 @@ def wheel_graph(n, create_using=None):

     Node labels are the integers 0 to n - 1.
     """
-    pass
+    _, nodes = n
+    G = empty_graph(nodes, create_using)
+    if G.is_directed():
+        raise NetworkXError("Directed Graph not supported")
+
+    if len(nodes) > 1:
+        hub, *rim = nodes
+        G.add_edges_from((hub, node) for node in rim)
+        if len(rim) > 1:
+            G.add_edges_from(pairwise(rim, cyclic=True))
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -790,4 +1023,32 @@ def complete_multipartite_graph(*subset_sizes):
     --------
     complete_bipartite_graph
     """
-    pass
+    # The complete multipartite graph is an undirected simple graph.
+    G = Graph()
+
+    if len(subset_sizes) == 0:
+        return G
+
+    # set up subsets of nodes
+    try:
+        extents = pairwise(itertools.accumulate((0,) + subset_sizes))
+        subsets = [range(start, end) for start, end in extents]
+    except TypeError:
+        subsets = subset_sizes
+    else:
+        if any(size < 0 for size in subset_sizes):
+            raise NetworkXError(f"Negative number of nodes not valid: {subset_sizes}")
+
+    # add nodes with subset attribute
+    # while checking that ints are not mixed with iterables
+    try:
+        for i, subset in enumerate(subsets):
+            G.add_nodes_from(subset, subset=i)
+    except TypeError as err:
+        raise NetworkXError("Arguments must be all ints or all iterables") from err
+
+    # Across subsets, all nodes should be adjacent.
+    # We can use itertools.combinations() because undirected.
+    for subset1, subset2 in itertools.combinations(subsets, 2):
+        G.add_edges_from(itertools.product(subset1, subset2))
+    return G
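
The following sketch (illustrative only) exercises the subset bookkeeping and the cross-subset edge rule implemented above, using the bipartite case $K_{2,3}$:

```python
import itertools

import networkx as nx

# K_{2,3}: nodes 0-1 form subset 0, nodes 2-4 form subset 1.
G = nx.complete_multipartite_graph(2, 3)
parts = [[n for n, s in G.nodes(data="subset") if s == i] for i in (0, 1)]
print(parts)  # [[0, 1], [2, 3, 4]]

# Every cross-subset pair is an edge; no intra-subset pair is.
assert all(G.has_edge(u, v) for u, v in itertools.product(*parts))
assert not any(
    G.has_edge(u, v) for p in parts for u, v in itertools.combinations(p, 2)
)
print(G.number_of_edges())  # 2 * 3 == 6
```
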
diff --git a/networkx/generators/cographs.py b/networkx/generators/cographs.py
index 89ce0f377..388a42ac5 100644
--- a/networkx/generators/cographs.py
+++ b/networkx/generators/cographs.py
@@ -1,4 +1,4 @@
-"""Generators for cographs
+r"""Generators for cographs

 A cograph is a graph containing no path on four vertices.
 Cographs or $P_4$-free graphs can be obtained from a single vertex
@@ -13,13 +13,14 @@ References
 """
 import networkx as nx
 from networkx.utils import py_random_state
-__all__ = ['random_cograph']
+
+__all__ = ["random_cograph"]


 @py_random_state(1)
 @nx._dispatchable(graphs=None, returns_graph=True)
 def random_cograph(n, seed=None):
-    """Returns a random cograph with $2 ^ n$ nodes.
+    r"""Returns a random cograph with $2 ^ n$ nodes.

     A cograph is a graph containing no path on four vertices.
     Cographs or $P_4$-free graphs can be obtained from a single vertex
@@ -53,4 +54,14 @@ def random_cograph(n, seed=None):
        Discrete Applied Mathematics, Volume 3, Issue 3, 1981, Pages 163-174,
        ISSN 0166-218X.
     """
-    pass
+    R = nx.empty_graph(1)
+
+    for i in range(n):
+        RR = nx.relabel_nodes(R.copy(), lambda x: x + len(R))
+
+        if seed.randint(0, 1) == 0:
+            R = nx.full_join(R, RR)
+        else:
+            R = nx.disjoint_union(R, RR)
+
+    return R
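
Since the construction only alternates full joins and disjoint unions of cographs, the result is guaranteed $P_4$-free. The sketch below (illustrative, not part of the patch) verifies this by brute force on a small instance; a connected 4-node induced subgraph with 3 edges and degree sequence [1, 1, 2, 2] is exactly a path on four vertices:

```python
from itertools import combinations

import networkx as nx

G = nx.random_cograph(3, seed=42)
print(len(G))  # 2**3 == 8 nodes


def has_induced_p4(G):
    """Brute-force test for an induced path on four vertices."""
    for quad in combinations(G, 4):
        H = G.subgraph(quad)
        degs = sorted(d for _, d in H.degree())
        if H.number_of_edges() == 3 and degs == [1, 1, 2, 2] and nx.is_connected(H):
            return True
    return False


print(has_induced_p4(G))  # False, for every seed
```
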
diff --git a/networkx/generators/community.py b/networkx/generators/community.py
index f40ed2569..5076d7288 100644
--- a/networkx/generators/community.py
+++ b/networkx/generators/community.py
@@ -1,13 +1,22 @@
 """Generators for classes of graphs used in studying social networks."""
 import itertools
 import math
+
 import networkx as nx
 from networkx.utils import py_random_state
-__all__ = ['caveman_graph', 'connected_caveman_graph',
-    'relaxed_caveman_graph', 'random_partition_graph',
-    'planted_partition_graph', 'gaussian_random_partition_graph',
-    'ring_of_cliques', 'windmill_graph', 'stochastic_block_model',
-    'LFR_benchmark_graph']
+
+__all__ = [
+    "caveman_graph",
+    "connected_caveman_graph",
+    "relaxed_caveman_graph",
+    "random_partition_graph",
+    "planted_partition_graph",
+    "gaussian_random_partition_graph",
+    "ring_of_cliques",
+    "windmill_graph",
+    "stochastic_block_model",
+    "LFR_benchmark_graph",
+]


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -48,7 +57,13 @@ def caveman_graph(l, k):
     .. [1] Watts, D. J. 'Networks, Dynamics, and the Small-World Phenomenon.'
        Amer. J. Soc. 105, 493-527, 1999.
     """
-    pass
+    # l disjoint cliques of size k
+    G = nx.empty_graph(l * k)
+    if k > 1:
+        for start in range(0, l * k, k):
+            edges = itertools.combinations(range(start, start + k), 2)
+            G.add_edges_from(edges)
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -93,7 +108,16 @@ def connected_caveman_graph(l, k):
     .. [1] Watts, D. J. 'Networks, Dynamics, and the Small-World Phenomenon.'
        Amer. J. Soc. 105, 493-527, 1999.
     """
-    pass
+    if k < 2:
+        raise nx.NetworkXError(
+            "The size of cliques in a connected caveman graph must be at least 2."
+        )
+
+    G = nx.caveman_graph(l, k)
+    for start in range(0, l * k, k):
+        G.remove_edge(start, start + 1)
+        G.add_edge(start, (start - 1) % (l * k))
+    return G
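
A short check of both generators (illustration only): the rewiring in `connected_caveman_graph` removes one edge per clique and adds one ring edge per clique, so the edge count is preserved while the graph becomes connected.

```python
import networkx as nx

G = nx.caveman_graph(3, 4)                # three disjoint 4-cliques
print(nx.number_connected_components(G))  # 3
print(G.number_of_edges())                # 3 * C(4, 2) == 18

H = nx.connected_caveman_graph(3, 4)
print(nx.is_connected(H), H.number_of_edges())  # True 18
```
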


 @py_random_state(3)
@@ -136,7 +160,16 @@ def relaxed_caveman_graph(l, k, p, seed=None):
        Physics Reports Volume 486, Issues 3-5, February 2010, Pages 75-174.
        https://arxiv.org/abs/0906.0612
     """
-    pass
+    G = nx.caveman_graph(l, k)
+    nodes = list(G)
+    for u, v in G.edges():
+        if seed.random() < p:  # rewire the edge
+            x = seed.choice(nodes)
+            if G.has_edge(u, x):
+                continue
+            G.remove_edge(u, v)
+            G.add_edge(u, x)
+    return G


 @py_random_state(3)
@@ -194,7 +227,28 @@ def random_partition_graph(sizes, p_in, p_out, seed=None, directed=False):
     .. [1] Santo Fortunato 'Community Detection in Graphs' Physical Reports
        Volume 486, Issue 3-5 p. 75-174. https://arxiv.org/abs/0906.0612
     """
-    pass
+    # Use geometric method for O(n+m) complexity algorithm
+    # partition = nx.community_sets(nx.get_node_attributes(G, 'affiliation'))
+    if not 0.0 <= p_in <= 1.0:
+        raise nx.NetworkXError("p_in must be in [0,1]")
+    if not 0.0 <= p_out <= 1.0:
+        raise nx.NetworkXError("p_out must be in [0,1]")
+
+    # create connection matrix
+    num_blocks = len(sizes)
+    p = [[p_out for s in range(num_blocks)] for r in range(num_blocks)]
+    for r in range(num_blocks):
+        p[r][r] = p_in
+
+    return stochastic_block_model(
+        sizes,
+        p,
+        nodelist=None,
+        seed=seed,
+        directed=directed,
+        selfloops=False,
+        sparse=True,
+    )


 @py_random_state(4)
@@ -250,13 +304,12 @@ def planted_partition_graph(l, k, p_in, p_out, seed=None, directed=False):
     .. [2] Santo Fortunato 'Community Detection in Graphs' Physical Reports
        Volume 486, Issue 3-5 p. 75-174. https://arxiv.org/abs/0906.0612
     """
-    pass
+    return random_partition_graph([k] * l, p_in, p_out, seed=seed, directed=directed)


 @py_random_state(6)
 @nx._dispatchable(graphs=None, returns_graph=True)
-def gaussian_random_partition_graph(n, s, v, p_in, p_out, directed=False,
-    seed=None):
+def gaussian_random_partition_graph(n, s, v, p_in, p_out, directed=False, seed=None):
     """Generate a Gaussian random partition graph.

     A Gaussian random partition graph is created by creating k partitions
@@ -315,7 +368,20 @@ def gaussian_random_partition_graph(n, s, v, p_in, p_out, directed=False,
        Experiments on Graph Clustering Algorithms,
        In the proceedings of the 11th Europ. Symp. Algorithms, 2003.
     """
-    pass
+    if s > n:
+        raise nx.NetworkXError("s must be <= n")
+    assigned = 0
+    sizes = []
+    while True:
+        size = int(seed.gauss(s, s / v + 0.5))
+        if size < 1:  # how to handle 0 or negative sizes?
+            continue
+        if assigned + size >= n:
+            sizes.append(n - assigned)
+            break
+        assigned += size
+        sizes.append(size)
+    return random_partition_graph(sizes, p_in, p_out, seed=seed, directed=directed)


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -357,7 +423,21 @@ def ring_of_cliques(num_cliques, clique_size):
     connect it with the next clique. Instead, the `ring_of_cliques` graph
     simply adds the link without removing any link from the cliques.
     """
-    pass
+    if num_cliques < 2:
+        raise nx.NetworkXError("A ring of cliques must have at least two cliques")
+    if clique_size < 2:
+        raise nx.NetworkXError("The cliques must have at least two nodes")
+
+    G = nx.Graph()
+    for i in range(num_cliques):
+        edges = itertools.combinations(
+            range(i * clique_size, i * clique_size + clique_size), 2
+        )
+        G.add_edges_from(edges)
+        G.add_edge(
+            i * clique_size + 1, (i + 1) * clique_size % (num_cliques * clique_size)
+        )
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -398,13 +478,26 @@ def windmill_graph(n, k):
     Note that windmill graphs are usually denoted `Wd(k,n)`, so the parameters
     are in the opposite order as the parameters of this method.
     """
-    pass
+    if n < 2:
+        msg = "A windmill graph must have at least two cliques"
+        raise nx.NetworkXError(msg)
+    if k < 2:
+        raise nx.NetworkXError("The cliques must have at least two nodes")
+
+    G = nx.disjoint_union_all(
+        itertools.chain(
+            [nx.complete_graph(k)], (nx.complete_graph(k - 1) for _ in range(n - 1))
+        )
+    )
+    G.add_edges_from((0, i) for i in range(k, G.number_of_nodes()))
+    return G
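
A quick size and degree check for the construction above (illustration only): node 0 is the shared hub, and the disjoint union contributes $k + (n-1)(k-1)$ nodes in total.

```python
import networkx as nx

G = nx.windmill_graph(4, 5)  # four copies of K_5 glued at one hub
print(len(G))                # 5 + 3 * 4 == 17 nodes
print(G.degree(0))           # the hub touches all 16 other nodes
print(nx.number_connected_components(G))  # 1
```
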


 @py_random_state(3)
 @nx._dispatchable(graphs=None, returns_graph=True)
-def stochastic_block_model(sizes, p, nodelist=None, seed=None, directed=
-    False, selfloops=False, sparse=True):
+def stochastic_block_model(
+    sizes, p, nodelist=None, seed=None, directed=False, selfloops=False, sparse=True
+):
     """Returns a stochastic block model graph.

     This model partitions the nodes in blocks of arbitrary sizes, and places
@@ -480,7 +573,92 @@ def stochastic_block_model(sizes, p, nodelist=None, seed=None, directed=
            "Stochastic blockmodels: First steps",
            Social networks, 5(2), 109-137, 1983.
     """
-    pass
+    # Check if dimensions match
+    if len(sizes) != len(p):
+        raise nx.NetworkXException("'sizes' and 'p' do not match.")
+    # Check for probability symmetry (undirected) and shape (directed)
+    for row in p:
+        if len(p) != len(row):
+            raise nx.NetworkXException("'p' must be a square matrix.")
+    if not directed:
+        p_transpose = [list(i) for i in zip(*p)]
+        for i in zip(p, p_transpose):
+            for j in zip(i[0], i[1]):
+                if abs(j[0] - j[1]) > 1e-08:
+                    raise nx.NetworkXException("'p' must be symmetric.")
+    # Check for probability range
+    for row in p:
+        for prob in row:
+            if prob < 0 or prob > 1:
+                raise nx.NetworkXException("Entries of 'p' not in [0,1].")
+    # Check for nodelist consistency
+    if nodelist is not None:
+        if len(nodelist) != sum(sizes):
+            raise nx.NetworkXException("'nodelist' and 'sizes' do not match.")
+        if len(nodelist) != len(set(nodelist)):
+            raise nx.NetworkXException("nodelist contains duplicate.")
+    else:
+        nodelist = range(sum(sizes))
+
+    # Setup the graph conditionally to the directed switch.
+    block_range = range(len(sizes))
+    if directed:
+        g = nx.DiGraph()
+        block_iter = itertools.product(block_range, block_range)
+    else:
+        g = nx.Graph()
+        block_iter = itertools.combinations_with_replacement(block_range, 2)
+    # Split nodelist in a partition (list of sets).
+    size_cumsum = [sum(sizes[0:x]) for x in range(len(sizes) + 1)]
+    g.graph["partition"] = [
+        set(nodelist[size_cumsum[x] : size_cumsum[x + 1]])
+        for x in range(len(size_cumsum) - 1)
+    ]
+    # Setup nodes and graph name
+    for block_id, nodes in enumerate(g.graph["partition"]):
+        for node in nodes:
+            g.add_node(node, block=block_id)
+
+    g.name = "stochastic_block_model"
+
+    # Test for edge existence
+    parts = g.graph["partition"]
+    for i, j in block_iter:
+        if i == j:
+            if directed:
+                if selfloops:
+                    edges = itertools.product(parts[i], parts[i])
+                else:
+                    edges = itertools.permutations(parts[i], 2)
+            else:
+                edges = itertools.combinations(parts[i], 2)
+                if selfloops:
+                    edges = itertools.chain(edges, zip(parts[i], parts[i]))
+            for e in edges:
+                if seed.random() < p[i][j]:
+                    g.add_edge(*e)
+        else:
+            edges = itertools.product(parts[i], parts[j])
+            if sparse:
+                if p[i][j] == 1:  # Test edge cases p_ij = 0 or 1
+                    for e in edges:
+                        g.add_edge(*e)
+                elif p[i][j] > 0:
+                    while True:
+                        try:
+                            logrand = math.log(seed.random())
+                            skip = math.floor(logrand / math.log(1 - p[i][j]))
+                            # consume "skip" edges
+                            next(itertools.islice(edges, skip, skip), None)
+                            e = next(edges)
+                            g.add_edge(*e)  # __safe
+                        except StopIteration:
+                            break
+            else:
+                for e in edges:
+                    if seed.random() < p[i][j]:
+                        g.add_edge(*e)  # __safe
+    return g
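
A usage sketch for the generator above (illustrative; the exact edge set depends on the seed). With `p_in` much larger than `p_out`, most edges fall inside blocks, and the block structure is recorded both on the graph and per node:

```python
import networkx as nx

sizes = [40, 30, 30]
p = [
    [0.25, 0.02, 0.02],
    [0.02, 0.25, 0.02],
    [0.02, 0.02, 0.25],
]
G = nx.stochastic_block_model(sizes, p, seed=7)

print([len(b) for b in G.graph["partition"]])     # [40, 30, 30]
print(G.nodes[0]["block"], G.nodes[99]["block"])  # 0 2
```
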


 def _zipf_rv_below(gamma, xmin, threshold, seed):
@@ -489,7 +667,10 @@ def _zipf_rv_below(gamma, xmin, threshold, seed):
     Repeatedly draws values from the Zipf distribution until the
     threshold is met, then returns that value.
     """
-    pass
+    result = nx.utils.zipf_rv(gamma, xmin, seed)
+    while result > threshold:
+        result = nx.utils.zipf_rv(gamma, xmin, seed)
+    return result


 def _powerlaw_sequence(gamma, low, high, condition, length, max_iters, seed):
@@ -514,7 +695,13 @@ def _powerlaw_sequence(gamma, low, high, condition, length, max_iters, seed):
         Indicator of random number generation state.
         See :ref:`Randomness<randomness>`.
     """
-    pass
+    for i in range(max_iters):
+        seq = []
+        while not length(seq):
+            seq.append(_zipf_rv_below(gamma, low, high, seed))
+        if condition(seq):
+            return seq
+    raise nx.ExceededMaxIterations("Could not create power law sequence")


 def _hurwitz_zeta(x, q, tolerance):
@@ -525,13 +712,46 @@ def _hurwitz_zeta(x, q, tolerance):
     This function repeatedly computes subsequent partial sums until
     convergence, as decided by ``tolerance``.
     """
-    pass
+    z = 0
+    z_prev = -float("inf")
+    k = 0
+    while abs(z - z_prev) > tolerance:
+        z_prev = z
+        z += 1 / ((k + q) ** x)
+        k += 1
+    return z
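
For reference, a standalone mirror of this partial-sum loop (the name `hurwitz_zeta` here is a hypothetical stand-in for the private helper; illustration only). Note that the stopping rule compares consecutive partial sums, i.e. it stops once the latest term drops below ``tolerance``, which bounds the last term rather than the true truncation error:

```python
def hurwitz_zeta(x, q, tolerance=1e-10):
    """Partial sums of sum_k 1/(k + q)**x until the next term is tiny."""
    z, z_prev, k = 0.0, float("-inf"), 0
    while abs(z - z_prev) > tolerance:
        z_prev = z
        z += 1 / (k + q) ** x  # add the k-th term
        k += 1
    return z


# With q == 1 this reduces to the Riemann zeta function.
print(round(hurwitz_zeta(2.5, 1), 4))  # ~1.3415
try:
    from scipy.special import zeta     # cross-check when SciPy is present

    print(round(float(zeta(2.5, 1)), 4))
except ImportError:
    pass
```
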


-def _generate_min_degree(gamma, average_degree, max_degree, tolerance,
-    max_iters):
+def _generate_min_degree(gamma, average_degree, max_degree, tolerance, max_iters):
     """Returns a minimum degree from the given average degree."""
-    pass
+    # Defines zeta function whether or not Scipy is available
+    try:
+        from scipy.special import zeta
+    except ImportError:
+
+        def zeta(x, q):
+            return _hurwitz_zeta(x, q, tolerance)
+
+    min_deg_top = max_degree
+    min_deg_bot = 1
+    min_deg_mid = (min_deg_top - min_deg_bot) / 2 + min_deg_bot
+    itrs = 0
+    mid_avg_deg = 0
+    while abs(mid_avg_deg - average_degree) > tolerance:
+        if itrs > max_iters:
+            raise nx.ExceededMaxIterations("Could not match average_degree")
+        mid_avg_deg = 0
+        for x in range(int(min_deg_mid), max_degree + 1):
+            mid_avg_deg += (x ** (-gamma + 1)) / zeta(gamma, min_deg_mid)
+        if mid_avg_deg > average_degree:
+            min_deg_top = min_deg_mid
+            min_deg_mid = (min_deg_top - min_deg_bot) / 2 + min_deg_bot
+        else:
+            min_deg_bot = min_deg_mid
+            min_deg_mid = (min_deg_top - min_deg_bot) / 2 + min_deg_bot
+        itrs += 1
+    # return int(min_deg_mid + 0.5)
+    return round(min_deg_mid)


 def _generate_communities(degree_seq, community_sizes, mu, max_iters, seed):
@@ -560,15 +780,48 @@ def _generate_communities(degree_seq, community_sizes, mu, max_iters, seed):
     ..., *n* - 1}, where *n* is the length of ``degree_seq``.

     """
-    pass
+    # This assumes the nodes in the graph will be natural numbers.
+    result = [set() for _ in community_sizes]
+    n = len(degree_seq)
+    free = list(range(n))
+    for i in range(max_iters):
+        v = free.pop()
+        c = seed.choice(range(len(community_sizes)))
+        # s = int(degree_seq[v] * (1 - mu) + 0.5)
+        s = round(degree_seq[v] * (1 - mu))
+        # If the community is large enough, add the node to the chosen
+        # community. Otherwise, return it to the list of unaffiliated
+        # nodes.
+        if s < community_sizes[c]:
+            result[c].add(v)
+        else:
+            free.append(v)
+        # If the community is too big, remove a node from it.
+        if len(result[c]) > community_sizes[c]:
+            free.append(result[c].pop())
+        if not free:
+            return result
+    msg = "Could not assign communities; try increasing min_community"
+    raise nx.ExceededMaxIterations(msg)


 @py_random_state(11)
 @nx._dispatchable(graphs=None, returns_graph=True)
-def LFR_benchmark_graph(n, tau1, tau2, mu, average_degree=None, min_degree=
-    None, max_degree=None, min_community=None, max_community=None, tol=
-    1e-07, max_iters=500, seed=None):
-    """Returns the LFR benchmark graph.
+def LFR_benchmark_graph(
+    n,
+    tau1,
+    tau2,
+    mu,
+    average_degree=None,
+    min_degree=None,
+    max_degree=None,
+    min_community=None,
+    max_community=None,
+    tol=1.0e-7,
+    max_iters=500,
+    seed=None,
+):
+    r"""Returns the LFR benchmark graph.

     This algorithm proceeds as follows:

@@ -581,9 +834,9 @@ def LFR_benchmark_graph(n, tau1, tau2, mu, average_degree=None, min_degree=
           case a suitable minimum degree will be found.

        ``max_degree`` can also be specified, otherwise it will be set to
-       ``n``. Each node *u* will have $\\mu \\mathrm{deg}(u)$ edges
+       ``n``. Each node *u* will have $\mu \mathrm{deg}(u)$ edges
        joining it to nodes in communities other than its own and $(1 -
-       \\mu) \\mathrm{deg}(u)$ edges joining it to nodes in its own
+       \mu) \mathrm{deg}(u)$ edges joining it to nodes in its own
        community.
     2) Generate community sizes according to a power law distribution
        with exponent ``tau2``. If ``min_community`` and
@@ -592,12 +845,12 @@ def LFR_benchmark_graph(n, tau1, tau2, mu, average_degree=None, min_degree=
        are generated until the sum of their sizes equals ``n``.
     3) Each node will be randomly assigned a community with the
        condition that the community is large enough for the node's
-       intra-community degree, $(1 - \\mu) \\mathrm{deg}(u)$ as
+       intra-community degree, $(1 - \mu) \mathrm{deg}(u)$ as
        described in step 2. If a community grows too large, a random node
        will be selected for reassignment to a new community, until all
        nodes have been assigned a community.
-    4) Each node *u* then adds $(1 - \\mu) \\mathrm{deg}(u)$
-       intra-community edges and $\\mu \\mathrm{deg}(u)$ inter-community
+    4) Each node *u* then adds $(1 - \mu) \mathrm{deg}(u)$
+       intra-community edges and $\mu \mathrm{deg}(u)$ inter-community
        edges.

     Parameters
@@ -736,4 +989,81 @@ def LFR_benchmark_graph(n, tau1, tau2, mu, average_degree=None, min_degree=
     .. [2] https://www.santofortunato.net/resources

     """
-    pass
+    # Perform some basic parameter validation.
+    if not tau1 > 1:
+        raise nx.NetworkXError("tau1 must be greater than one")
+    if not tau2 > 1:
+        raise nx.NetworkXError("tau2 must be greater than one")
+    if not 0 <= mu <= 1:
+        raise nx.NetworkXError("mu must be in the interval [0, 1]")
+
+    # Validate parameters for generating the degree sequence.
+    if max_degree is None:
+        max_degree = n
+    elif not 0 < max_degree <= n:
+        raise nx.NetworkXError("max_degree must be in the interval (0, n]")
+    if not ((min_degree is None) ^ (average_degree is None)):
+        raise nx.NetworkXError(
+            "Must assign exactly one of min_degree and average_degree"
+        )
+    if min_degree is None:
+        min_degree = _generate_min_degree(
+            tau1, average_degree, max_degree, tol, max_iters
+        )
+
+    # Generate a degree sequence with a power law distribution.
+    low, high = min_degree, max_degree
+
+    def condition(seq):
+        return sum(seq) % 2 == 0
+
+    def length(seq):
+        return len(seq) >= n
+
+    deg_seq = _powerlaw_sequence(tau1, low, high, condition, length, max_iters, seed)
+
+    # Validate parameters for generating the community size sequence.
+    if min_community is None:
+        min_community = min(deg_seq)
+    if max_community is None:
+        max_community = max(deg_seq)
+
+    # Generate a community size sequence with a power law distribution.
+    #
+    # TODO The original code incremented the number of iterations each
+    # time a new Zipf random value was drawn from the distribution. This
+    # differed from the way the number of iterations was incremented in
+    # `_powerlaw_degree_sequence`, so this code was changed to match
+    # that one. As a result, this code is allowed many more chances to
+    # generate a valid community size sequence.
+    low, high = min_community, max_community
+
+    def condition(seq):
+        return sum(seq) == n
+
+    def length(seq):
+        return sum(seq) >= n
+
+    comms = _powerlaw_sequence(tau2, low, high, condition, length, max_iters, seed)
+
+    # Generate the communities based on the given degree sequence and
+    # community sizes.
+    max_iters *= 10 * n
+    communities = _generate_communities(deg_seq, comms, mu, max_iters, seed)
+
+    # Finally, generate the benchmark graph based on the given
+    # communities, joining nodes according to the intra- and
+    # inter-community degrees.
+    G = nx.Graph()
+    G.add_nodes_from(range(n))
+    for c in communities:
+        for u in c:
+            while G.degree(u) < round(deg_seq[u] * (1 - mu)):
+                v = seed.choice(list(c))
+                G.add_edge(u, v)
+            while G.degree(u) < deg_seq[u]:
+                v = seed.choice(range(n))
+                if v not in c:
+                    G.add_edge(u, v)
+            G.nodes[u]["community"] = c
+    return G
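
A usage sketch following the parameters from the function's own docstring example (illustration only). Each node stores its community as a shared `set` object, so deduplicating through `frozenset` recovers the planted partition:

```python
import networkx as nx

G = nx.LFR_benchmark_graph(
    250, tau1=3, tau2=1.5, mu=0.1, average_degree=5, min_community=20, seed=10
)
print(len(G))  # 250

communities = {frozenset(G.nodes[v]["community"]) for v in G}
# The communities partition the node set.
print(sum(len(c) for c in communities) == len(G))  # True
```
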
diff --git a/networkx/generators/degree_seq.py b/networkx/generators/degree_seq.py
index eefee8b01..6bc85e2be 100644
--- a/networkx/generators/degree_seq.py
+++ b/networkx/generators/degree_seq.py
@@ -1,15 +1,24 @@
 """Generate graphs with a given degree sequence or expected degree sequence.
 """
+
 import heapq
 import math
 from itertools import chain, combinations, zip_longest
 from operator import itemgetter
+
 import networkx as nx
 from networkx.utils import py_random_state, random_weighted_sample
-__all__ = ['configuration_model', 'directed_configuration_model',
-    'expected_degree_graph', 'havel_hakimi_graph',
-    'directed_havel_hakimi_graph', 'degree_sequence_tree',
-    'random_degree_sequence_graph']
+
+__all__ = [
+    "configuration_model",
+    "directed_configuration_model",
+    "expected_degree_graph",
+    "havel_hakimi_graph",
+    "directed_havel_hakimi_graph",
+    "degree_sequence_tree",
+    "random_degree_sequence_graph",
+]
+
 chaini = chain.from_iterable


@@ -42,11 +51,12 @@ def _to_stublist(degree_sequence):
     [0, 0, 2]

     """
-    pass
+    return list(chaini([n] * d for n, d in enumerate(degree_sequence)))


-def _configuration_model(deg_sequence, create_using, directed=False,
-    in_deg_sequence=None, seed=None):
+def _configuration_model(
+    deg_sequence, create_using, directed=False, in_deg_sequence=None, seed=None
+):
     """Helper function for generating either undirected or directed
     configuration model graphs.

@@ -78,7 +88,39 @@ def _configuration_model(deg_sequence, create_using, directed=False,
     functions.

     """
-    pass
+    n = len(deg_sequence)
+    G = nx.empty_graph(n, create_using)
+    # If empty, return the null graph immediately.
+    if n == 0:
+        return G
+    # Build a list of available degree-repeated nodes.  For example,
+    # for degree sequence [3, 2, 1, 1, 1], the "stub list" is
+    # initially [0, 0, 0, 1, 1, 2, 3, 4], that is, node 0 has degree
+    # 3 and thus is repeated 3 times, etc.
+    #
+    # Also, shuffle the stub list in order to get a random sequence of
+    # node pairs.
+    if directed:
+        pairs = zip_longest(deg_sequence, in_deg_sequence, fillvalue=0)
+        # Unzip the list of pairs into a pair of lists.
+        out_deg, in_deg = zip(*pairs)
+
+        out_stublist = _to_stublist(out_deg)
+        in_stublist = _to_stublist(in_deg)
+
+        seed.shuffle(out_stublist)
+        seed.shuffle(in_stublist)
+    else:
+        stublist = _to_stublist(deg_sequence)
+        # Choose a random balanced bipartition of the stublist, which
+        # gives a random pairing of nodes. In this implementation, we
+        # shuffle the list and then split it in half.
+        n = len(stublist)
+        half = n // 2
+        seed.shuffle(stublist)
+        out_stublist, in_stublist = stublist[:half], stublist[half:]
+    G.add_edges_from(zip(out_stublist, in_stublist))
+    return G


 @py_random_state(2)
@@ -171,13 +213,24 @@ def configuration_model(deg_sequence, create_using=None, seed=None):
     >>> G.remove_edges_from(nx.selfloop_edges(G))

     """
-    pass
+    if sum(deg_sequence) % 2 != 0:
+        msg = "Invalid degree sequence: sum of degrees must be even, not odd"
+        raise nx.NetworkXError(msg)
+
+    G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
+    if G.is_directed():
+        raise nx.NetworkXNotImplemented("not implemented for directed graphs")
+
+    G = _configuration_model(deg_sequence, G, seed=seed)
+
+    return G
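
A minimal round-trip sketch (illustration only): the resulting multigraph realizes the degree sequence exactly, with self-loops counting twice toward degree, and a common cleanup step turns it into a simple graph at the cost of that exactness:

```python
import networkx as nx

deg = [3, 3, 2, 2, 1, 1]  # sum is even, as required
G = nx.configuration_model(deg, seed=42)

# Degrees are reproduced exactly.
print(sorted(d for _, d in G.degree()))  # [1, 1, 2, 2, 3, 3]

# Optional cleanup: collapse parallel edges and drop self-loops.
H = nx.Graph(G)
H.remove_edges_from(nx.selfloop_edges(H))
```
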


 @py_random_state(3)
 @nx._dispatchable(graphs=None, returns_graph=True)
-def directed_configuration_model(in_degree_sequence, out_degree_sequence,
-    create_using=None, seed=None):
+def directed_configuration_model(
+    in_degree_sequence, out_degree_sequence, create_using=None, seed=None
+):
     """Returns a directed_random graph with the given degree sequences.

     The configuration model generates a random directed pseudograph
@@ -257,21 +310,37 @@ def directed_configuration_model(in_degree_sequence, out_degree_sequence,
     >>> D.remove_edges_from(nx.selfloop_edges(D))

     """
-    pass
+    if sum(in_degree_sequence) != sum(out_degree_sequence):
+        msg = "Invalid degree sequences: sequences must have equal sums"
+        raise nx.NetworkXError(msg)
+
+    if create_using is None:
+        create_using = nx.MultiDiGraph
+
+    G = _configuration_model(
+        out_degree_sequence,
+        create_using,
+        directed=True,
+        in_deg_sequence=in_degree_sequence,
+        seed=seed,
+    )
+
+    name = "directed configuration_model {} nodes {} edges"
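+    # NOTE: the "name" assignment above is unused leftover from when generated
+    # graphs carried a name attribute; it is kept verbatim from upstream.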
+    return G


 @py_random_state(1)
 @nx._dispatchable(graphs=None, returns_graph=True)
 def expected_degree_graph(w, seed=None, selfloops=True):
-    """Returns a random graph with given expected degrees.
+    r"""Returns a random graph with given expected degrees.

-    Given a sequence of expected degrees $W=(w_0,w_1,\\ldots,w_{n-1})$
+    Given a sequence of expected degrees $W=(w_0,w_1,\ldots,w_{n-1})$
     of length $n$ this algorithm assigns an edge between node $u$ and
     node $v$ with probability

     .. math::

-       p_{uv} = \\frac{w_u w_v}{\\sum_k w_k} .
+       p_{uv} = \frac{w_u w_v}{\sum_k w_k} .

     Parameters
     ----------
@@ -297,7 +366,7 @@ def expected_degree_graph(w, seed=None, selfloops=True):
     The nodes have integer labels corresponding to index of expected degrees
     input sequence.

-    The complexity of this algorithm is $\\mathcal{O}(n+m)$ where $n$ is the
+    The complexity of this algorithm is $\mathcal{O}(n+m)$ where $n$ is the
     number of nodes and $m$ is the expected number of edges.

     The model in [1]_ includes the possibility of self-loop edges.
@@ -311,8 +380,8 @@ def expected_degree_graph(w, seed=None, selfloops=True):

     .. math::

-       E[deg(u)] = \\sum_{v \\ne u} p_{uv}
-                = w_u \\left( 1 - \\frac{w_u}{\\sum_k w_k} \\right) .
+       E[deg(u)] = \sum_{v \ne u} p_{uv}
+                = w_u \left( 1 - \frac{w_u}{\sum_k w_k} \right) .


     NetworkX uses the standard convention that a self-loop edge counts 2
@@ -320,8 +389,8 @@ def expected_degree_graph(w, seed=None, selfloops=True):

     .. math::

-       E[deg(u)] =  \\sum_{v \\ne u} p_{uv}  + 2 p_{uu}
-                = w_u \\left( 1 + \\frac{w_u}{\\sum_k w_k} \\right) .
+       E[deg(u)] =  \sum_{v \ne u} p_{uv}  + 2 p_{uu}
+                = w_u \left( 1 + \frac{w_u}{\sum_k w_k} \right) .

     References
     ----------
@@ -334,7 +403,40 @@ def expected_degree_graph(w, seed=None, selfloops=True):
        Alan Frieze, Paul Horn, and Paweł Prałat (Eds), LNCS 6732,
        pp. 115-126, 2011.
     """
-    pass
+    n = len(w)
+    G = nx.empty_graph(n)
+
+    # If there are no nodes or no edges in the graph, return the empty graph.
+    if n == 0 or max(w) == 0:
+        return G
+
+    rho = 1 / sum(w)
+    # Sort the weights in decreasing order. The original order of the
+    # weights dictates the order of the (integer) node labels, so we
+    # need to remember the permutation applied in the sorting.
+    order = sorted(enumerate(w), key=itemgetter(1), reverse=True)
+    mapping = {c: u for c, (u, v) in enumerate(order)}
+    seq = [v for u, v in order]
+    last = n
+    if not selfloops:
+        last -= 1
+    for u in range(last):
+        v = u
+        if not selfloops:
+            v += 1
+        factor = seq[u] * rho
+        p = min(seq[v] * factor, 1)
+        while v < n and p > 0:
+            if p != 1:
+                r = seed.random()
+                v += math.floor(math.log(r, 1 - p))
+            if v < n:
+                q = min(seq[v] * factor, 1)
+                if seed.random() < q / p:
+                    G.add_edge(mapping[u], mapping[v])
+                v += 1
+                p = q
+    return G
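
A quick empirical check of the expected-degree property (seed-dependent, illustration only). Without self-loops, $E[\deg(u)] = w_u(1 - w_u / \sum_k w_k)$, which is $10 \cdot 0.99 = 9.9$ here:

```python
import networkx as nx

w = [10] * 100  # every node has expected degree ~10
G = nx.expected_degree_graph(w, seed=1, selfloops=False)

# The empirical mean degree should land near 9.9.
print(2 * G.number_of_edges() / len(G))
```
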


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -377,12 +479,61 @@ def havel_hakimi_graph(deg_sequence, create_using=None):
        Algorithms for Constructing Graphs and Digraphs with Given Valences
        and Factors  Discrete Mathematics, 6(1), pp. 79-88 (1973)
     """
-    pass
+    if not nx.is_graphical(deg_sequence):
+        raise nx.NetworkXError("Invalid degree sequence")
+
+    p = len(deg_sequence)
+    G = nx.empty_graph(p, create_using)
+    if G.is_directed():
+        raise nx.NetworkXError("Directed graphs are not supported")
+    num_degs = [[] for i in range(p)]
+    dmax, dsum, n = 0, 0, 0
+    for d in deg_sequence:
+        # Process only the non-zero integers
+        if d > 0:
+            num_degs[d].append(n)
+            dmax, dsum, n = max(dmax, d), dsum + d, n + 1
+    # Return graph if no edges
+    if n == 0:
+        return G
+
+    modstubs = [(0, 0)] * (dmax + 1)
+    # Successively reduce degree sequence by removing the maximum degree
+    while n > 0:
+        # Retrieve the maximum degree in the sequence
+        while len(num_degs[dmax]) == 0:
+            dmax -= 1
+        # If there are not enough stubs to connect to, then the sequence is
+        # not graphical
+        if dmax > n - 1:
+            raise nx.NetworkXError("Non-graphical integer sequence")
+
+        # Remove largest stub in list
+        source = num_degs[dmax].pop()
+        n -= 1
+        # Reduce the next dmax largest stubs
+        mslen = 0
+        k = dmax
+        for i in range(dmax):
+            while len(num_degs[k]) == 0:
+                k -= 1
+            target = num_degs[k].pop()
+            G.add_edge(source, target)
+            n -= 1
+            if k > 1:
+                modstubs[mslen] = (k - 1, target)
+                mslen += 1
+        # Add back to the list any nonzero stubs that were removed
+        for i in range(mslen):
+            (stubval, stubtarget) = modstubs[i]
+            num_degs[stubval].append(stubtarget)
+            n += 1
+
+    return G
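
In contrast to the configuration model, this construction is deterministic and always produces a simple graph realizing the sequence. A short sketch (illustration only):

```python
import networkx as nx

deg = [3, 3, 2, 2, 1, 1]
assert nx.is_graphical(deg)
G = nx.havel_hakimi_graph(deg)

print(sorted(d for _, d in G.degree()))  # [1, 1, 2, 2, 3, 3]
print(nx.number_of_selfloops(G))         # 0: the result is simple
```
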


 @nx._dispatchable(graphs=None, returns_graph=True)
-def directed_havel_hakimi_graph(in_deg_sequence, out_deg_sequence,
-    create_using=None):
+def directed_havel_hakimi_graph(in_deg_sequence, out_deg_sequence, create_using=None):
     """Returns a directed graph with the given degree sequences.

     Parameters
@@ -420,7 +571,77 @@ def directed_havel_hakimi_graph(in_deg_sequence, out_deg_sequence,
        Algorithms for Constructing Graphs and Digraphs with Given Valences
        and Factors Discrete Mathematics, 6(1), pp. 79-88 (1973)
     """
-    pass
+    in_deg_sequence = nx.utils.make_list_of_ints(in_deg_sequence)
+    out_deg_sequence = nx.utils.make_list_of_ints(out_deg_sequence)
+
+    # Process the sequences and form two heaps to store degree pairs with
+    # either zero or nonzero out degrees
+    sumin, sumout = 0, 0
+    nin, nout = len(in_deg_sequence), len(out_deg_sequence)
+    maxn = max(nin, nout)
+    G = nx.empty_graph(maxn, create_using, default=nx.DiGraph)
+    if maxn == 0:
+        return G
+    maxin = 0
+    stubheap, zeroheap = [], []
+    for n in range(maxn):
+        in_deg, out_deg = 0, 0
+        if n < nout:
+            out_deg = out_deg_sequence[n]
+        if n < nin:
+            in_deg = in_deg_sequence[n]
+        if in_deg < 0 or out_deg < 0:
+            raise nx.NetworkXError(
+                "Invalid degree sequences. Sequence values must be positive."
+            )
+        sumin, sumout, maxin = sumin + in_deg, sumout + out_deg, max(maxin, in_deg)
+        if in_deg > 0:
+            stubheap.append((-1 * out_deg, -1 * in_deg, n))
+        elif out_deg > 0:
+            zeroheap.append((-1 * out_deg, n))
+    if sumin != sumout:
+        raise nx.NetworkXError(
+            "Invalid degree sequences. Sequences must have equal sums."
+        )
+    heapq.heapify(stubheap)
+    heapq.heapify(zeroheap)
+
+    modstubs = [(0, 0, 0)] * (maxin + 1)
+    # Successively reduce degree sequence by removing the maximum
+    while stubheap:
+        # Remove first value in the sequence with a non-zero in degree
+        (freeout, freein, target) = heapq.heappop(stubheap)
+        freein *= -1
+        if freein > len(stubheap) + len(zeroheap):
+            raise nx.NetworkXError("Non-digraphical integer sequence")
+
+        # Attach arcs from the nodes with the most stubs
+        mslen = 0
+        for i in range(freein):
+            if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0][0]):
+                (stubout, stubsource) = heapq.heappop(zeroheap)
+                stubin = 0
+            else:
+                (stubout, stubin, stubsource) = heapq.heappop(stubheap)
+            if stubout == 0:
+                raise nx.NetworkXError("Non-digraphical integer sequence")
+            G.add_edge(stubsource, target)
+            # Check if source is now totally connected
+            if stubout + 1 < 0 or stubin < 0:
+                modstubs[mslen] = (stubout + 1, stubin, stubsource)
+                mslen += 1
+
+        # Add the nodes back to the heaps that still have available stubs
+        for i in range(mslen):
+            stub = modstubs[i]
+            if stub[1] < 0:
+                heapq.heappush(stubheap, stub)
+            else:
+                heapq.heappush(zeroheap, (stub[0], stub[2]))
+        if freeout < 0:
+            heapq.heappush(zeroheap, (freeout, target))
+
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -431,13 +652,48 @@ def degree_sequence_tree(deg_sequence, create_using=None):
     the degree sequence must have
     len(deg_sequence)-sum(deg_sequence)/2=1
     """
-    pass
+    # The sum of the degree sequence must be even (for any undirected graph).
+    degree_sum = sum(deg_sequence)
+    if degree_sum % 2 != 0:
+        msg = "Invalid degree sequence: sum of degrees must be even, not odd"
+        raise nx.NetworkXError(msg)
+    if len(deg_sequence) - degree_sum // 2 != 1:
+        msg = (
+            "Invalid degree sequence: tree must have number of nodes equal"
+            " to one less than the number of edges"
+        )
+        raise nx.NetworkXError(msg)
+    G = nx.empty_graph(0, create_using)
+    if G.is_directed():
+        raise nx.NetworkXError("Directed Graph not supported")
+
+    # Sort all degrees greater than 1 in decreasing order.
+    #
+    # TODO Does this need to be sorted in reverse order?
+    deg = sorted((s for s in deg_sequence if s > 1), reverse=True)
+
+    # make path graph as backbone
+    n = len(deg) + 2
+    nx.add_path(G, range(n))
+    last = n
+
+    # add the leaves
+    for source in range(1, n - 1):
+        nedges = deg.pop() - 2
+        for target in range(last, last + nedges):
+            G.add_edge(source, target)
+        last += nedges
+
+    # in case we added one too many
+    if len(G) > len(deg_sequence):
+        G.remove_node(0)
+    return G
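
A small sketch (illustration only). For this sequence $n - \sum_i d_i / 2 = 6 - 5 = 1$, so a tree realization exists:

```python
import networkx as nx

deg = [3, 2, 2, 1, 1, 1]  # 6 nodes, 5 edges
T = nx.degree_sequence_tree(deg)
print(nx.is_tree(T))                     # True
print(sorted(d for _, d in T.degree()))  # [1, 1, 1, 2, 2, 3]
```
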


 @py_random_state(1)
 @nx._dispatchable(graphs=None, returns_graph=True)
 def random_degree_sequence_graph(sequence, seed=None, tries=10):
-    """Returns a simple random graph with the given degree sequence.
+    r"""Returns a simple random graph with the given degree sequence.

     If the maximum degree $d_m$ in the sequence is $O(m^{1/4})$ then the
     algorithm produces almost uniform random graphs in $O(m d_m)$ time
@@ -489,25 +745,124 @@ def random_degree_sequence_graph(sequence, seed=None, tries=10):
     >>> sorted(d for n, d in G.degree())
     [1, 2, 2, 3]
     """
-    pass
+    DSRG = DegreeSequenceRandomGraph(sequence, seed)
+    for try_n in range(tries):
+        try:
+            return DSRG.generate()
+        except nx.NetworkXUnfeasible:
+            pass
+    raise nx.NetworkXError(f"failed to generate graph in {tries} tries")


 class DegreeSequenceRandomGraph:
-
+    # class to generate random graphs with a given degree sequence
+    # use random_degree_sequence_graph()
     def __init__(self, degree, rng):
         if not nx.is_graphical(degree):
-            raise nx.NetworkXUnfeasible('degree sequence is not graphical')
+            raise nx.NetworkXUnfeasible("degree sequence is not graphical")
         self.rng = rng
         self.degree = list(degree)
-        self.m = sum(self.degree) / 2.0
+        # node labels are integers 0,...,n-1
+        self.m = sum(self.degree) / 2.0  # number of edges
         try:
-            self.dmax = max(self.degree)
+            self.dmax = max(self.degree)  # maximum degree
         except ValueError:
             self.dmax = 0

+    def generate(self):
+        # remaining_degree is mapping from int->remaining degree
+        self.remaining_degree = dict(enumerate(self.degree))
+        # add all nodes to make sure we get isolated nodes
+        self.graph = nx.Graph()
+        self.graph.add_nodes_from(self.remaining_degree)
+        # remove zero degree nodes
+        for n, d in list(self.remaining_degree.items()):
+            if d == 0:
+                del self.remaining_degree[n]
+        if len(self.remaining_degree) > 0:
+            # build graph in three phases according to how many unmatched edges
+            self.phase1()
+            self.phase2()
+            self.phase3()
+        return self.graph
+
+    def update_remaining(self, u, v, aux_graph=None):
+        # decrement remaining nodes, modify auxiliary graph if in phase3
+        if aux_graph is not None:
+            # remove edges from auxiliary graph
+            aux_graph.remove_edge(u, v)
+        if self.remaining_degree[u] == 1:
+            del self.remaining_degree[u]
+            if aux_graph is not None:
+                aux_graph.remove_node(u)
+        else:
+            self.remaining_degree[u] -= 1
+        if self.remaining_degree[v] == 1:
+            del self.remaining_degree[v]
+            if aux_graph is not None:
+                aux_graph.remove_node(v)
+        else:
+            self.remaining_degree[v] -= 1
+
+    def p(self, u, v):
+        # degree probability
+        return 1 - self.degree[u] * self.degree[v] / (4.0 * self.m)
+
+    def q(self, u, v):
+        # remaining degree probability
+        norm = max(self.remaining_degree.values()) ** 2
+        return self.remaining_degree[u] * self.remaining_degree[v] / norm
+
     def suitable_edge(self):
         """Returns True if and only if an arbitrary remaining node can
         potentially be joined with some other remaining node.

         """
-        pass
+        nodes = iter(self.remaining_degree)
+        u = next(nodes)
+        return any(v not in self.graph[u] for v in nodes)
+
+    def phase1(self):
+        # choose node pairs from (degree) weighted distribution
+        rem_deg = self.remaining_degree
+        while sum(rem_deg.values()) >= 2 * self.dmax**2:
+            u, v = sorted(random_weighted_sample(rem_deg, 2, self.rng))
+            if self.graph.has_edge(u, v):
+                continue
+            if self.rng.random() < self.p(u, v):  # accept edge
+                self.graph.add_edge(u, v)
+                self.update_remaining(u, v)
+
+    def phase2(self):
+        # choose remaining nodes uniformly at random and use rejection sampling
+        remaining_deg = self.remaining_degree
+        rng = self.rng
+        while len(remaining_deg) >= 2 * self.dmax:
+            while True:
+                u, v = sorted(rng.sample(list(remaining_deg.keys()), 2))
+                if self.graph.has_edge(u, v):
+                    continue
+                if rng.random() < self.q(u, v):
+                    break
+            if rng.random() < self.p(u, v):  # accept edge
+                self.graph.add_edge(u, v)
+                self.update_remaining(u, v)
+
+    def phase3(self):
+        # build potential remaining edges and choose with rejection sampling
+        potential_edges = combinations(self.remaining_degree, 2)
+        # build auxiliary graph of potential edges not already in graph
+        H = nx.Graph(
+            [(u, v) for (u, v) in potential_edges if not self.graph.has_edge(u, v)]
+        )
+        rng = self.rng
+        while self.remaining_degree:
+            if not self.suitable_edge():
+                raise nx.NetworkXUnfeasible("no suitable edges left")
+            while True:
+                u, v = sorted(rng.choice(list(H.edges())))
+                if rng.random() < self.q(u, v):
+                    break
+            if rng.random() < self.p(u, v):  # accept edge
+                self.graph.add_edge(u, v)
+                self.update_remaining(u, v, aux_graph=H)
diff --git a/networkx/generators/directed.py b/networkx/generators/directed.py
index bfafe5557..4548726b9 100644
--- a/networkx/generators/directed.py
+++ b/networkx/generators/directed.py
@@ -3,13 +3,21 @@ Generators for some directed graphs, including growing network (GN) graphs and
 scale-free graphs.

 """
+
 import numbers
 from collections import Counter
+
 import networkx as nx
 from networkx.generators.classic import empty_graph
 from networkx.utils import discrete_sequence, py_random_state, weighted_choice
-__all__ = ['gn_graph', 'gnc_graph', 'gnr_graph', 'random_k_out_graph',
-    'scale_free_graph']
+
+__all__ = [
+    "gn_graph",
+    "gnc_graph",
+    "gnr_graph",
+    "random_k_out_graph",
+    "scale_free_graph",
+]


 @py_random_state(3)
@@ -54,7 +62,30 @@ def gn_graph(n, kernel=None, create_using=None, seed=None):
            Organization of Growing Random Networks,
            Phys. Rev. E, 63, 066123, 2001.
     """
-    pass
+    G = empty_graph(1, create_using, default=nx.DiGraph)
+    if not G.is_directed():
+        raise nx.NetworkXError("create_using must indicate a Directed Graph")
+
+    if kernel is None:
+
+        def kernel(x):
+            return x
+
+    if n == 1:
+        return G
+
+    G.add_edge(1, 0)  # get started
+    ds = [1, 1]  # degree sequence
+
+    for source in range(2, n):
+        # compute distribution from kernel and degree
+        dist = [kernel(d) for d in ds]
+        # choose target from discrete distribution
+        target = discrete_sequence(1, distribution=dist, seed=seed)[0]
+        G.add_edge(source, target)
+        ds.append(1)  # the source has only one link (degree one)
+        ds[target] += 1  # add one to the target link degree
+    return G


 @py_random_state(3)
@@ -96,7 +127,19 @@ def gnr_graph(n, p, create_using=None, seed=None):
            Organization of Growing Random Networks,
            Phys. Rev. E, 63, 066123, 2001.
     """
-    pass
+    G = empty_graph(1, create_using, default=nx.DiGraph)
+    if not G.is_directed():
+        raise nx.NetworkXError("create_using must indicate a Directed Graph")
+
+    if n == 1:
+        return G
+
+    for source in range(1, n):
+        target = seed.randrange(0, source)
+        if seed.random() < p and target != 0:
+            target = next(G.successors(target))
+        G.add_edge(source, target)
+    return G


 @py_random_state(2)
@@ -124,13 +167,33 @@ def gnc_graph(n, create_using=None, seed=None):
            Network Growth by Copying,
            Phys. Rev. E, 71, 036118, 2005k.},
     """
-    pass
+    G = empty_graph(1, create_using, default=nx.DiGraph)
+    if not G.is_directed():
+        raise nx.NetworkXError("create_using must indicate a Directed Graph")
+
+    if n == 1:
+        return G
+
+    for source in range(1, n):
+        target = seed.randrange(0, source)
+        for succ in G.successors(target):
+            G.add_edge(source, succ)
+        G.add_edge(source, target)
+    return G


 @py_random_state(6)
 @nx._dispatchable(graphs=None, returns_graph=True)
-def scale_free_graph(n, alpha=0.41, beta=0.54, gamma=0.05, delta_in=0.2,
-    delta_out=0, seed=None, initial_graph=None):
+def scale_free_graph(
+    n,
+    alpha=0.41,
+    beta=0.54,
+    gamma=0.05,
+    delta_in=0.2,
+    delta_out=0,
+    seed=None,
+    initial_graph=None,
+):
     """Returns a scale-free directed graph.

     Parameters
@@ -180,13 +243,99 @@ def scale_free_graph(n, alpha=0.41, beta=0.54, gamma=0.05, delta_in=0.2,
            Proceedings of the fourteenth annual ACM-SIAM Symposium on
            Discrete Algorithms, 132--139, 2003.
     """
-    pass
+
+    def _choose_node(candidates, node_list, delta):
+        if delta > 0:
+            bias_sum = len(node_list) * delta
+            p_delta = bias_sum / (bias_sum + len(candidates))
+            if seed.random() < p_delta:
+                return seed.choice(node_list)
+        return seed.choice(candidates)
+
+    if initial_graph is not None and hasattr(initial_graph, "_adj"):
+        if not isinstance(initial_graph, nx.MultiDiGraph):
+            raise nx.NetworkXError("initial_graph must be a MultiDiGraph.")
+        G = initial_graph
+    else:
+        # Start with 3-cycle
+        G = nx.MultiDiGraph([(0, 1), (1, 2), (2, 0)])
+
+    if alpha <= 0:
+        raise ValueError("alpha must be > 0.")
+    if beta <= 0:
+        raise ValueError("beta must be > 0.")
+    if gamma <= 0:
+        raise ValueError("gamma must be > 0.")
+
+    if abs(alpha + beta + gamma - 1.0) >= 1e-9:
+        raise ValueError("alpha+beta+gamma must equal 1.")
+
+    if delta_in < 0:
+        raise ValueError("delta_in must be >= 0.")
+
+    if delta_out < 0:
+        raise ValueError("delta_out must be >= 0.")
+
+    # pre-populate degree states
+    vs = sum((count * [idx] for idx, count in G.out_degree()), [])
+    ws = sum((count * [idx] for idx, count in G.in_degree()), [])
+
+    # pre-populate node state
+    node_list = list(G.nodes())
+
+    # see if there already are number-based nodes
+    numeric_nodes = [n for n in node_list if isinstance(n, numbers.Number)]
+    if len(numeric_nodes) > 0:
+        # set cursor for new nodes appropriately
+        cursor = max(int(n.real) for n in numeric_nodes) + 1
+    else:
+        # or start at zero
+        cursor = 0
+
+    while len(G) < n:
+        r = seed.random()
+
+        # random choice in alpha,beta,gamma ranges
+        if r < alpha:
+            # alpha
+            # add new node v
+            v = cursor
+            cursor += 1
+            # also add to node state
+            node_list.append(v)
+            # choose w according to in-degree and delta_in
+            w = _choose_node(ws, node_list, delta_in)
+
+        elif r < alpha + beta:
+            # beta
+            # choose v according to out-degree and delta_out
+            v = _choose_node(vs, node_list, delta_out)
+            # choose w according to in-degree and delta_in
+            w = _choose_node(ws, node_list, delta_in)
+
+        else:
+            # gamma
+            # choose v according to out-degree and delta_out
+            v = _choose_node(vs, node_list, delta_out)
+            # add new node w
+            w = cursor
+            cursor += 1
+            # also add to node state
+            node_list.append(w)
+
+        # add edge to graph
+        G.add_edge(v, w)
+
+        # update degree states
+        vs.append(v)
+        ws.append(w)
+
+    return G
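
A usage sketch (the in-degree values depend on the RNG stream; illustration only). The preferential attachment in the alpha and beta steps concentrates in-links on a few hubs:

```python
import networkx as nx

G = nx.scale_free_graph(1000, seed=42)
print(G.is_directed(), G.is_multigraph())  # True True

# In-degrees are heavy-tailed: a handful of hubs absorb many links.
top = sorted((d for _, d in G.in_degree()), reverse=True)[:5]
print(top)
```
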


 @py_random_state(4)
 @nx._dispatchable(graphs=None, returns_graph=True)
-def random_uniform_k_out_graph(n, k, self_loops=True, with_replacement=True,
-    seed=None):
+def random_uniform_k_out_graph(n, k, self_loops=True, with_replacement=True, seed=None):
     """Returns a random `k`-out graph with uniform attachment.

     A random `k`-out graph with uniform attachment is a multidigraph
@@ -242,7 +391,27 @@ def random_uniform_k_out_graph(n, k, self_loops=True, with_replacement=True,
     set to positive infinity.

     """
-    pass
+    if with_replacement:
+        create_using = nx.MultiDiGraph()
+
+        def sample(v, nodes):
+            if not self_loops:
+                nodes = nodes - {v}
+            return (seed.choice(list(nodes)) for i in range(k))
+
+    else:
+        create_using = nx.DiGraph()
+
+        def sample(v, nodes):
+            if not self_loops:
+                nodes = nodes - {v}
+            return seed.sample(list(nodes), k)
+
+    G = nx.empty_graph(n, create_using)
+    nodes = set(G)
+    for u in G:
+        G.add_edges_from((u, v) for v in sample(u, nodes))
+    return G


 @py_random_state(4)
@@ -314,4 +483,19 @@ def random_k_out_graph(n, k, alpha, self_loops=True, seed=None):
          <https://arxiv.org/abs/1311.5961>

     """
-    pass
+    if alpha < 0:
+        raise ValueError("alpha must be positive")
+    G = nx.empty_graph(n, create_using=nx.MultiDiGraph)
+    weights = Counter({v: alpha for v in G})
+    for i in range(k * n):
+        u = seed.choice([v for v, d in G.out_degree() if d < k])
+        # If self-loops are not allowed, make the source node `u` have
+        # weight zero.
+        if not self_loops:
+            adjustment = Counter({u: weights[u]})
+        else:
+            adjustment = Counter()
+        v = weighted_choice(weights - adjustment, seed=seed)
+        G.add_edge(u, v)
+        weights[v] += 1
+    return G
diff --git a/networkx/generators/duplication.py b/networkx/generators/duplication.py
index dbf086a76..ad8262382 100644
--- a/networkx/generators/duplication.py
+++ b/networkx/generators/duplication.py
@@ -8,7 +8,8 @@ generally inspired by biological networks.
 import networkx as nx
 from networkx.exception import NetworkXError
 from networkx.utils import py_random_state
-__all__ = ['partial_duplication_graph', 'duplication_divergence_graph']
+
+__all__ = ["partial_duplication_graph", "duplication_divergence_graph"]


 @py_random_state(4)
@@ -60,7 +61,30 @@ def partial_duplication_graph(N, n, p, q, seed=None):
            <https://doi.org/10.1155/2008/190836>

     """
-    pass
+    if p < 0 or p > 1 or q < 0 or q > 1:
+        msg = "partial duplication graph must have 0 <= p, q <= 1."
+        raise NetworkXError(msg)
+    if n > N:
+        raise NetworkXError("partial duplication graph must have n <= N.")
+
+    G = nx.complete_graph(n)
+    for new_node in range(n, N):
+        # Pick a random vertex, u, already in the graph.
+        src_node = seed.randint(0, new_node - 1)
+
+        # Add a new vertex, v, to the graph.
+        G.add_node(new_node)
+
+        # For each neighbor of u...
+        for nbr_node in list(nx.all_neighbors(G, src_node)):
+            # Add the neighbor to v with probability p.
+            if seed.random() < p:
+                G.add_edge(new_node, nbr_node)
+
+        # Join v and u with probability q.
+        if seed.random() < q:
+            G.add_edge(new_node, src_node)
+    return G


 @py_random_state(2)
@@ -106,4 +130,34 @@ def duplication_divergence_graph(n, p, seed=None):
        Phys. Rev. E, 71, 061911, 2005.

     """
-    pass
+    if p > 1 or p < 0:
+        msg = f"NetworkXError p={p} is not in [0,1]."
+        raise nx.NetworkXError(msg)
+    if n < 2:
+        msg = "n must be greater than or equal to 2"
+        raise nx.NetworkXError(msg)
+
+    G = nx.Graph()
+
+    # Initialize the graph with two connected nodes.
+    G.add_edge(0, 1)
+    i = 2
+    while i < n:
+        # Choose a random node from current graph to duplicate.
+        random_node = seed.choice(list(G))
+        # Make the replica.
+        G.add_node(i)
+        # flag indicates whether at least one edge is connected on the replica.
+        flag = False
+        for nbr in G.neighbors(random_node):
+            if seed.random() < p:
+                # Link retention step.
+                G.add_edge(i, nbr)
+                flag = True
+        if not flag:
+            # Delete replica if no edges retained.
+            G.remove_node(i)
+        else:
+            # Successful duplication.
+            i += 1
+    return G
diff --git a/networkx/generators/ego.py b/networkx/generators/ego.py
index c20cbfe2d..d959a1bd1 100644
--- a/networkx/generators/ego.py
+++ b/networkx/generators/ego.py
@@ -1,7 +1,8 @@
 """
 Ego graph.
 """
-__all__ = ['ego_graph']
+__all__ = ["ego_graph"]
+
 import networkx as nx


@@ -41,4 +42,24 @@ def ego_graph(G, n, radius=1, center=True, undirected=False, distance=None):

     Node, edge, and graph attributes are copied to the returned subgraph.
     """
-    pass
+    if undirected:
+        if distance is not None:
+            sp, _ = nx.single_source_dijkstra(
+                G.to_undirected(), n, cutoff=radius, weight=distance
+            )
+        else:
+            sp = dict(
+                nx.single_source_shortest_path_length(
+                    G.to_undirected(), n, cutoff=radius
+                )
+            )
+    else:
+        if distance is not None:
+            sp, _ = nx.single_source_dijkstra(G, n, cutoff=radius, weight=distance)
+        else:
+            sp = dict(nx.single_source_shortest_path_length(G, n, cutoff=radius))
+
+    H = G.subgraph(sp).copy()
+    if not center:
+        H.remove_node(n)
+    return H
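
A sketch covering the three lookup modes handled above: hop-count radius, dropping the center, and a weighted Dijkstra cutoff via ``distance`` (illustration only):

```python
import networkx as nx

G = nx.path_graph(6)  # 0-1-2-3-4-5
H = nx.ego_graph(G, 2, radius=2)
print(sorted(H))  # [0, 1, 2, 3, 4]

# center=False drops the ego node itself.
print(sorted(nx.ego_graph(G, 2, radius=2, center=False)))  # [0, 1, 3, 4]

# With distance="weight", radius is a Dijkstra cutoff; node 3 is now too far.
G[2][3]["weight"] = 5
print(sorted(nx.ego_graph(G, 2, radius=2, distance="weight")))  # [0, 1, 2]
```
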
diff --git a/networkx/generators/expanders.py b/networkx/generators/expanders.py
index a7dc6c848..69043fb57 100644
--- a/networkx/generators/expanders.py
+++ b/networkx/generators/expanders.py
@@ -2,19 +2,58 @@

 """
 import itertools
-import networkx as nx
-__all__ = ['margulis_gabber_galil_graph', 'chordal_cycle_graph',
-    'paley_graph', 'maybe_regular_expander', 'is_regular_expander',
-    'random_regular_expander_graph']

+import networkx as nx

+__all__ = [
+    "margulis_gabber_galil_graph",
+    "chordal_cycle_graph",
+    "paley_graph",
+    "maybe_regular_expander",
+    "is_regular_expander",
+    "random_regular_expander_graph",
+]
+
+
+# Other discrete torus expanders can be constructed by using the following edge
+# sets. For more information, see Chapter 4, "Expander Graphs", in
+# "Pseudorandomness", by Salil Vadhan.
+#
+# For a directed expander, add edges from (x, y) to:
+#
+#     (x, y),
+#     ((x + 1) % n, y),
+#     (x, (y + 1) % n),
+#     (x, (x + y) % n),
+#     (-y % n, x)
+#
+# For an undirected expander, add the reverse edges.
+#
+# Also appearing in the paper of Gabber and Galil:
+#
+#     (x, y),
+#     (x, (x + y) % n),
+#     (x, (x + y + 1) % n),
+#     ((x + y) % n, y),
+#     ((x + y + 1) % n, y)
+#
+# and:
+#
+#     (x, y),
+#     ((x + 2*y) % n, y),
+#     ((x + (2*y + 1)) % n, y),
+#     ((x + (2*y + 2)) % n, y),
+#     (x, (y + 2*x) % n),
+#     (x, (y + (2*x + 1)) % n),
+#     (x, (y + (2*x + 2)) % n),
+#
 @nx._dispatchable(graphs=None, returns_graph=True)
 def margulis_gabber_galil_graph(n, create_using=None):
-    """Returns the Margulis-Gabber-Galil undirected MultiGraph on `n^2` nodes.
+    r"""Returns the Margulis-Gabber-Galil undirected MultiGraph on `n^2` nodes.

     The undirected MultiGraph is regular with degree `8`. Nodes are integer
     pairs. The second-largest eigenvalue of the adjacency matrix of the graph
-    is at most `5 \\sqrt{2}`, regardless of `n`.
+    is at most `5 \sqrt{2}`, regardless of `n`.

     Parameters
     ----------
@@ -34,7 +73,21 @@ def margulis_gabber_galil_graph(n, create_using=None):
         If the graph is directed or not a multigraph.

     """
-    pass
+    G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
+    if G.is_directed() or not G.is_multigraph():
+        msg = "`create_using` must be an undirected multigraph."
+        raise nx.NetworkXError(msg)
+
+    for x, y in itertools.product(range(n), repeat=2):
+        for u, v in (
+            ((x + 2 * y) % n, y),
+            ((x + (2 * y + 1)) % n, y),
+            (x, (y + 2 * x) % n),
+            (x, (y + (2 * x + 1)) % n),
+        ):
+            G.add_edge((x, y), (u, v))
+    G.graph["name"] = f"margulis_gabber_galil_graph({n})"
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -76,24 +129,46 @@ def chordal_cycle_graph(p, create_using=None):
            Birkhäuser Verlag, Basel, 1994.

     """
-    pass
+    G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
+    if G.is_directed() or not G.is_multigraph():
+        msg = "`create_using` must be an undirected multigraph."
+        raise nx.NetworkXError(msg)
+
+    for x in range(p):
+        left = (x - 1) % p
+        right = (x + 1) % p
+        # Here we apply Fermat's Little Theorem to compute the multiplicative
+        # inverse of x in Z/pZ. By Fermat's Little Theorem,
+        #
+        #     x^p = x (mod p)
+        #
+        # Therefore,
+        #
+        #     x * x^(p - 2) = 1 (mod p)
+        #
+        # The number 0 is a special case: we just let its inverse be itself.
+        chord = pow(x, p - 2, p) if x > 0 else 0
+        for y in (left, right, chord):
+            G.add_edge(x, y)
+    G.graph["name"] = f"chordal_cycle_graph({p})"
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
 def paley_graph(p, create_using=None):
-    """Returns the Paley $\\frac{(p-1)}{2}$ -regular graph on $p$ nodes.
+    r"""Returns the Paley $\frac{(p-1)}{2}$ -regular graph on $p$ nodes.

-    The returned graph is a graph on $\\mathbb{Z}/p\\mathbb{Z}$ with edges between $x$ and $y$
-    if and only if $x-y$ is a nonzero square in $\\mathbb{Z}/p\\mathbb{Z}$.
+    The returned graph is a graph on $\mathbb{Z}/p\mathbb{Z}$ with edges between $x$ and $y$
+    if and only if $x-y$ is a nonzero square in $\mathbb{Z}/p\mathbb{Z}$.

-    If $p \\equiv 1  \\pmod 4$, $-1$ is a square in $\\mathbb{Z}/p\\mathbb{Z}$ and therefore $x-y$ is a square if and
+    If $p \equiv 1  \pmod 4$, $-1$ is a square in $\mathbb{Z}/p\mathbb{Z}$ and therefore $x-y$ is a square if and
     only if $y-x$ is also a square, i.e. the edges in the Paley graph are symmetric.

-    If $p \\equiv 3 \\pmod 4$, $-1$ is not a square in $\\mathbb{Z}/p\\mathbb{Z}$ and therefore either $x-y$ or $y-x$
-    is a square in $\\mathbb{Z}/p\\mathbb{Z}$ but not both.
+    If $p \equiv 3 \pmod 4$, $-1$ is not a square in $\mathbb{Z}/p\mathbb{Z}$ and therefore either $x-y$ or $y-x$
+    is a square in $\mathbb{Z}/p\mathbb{Z}$ but not both.

     Note that a more general definition of Paley graphs extends this construction
-    to graphs over $q=p^n$ vertices, by using the finite field $F_q$ instead of $\\mathbb{Z}/p\\mathbb{Z}$.
+    to graphs over $q=p^n$ vertices, by using the finite field $F_q$ instead of $\mathbb{Z}/p\mathbb{Z}$.
     This construction requires computing squares in general finite fields and is
     not what is implemented here (i.e. `paley_graph(25)` does not return the true
     Paley graph associated with $5^2$).
@@ -121,14 +196,27 @@ def paley_graph(p, create_using=None):
     Cambridge Studies in Advanced Mathematics, 73.
     Cambridge University Press, Cambridge (2001).
     """
-    pass
+    G = nx.empty_graph(0, create_using, default=nx.DiGraph)
+    if G.is_multigraph():
+        msg = "`create_using` cannot be a multigraph."
+        raise nx.NetworkXError(msg)
+
+    # Compute the squares in Z/pZ.
+    # Make it a set to uniquify (there are exactly (p-1)/2 squares in Z/pZ
+    # when p is prime).
+    square_set = {(x**2) % p for x in range(1, p) if (x**2) % p != 0}

+    for x in range(p):
+        for x2 in square_set:
+            G.add_edge(x, (x + x2) % p)
+    G.graph["name"] = f"paley({p})"
+    return G
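
A sketch for `paley_graph`, assuming `p` is an odd prime as the docstring requires; for p = 13 (where 13 % 4 == 1) every node has out-degree (p - 1) / 2 = 6 and the edge relation is symmetric:

    import networkx as nx

    G = nx.paley_graph(13)
    assert all(d == 6 for _, d in G.out_degree())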

-@nx.utils.decorators.np_random_state('seed')
+
+@nx.utils.decorators.np_random_state("seed")
 @nx._dispatchable(graphs=None, returns_graph=True)
-def maybe_regular_expander(n, d, *, create_using=None, max_tries=100, seed=None
-    ):
-    """Utility for creating a random regular expander.
+def maybe_regular_expander(n, d, *, create_using=None, max_tries=100, seed=None):
+    r"""Utility for creating a random regular expander.

     Returns a random $d$-regular graph on $n$ nodes which is an expander
     graph with very good probability.
@@ -157,7 +245,7 @@ def maybe_regular_expander(n, d, *, create_using=None, max_tries=100, seed=None

     Joel Friedman proved that in this model the resulting
     graph is an expander with probability
-    $1 - O(n^{-\\tau})$ where $\\tau = \\lceil (\\sqrt{d - 1}) / 2 \\rceil - 1$. [1]_
+    $1 - O(n^{-\tau})$ where $\tau = \lceil (\sqrt{d - 1}) / 2 \rceil - 1$. [1]_

     Examples
     --------
@@ -187,23 +275,75 @@ def maybe_regular_expander(n, d, *, create_using=None, max_tries=100, seed=None
        https://arxiv.org/abs/cs/0405020

     """
-    pass

+    import numpy as np
+
+    if n < 1:
+        raise nx.NetworkXError("n must be a positive integer")
+
+    if not (d >= 2):
+        raise nx.NetworkXError("d must be greater than or equal to 2")
+
+    if not (d % 2 == 0):
+        raise nx.NetworkXError("d must be even")
+
+    if not (n - 1 >= d):
+        raise nx.NetworkXError(
+            f"Need n-1>= d to have room for {d//2} independent cycles with {n} nodes"
+        )
+
+    G = nx.empty_graph(n, create_using)
+
+    if n < 2:
+        return G
+
+    cycles = []
+    edges = set()
+
+    # Create d / 2 cycles
+    for i in range(d // 2):
+        iterations = max_tries
+        # Make sure the cycles are independent to have a regular graph
+        while len(edges) != (i + 1) * n:
+            iterations -= 1
+            # Faster than random.permutation(n) since there are only
+            # (n-1)! distinct cycles against n! permutations of size n
+            cycle = seed.permutation(n - 1).tolist()
+            cycle.append(n - 1)

-@nx.utils.not_implemented_for('directed')
-@nx.utils.not_implemented_for('multigraph')
-@nx._dispatchable(preserve_edge_attrs={'G': {'weight': 1}})
+            new_edges = {
+                (u, v)
+                for u, v in nx.utils.pairwise(cycle, cyclic=True)
+                if (u, v) not in edges and (v, u) not in edges
+            }
+            # If the new cycle has no edges in common with previous cycles
+            # then add it to the list otherwise try again
+            if len(new_edges) == n:
+                cycles.append(cycle)
+                edges.update(new_edges)
+
+            if iterations == 0:
+                raise nx.NetworkXError("Too many iterations in maybe_regular_expander")
+
+    G.add_edges_from(edges)
+
+    return G
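
A sketch for `maybe_regular_expander`: with n = 10 and d = 4 the result is the edge-disjoint union of d/2 = 2 Hamiltonian cycles, hence 4-regular:

    import networkx as nx

    G = nx.maybe_regular_expander(n=10, d=4, seed=7)
    assert nx.is_regular(G) and G.degree[0] == 4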
+
+
+@nx.utils.not_implemented_for("directed")
+@nx.utils.not_implemented_for("multigraph")
+@nx._dispatchable(preserve_edge_attrs={"G": {"weight": 1}})
 def is_regular_expander(G, *, epsilon=0):
-    """Determines whether the graph G is a regular expander. [1]_
+    r"""Determines whether the graph G is a regular expander. [1]_

     An expander graph is a sparse graph with strong connectivity properties.

     More precisely, this helper checks whether the graph is a
-    regular $(n, d, \\lambda)$-expander with $\\lambda$ close to
+    regular $(n, d, \lambda)$-expander with $\lambda$ close to
     the Alon-Boppana bound and given by
-    $\\lambda = 2 \\sqrt{d - 1} + \\epsilon$. [2]_
+    $\lambda = 2 \sqrt{d - 1} + \epsilon$. [2]_

-    In the case where $\\epsilon = 0$ then if the graph successfully passes the test
+    In the case where $\epsilon = 0$ then if the graph successfully passes the test
     it is a Ramanujan graph. [3]_

     A Ramanujan graph has spectral gap almost as large as possible, which makes them
@@ -217,8 +357,8 @@ def is_regular_expander(G, *, epsilon=0):
     Returns
     -------
     bool
-        Whether the given graph is a regular $(n, d, \\lambda)$-expander
-        where $\\lambda = 2 \\sqrt{d - 1} + \\epsilon$.
+        Whether the given graph is a regular $(n, d, \lambda)$-expander
+        where $\lambda = 2 \sqrt{d - 1} + \epsilon$.

     Examples
     --------
@@ -238,21 +378,41 @@ def is_regular_expander(G, *, epsilon=0):
     .. [3] Ramanujan graphs, https://en.wikipedia.org/wiki/Ramanujan_graph

     """
-    pass
+
+    import numpy as np
+    from scipy.sparse.linalg import eigsh
+
+    if epsilon < 0:
+        raise nx.NetworkXError("epsilon must be non negative")
+
+    if not nx.is_regular(G):
+        return False
+
+    _, d = nx.utils.arbitrary_element(G.degree)
+
+    A = nx.adjacency_matrix(G, dtype=float)
+    lams = eigsh(A, which="LM", k=2, return_eigenvectors=False)
+
+    # lambda2 is the second biggest eigenvalue
+    lambda2 = min(lams)
+
+    # Use bool() to convert numpy scalar to Python Boolean
+    return bool(abs(lambda2) < 2 * np.sqrt(d - 1) + epsilon)
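
A sketch for `is_regular_expander` (which needs scipy): the Petersen graph is 3-regular with second-largest |eigenvalue| equal to 2, below the Ramanujan bound 2 * sqrt(3 - 1) ~= 2.83, so the test passes:

    import networkx as nx

    G = nx.petersen_graph()
    assert nx.is_regular_expander(G)  # Petersen is a Ramanujan graph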


-@nx.utils.decorators.np_random_state('seed')
+@nx.utils.decorators.np_random_state("seed")
 @nx._dispatchable(graphs=None, returns_graph=True)
-def random_regular_expander_graph(n, d, *, epsilon=0, create_using=None,
-    max_tries=100, seed=None):
-    """Returns a random regular expander graph on $n$ nodes with degree $d$.
+def random_regular_expander_graph(
+    n, d, *, epsilon=0, create_using=None, max_tries=100, seed=None
+):
+    r"""Returns a random regular expander graph on $n$ nodes with degree $d$.

     An expander graph is a sparse graph with strong connectivity properties. [1]_

-    More precisely the returned graph is a $(n, d, \\lambda)$-expander with
-    $\\lambda = 2 \\sqrt{d - 1} + \\epsilon$, close to the Alon-Boppana bound. [2]_
+    More precisely the returned graph is a $(n, d, \lambda)$-expander with
+    $\lambda = 2 \sqrt{d - 1} + \epsilon$, close to the Alon-Boppana bound. [2]_

-    In the case where $\\epsilon = 0$ it returns a Ramanujan graph.
+    In the case where $\epsilon = 0$ it returns a Ramanujan graph.
     A Ramanujan graph has spectral gap almost as large as possible,
     which makes them excellent expanders. [3]_

@@ -282,7 +442,7 @@ def random_regular_expander_graph(n, d, *, epsilon=0, create_using=None,
     Notes
     -----
     This loops over `maybe_regular_expander` and can be slow when
-    $n$ is too big or $\\epsilon$ too small.
+    $n$ is too big or $\epsilon$ too small.

     See Also
     --------
@@ -296,4 +456,20 @@ def random_regular_expander_graph(n, d, *, epsilon=0, create_using=None,
     .. [3] Ramanujan graphs, https://en.wikipedia.org/wiki/Ramanujan_graph

     """
-    pass
+    G = maybe_regular_expander(
+        n, d, create_using=create_using, max_tries=max_tries, seed=seed
+    )
+    iterations = max_tries
+
+    while not is_regular_expander(G, epsilon=epsilon):
+        iterations -= 1
+        G = maybe_regular_expander(
+            n=n, d=d, create_using=create_using, max_tries=max_tries, seed=seed
+        )
+
+        if iterations == 0:
+            raise nx.NetworkXError(
+                "Too many iterations in random_regular_expander_graph"
+            )
+
+    return G
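
A sketch for `random_regular_expander_graph`; the parameters are arbitrary but satisfy the evenness and n - 1 >= d constraints checked above (scipy required for the expander test):

    import networkx as nx

    G = nx.random_regular_expander_graph(n=20, d=4, seed=3)
    assert nx.is_regular(G)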
diff --git a/networkx/generators/geometric.py b/networkx/generators/geometric.py
index 5cfd11642..bb7fbd76d 100644
--- a/networkx/generators/geometric.py
+++ b/networkx/generators/geometric.py
@@ -1,18 +1,27 @@
 """Generators for geometric graphs.
 """
+
 import math
 from bisect import bisect_left
 from itertools import accumulate, combinations, product
+
 import networkx as nx
 from networkx.utils import py_random_state
-__all__ = ['geometric_edges', 'geographical_threshold_graph',
-    'navigable_small_world_graph', 'random_geometric_graph',
-    'soft_random_geometric_graph', 'thresholded_random_geometric_graph',
-    'waxman_graph', 'geometric_soft_configuration_graph']

+__all__ = [
+    "geometric_edges",
+    "geographical_threshold_graph",
+    "navigable_small_world_graph",
+    "random_geometric_graph",
+    "soft_random_geometric_graph",
+    "thresholded_random_geometric_graph",
+    "waxman_graph",
+    "geometric_soft_configuration_graph",
+]

-@nx._dispatchable(node_attrs='pos_name')
-def geometric_edges(G, radius, p=2, *, pos_name='pos'):
+
+@nx._dispatchable(node_attrs="pos_name")
+def geometric_edges(G, radius, p=2, *, pos_name="pos"):
     """Returns edge list of node pairs within `radius` of each other.

     Parameters
@@ -64,7 +73,18 @@ def geometric_edges(G, radius, p=2, *, pos_name='pos'):
     >>> nx.geometric_edges(G, radius=9)
     [(0, 1), (0, 2), (1, 2)]
     """
-    pass
+    # Input validation - every node must have a "pos" attribute
+    for n, pos in G.nodes(data=pos_name):
+        if pos is None:
+            raise nx.NetworkXError(
+                f"Node {n} (and all nodes) must have a '{pos_name}' attribute."
+            )
+
+    # NOTE: See _geometric_edges for the actual implementation. The reason this
+    # is split into two functions is to avoid the overhead of input validation
+    # every time the function is called internally in one of the other
+    # geometric generators
+    return _geometric_edges(G, radius, p, pos_name)


 def _geometric_edges(G, radius, p, pos_name):
@@ -72,13 +92,31 @@ def _geometric_edges(G, radius, p, pos_name):
     Implements `geometric_edges` without input validation. See `geometric_edges`
     for complete docstring.
     """
-    pass
+    nodes_pos = G.nodes(data=pos_name)
+    try:
+        import scipy as sp
+    except ImportError:
+        # no scipy KDTree so compute by for-loop
+        radius_p = radius**p
+        edges = [
+            (u, v)
+            for (u, pu), (v, pv) in combinations(nodes_pos, 2)
+            if sum(abs(a - b) ** p for a, b in zip(pu, pv)) <= radius_p
+        ]
+        return edges
+    # scipy KDTree is available
+    nodes, coords = list(zip(*nodes_pos))
+    kdtree = sp.spatial.cKDTree(coords)  # Cannot provide generator.
+    edge_indexes = kdtree.query_pairs(radius, p)
+    edges = [(nodes[u], nodes[v]) for u, v in sorted(edge_indexes)]
+    return edges


 @py_random_state(5)
 @nx._dispatchable(graphs=None, returns_graph=True)
-def random_geometric_graph(n, radius, dim=2, pos=None, p=2, seed=None, *,
-    pos_name='pos'):
+def random_geometric_graph(
+    n, radius, dim=2, pos=None, p=2, seed=None, *, pos_name="pos"
+):
     """Returns a random geometric graph in the unit cube of dimensions `dim`.

     The random geometric graph model places `n` nodes uniformly at
@@ -150,14 +188,30 @@ def random_geometric_graph(n, radius, dim=2, pos=None, p=2, seed=None, *,
            Oxford Studies in Probability, 5, 2003.

     """
-    pass
+    # TODO Is this function just a special case of the geographical
+    # threshold graph?
+    #
+    #     half_radius = {v: radius / 2 for v in n}
+    #     return geographical_threshold_graph(nodes, theta=1, alpha=1,
+    #                                         weight=half_radius)
+    #
+    G = nx.empty_graph(n)
+    # If no positions are provided, choose uniformly random vectors in
+    # Euclidean space of the specified dimension.
+    if pos is None:
+        pos = {v: [seed.random() for i in range(dim)] for v in G}
+    nx.set_node_attributes(G, pos, pos_name)
+
+    G.add_edges_from(_geometric_edges(G, radius, p, pos_name))
+    return G
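
A sketch for `random_geometric_graph`; positions land under the default `pos` node attribute:

    import networkx as nx

    G = nx.random_geometric_graph(50, 0.25, seed=42)
    pos = nx.get_node_attributes(G, "pos")  # 50 random points in [0, 1]^2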


 @py_random_state(6)
 @nx._dispatchable(graphs=None, returns_graph=True)
-def soft_random_geometric_graph(n, radius, dim=2, pos=None, p=2, p_dist=
-    None, seed=None, *, pos_name='pos'):
-    """Returns a soft random geometric graph in the unit cube.
+def soft_random_geometric_graph(
+    n, radius, dim=2, pos=None, p=2, p_dist=None, seed=None, *, pos_name="pos"
+):
+    r"""Returns a soft random geometric graph in the unit cube.

     The soft random geometric graph [1] model places `n` nodes uniformly at
     random in the unit cube in dimension `dim`. Two nodes of distance, `dist`,
@@ -198,7 +252,7 @@ def soft_random_geometric_graph(n, radius, dim=2, pos=None, p=2, p_dist=
         tools for custom probability distribution definitions [2], and passing
         the .pdf method of scipy.stats distributions can be used here.  If the
         probability function, `p_dist`, is not supplied, the default function
-        is an exponential distribution with rate parameter :math:`\\lambda=1`.
+        is an exponential distribution with rate parameter :math:`\lambda=1`.
     seed : integer, random_state, or None (default)
         Indicator of random number generation state.
         See :ref:`Randomness<randomness>`.
@@ -225,7 +279,7 @@ def soft_random_geometric_graph(n, radius, dim=2, pos=None, p=2, p_dist=

     Create a soft random geometric graph on 100 uniformly distributed nodes
     where nodes are joined by an edge with probability computed from an
-    exponential distribution with rate parameter :math:`\\lambda=1` if their
+    exponential distribution with rate parameter :math:`\lambda=1` if their
     Euclidean distance is at most 0.2.

     Notes
@@ -258,15 +312,46 @@ def soft_random_geometric_graph(n, radius, dim=2, pos=None, p=2, p_dist=
            https://docs.scipy.org/doc/scipy/reference/tutorial/stats.html

     """
-    pass
+    G = nx.empty_graph(n)
+    G.name = f"soft_random_geometric_graph({n}, {radius}, {dim})"
+    # If no positions are provided, choose uniformly random vectors in
+    # Euclidean space of the specified dimension.
+    if pos is None:
+        pos = {v: [seed.random() for i in range(dim)] for v in G}
+    nx.set_node_attributes(G, pos, pos_name)
+
+    # if p_dist function not supplied the default function is an exponential
+    # distribution with rate parameter :math:`\lambda=1`.
+    if p_dist is None:
+
+        def p_dist(dist):
+            return math.exp(-dist)
+
+    def should_join(edge):
+        u, v = edge
+        dist = (sum(abs(a - b) ** p for a, b in zip(pos[u], pos[v]))) ** (1 / p)
+        return seed.random() < p_dist(dist)
+
+    G.add_edges_from(filter(should_join, _geometric_edges(G, radius, p, pos_name)))
+    return G
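
A sketch for `soft_random_geometric_graph` with a custom (here constant) connection probability in place of the default exp(-dist):

    import networkx as nx

    # any pair within the radius is joined with probability 0.5
    G = nx.soft_random_geometric_graph(100, 0.2, p_dist=lambda dist: 0.5, seed=5)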


 @py_random_state(7)
 @nx._dispatchable(graphs=None, returns_graph=True)
-def geographical_threshold_graph(n, theta, dim=2, pos=None, weight=None,
-    metric=None, p_dist=None, seed=None, *, pos_name='pos', weight_name=
-    'weight'):
-    """Returns a geographical threshold graph.
+def geographical_threshold_graph(
+    n,
+    theta,
+    dim=2,
+    pos=None,
+    weight=None,
+    metric=None,
+    p_dist=None,
+    seed=None,
+    *,
+    pos_name="pos",
+    weight_name="weight",
+):
+    r"""Returns a geographical threshold graph.

     The geographical threshold graph model places $n$ nodes uniformly at
     random in a rectangular domain.  Each node $u$ is assigned a weight
@@ -274,10 +359,10 @@ def geographical_threshold_graph(n, theta, dim=2, pos=None, weight=None,

     .. math::

-       (w_u + w_v)p_{dist}(r) \\ge \\theta
+       (w_u + w_v)p_{dist}(r) \ge \theta

     where `r` is the distance between `u` and `v`, `p_dist` is any function of
-    `r`, and :math:`\\theta` as the threshold parameter. `p_dist` is used to
+    `r`, and :math:`\theta` as the threshold parameter. `p_dist` is used to
     give weight to the distance between nodes when deciding whether or not
     they should be connected. The larger `p_dist` is, the more prone nodes
     separated by `r` are to be connected, and vice versa.
@@ -302,10 +387,10 @@ def geographical_threshold_graph(n, theta, dim=2, pos=None, weight=None,
         Specifically, if $d$ is the function and $x$, $y$,
         and $z$ are vectors in the graph, then $d$ must satisfy

-        1. $d(x, y) \\ge 0$,
+        1. $d(x, y) \ge 0$,
         2. $d(x, y) = 0$ if and only if $x = y$,
         3. $d(x, y) = d(y, x)$,
-        4. $d(x, z) \\le d(x, y) + d(y, z)$.
+        4. $d(x, z) \le d(x, y) + d(y, z)$.

         If this argument is not specified, the Euclidean distance metric is
         used.
@@ -361,7 +446,7 @@ def geographical_threshold_graph(n, theta, dim=2, pos=None, weight=None,
     Notes
     -----
     If weights are not specified they are assigned to nodes by drawing randomly
-    from the exponential distribution with rate parameter $\\lambda=1$.
+    from the exponential distribution with rate parameter $\lambda=1$.
     To specify weights from a different distribution, use the `weight` keyword
     argument::

@@ -384,21 +469,61 @@ def geographical_threshold_graph(n, theta, dim=2, pos=None, weight=None,
        in Algorithms and Models for the Web-Graph (WAW 2007),
        Antony Bonato and Fan Chung (Eds), pp. 209--216, 2007
     """
-    pass
+    G = nx.empty_graph(n)
+    # If no weights are provided, choose them from an exponential
+    # distribution.
+    if weight is None:
+        weight = {v: seed.expovariate(1) for v in G}
+    # If no positions are provided, choose uniformly random vectors in
+    # Euclidean space of the specified dimension.
+    if pos is None:
+        pos = {v: [seed.random() for i in range(dim)] for v in G}
+    # If no distance metric is provided, use Euclidean distance.
+    if metric is None:
+        metric = math.dist
+    nx.set_node_attributes(G, weight, weight_name)
+    nx.set_node_attributes(G, pos, pos_name)
+
+    # if p_dist is not supplied, use default r^-2
+    if p_dist is None:
+
+        def p_dist(r):
+            return r**-2
+
+    # Returns ``True`` if and only if the nodes in ``pair`` should be
+    # joined, according to the threshold condition.
+    def should_join(pair):
+        u, v = pair
+        u_pos, v_pos = pos[u], pos[v]
+        u_weight, v_weight = weight[u], weight[v]
+        return (u_weight + v_weight) * p_dist(metric(u_pos, v_pos)) >= theta
+
+    G.add_edges_from(filter(should_join, combinations(G, 2)))
+    return G
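
A sketch for `geographical_threshold_graph` with the default exponential weights and the default p_dist(r) = r**-2; the theta value is arbitrary:

    import networkx as nx

    G = nx.geographical_threshold_graph(50, theta=100, seed=21)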


 @py_random_state(6)
 @nx._dispatchable(graphs=None, returns_graph=True)
-def waxman_graph(n, beta=0.4, alpha=0.1, L=None, domain=(0, 0, 1, 1),
-    metric=None, seed=None, *, pos_name='pos'):
-    """Returns a Waxman random graph.
+def waxman_graph(
+    n,
+    beta=0.4,
+    alpha=0.1,
+    L=None,
+    domain=(0, 0, 1, 1),
+    metric=None,
+    seed=None,
+    *,
+    pos_name="pos",
+):
+    r"""Returns a Waxman random graph.

     The Waxman random graph model places `n` nodes uniformly at random
     in a rectangular domain. Each pair of nodes at distance `d` is
     joined by an edge with probability

     .. math::
-            p = \\beta \\exp(-d / \\alpha L).
+            p = \beta \exp(-d / \alpha L).

     This function implements both Waxman models, using the `L` keyword
     argument.
@@ -430,10 +555,10 @@ def waxman_graph(n, beta=0.4, alpha=0.1, L=None, domain=(0, 0, 1, 1),
         Specifically, if $d$ is the function and $x$, $y$,
         and $z$ are vectors in the graph, then $d$ must satisfy

-        1. $d(x, y) \\ge 0$,
+        1. $d(x, y) \ge 0$,
         2. $d(x, y) = 0$ if and only if $x = y$,
         3. $d(x, y) = d(y, x)$,
-        4. $d(x, z) \\le d(x, y) + d(y, z)$.
+        4. $d(x, z) \le d(x, y) + d(y, z)$.

         If this argument is not specified, the Euclidean distance metric is
         used.
@@ -478,20 +603,50 @@ def waxman_graph(n, beta=0.4, alpha=0.1, L=None, domain=(0, 0, 1, 1),
     .. [1]  B. M. Waxman, *Routing of multipoint connections*.
        IEEE J. Select. Areas Commun. 6(9),(1988) 1617--1622.
     """
-    pass
+    G = nx.empty_graph(n)
+    (xmin, ymin, xmax, ymax) = domain
+    # Each node gets a uniformly random position in the given rectangle.
+    pos = {v: (seed.uniform(xmin, xmax), seed.uniform(ymin, ymax)) for v in G}
+    nx.set_node_attributes(G, pos, pos_name)
+    # If no distance metric is provided, use Euclidean distance.
+    if metric is None:
+        metric = math.dist
+    # If the maximum distance L is not specified (that is, we are in the
+    # Waxman-1 model), then find the maximum distance between any pair
+    # of nodes.
+    #
+    # In the Waxman-1 model, join nodes randomly based on distance. In
+    # the Waxman-2 model, join randomly based on a random length in [0, L].
+    if L is None:
+        L = max(metric(x, y) for x, y in combinations(pos.values(), 2))
+
+        def dist(u, v):
+            return metric(pos[u], pos[v])
+
+    else:
+
+        def dist(u, v):
+            return seed.random() * L
+
+    # `pair` is the pair of nodes to decide whether to join.
+    def should_join(pair):
+        return seed.random() < beta * math.exp(-dist(*pair) / (alpha * L))
+
+    G.add_edges_from(filter(should_join, combinations(G, 2)))
+    return G
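
A sketch contrasting the two Waxman models selected by the `L` argument:

    import networkx as nx

    G1 = nx.waxman_graph(50, beta=0.4, alpha=0.1, seed=9)         # Waxman-1: L from positions
    G2 = nx.waxman_graph(50, beta=0.4, alpha=0.1, L=1.0, seed=9)  # Waxman-2: random distances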


 @py_random_state(5)
 @nx._dispatchable(graphs=None, returns_graph=True)
 def navigable_small_world_graph(n, p=1, q=1, r=2, dim=2, seed=None):
-    """Returns a navigable small-world graph.
+    r"""Returns a navigable small-world graph.

     A navigable small-world graph is a directed grid with additional long-range
     connections that are chosen randomly.

       [...] we begin with a set of nodes [...] that are identified with the set
-      of lattice points in an $n \\times n$ square,
-      $\\{(i, j): i \\in \\{1, 2, \\ldots, n\\}, j \\in \\{1, 2, \\ldots, n\\}\\}$,
+      of lattice points in an $n \times n$ square,
+      $\{(i, j): i \in \{1, 2, \ldots, n\}, j \in \{1, 2, \ldots, n\}\}$,
       and we define the *lattice distance* between two nodes $(i, j)$ and
       $(k, l)$ to be the number of "lattice steps" separating them:
       $d((i, j), (k, l)) = |k - i| + |l - j|$.
@@ -529,14 +684,47 @@ def navigable_small_world_graph(n, p=1, q=1, r=2, dim=2, seed=None):
     .. [1] J. Kleinberg. The small-world phenomenon: An algorithmic
        perspective. Proc. 32nd ACM Symposium on Theory of Computing, 2000.
     """
-    pass
+    if p < 1:
+        raise nx.NetworkXException("p must be >= 1")
+    if q < 0:
+        raise nx.NetworkXException("q must be >= 0")
+    if r < 0:
+        raise nx.NetworkXException("r must be >= 0")
+
+    G = nx.DiGraph()
+    nodes = list(product(range(n), repeat=dim))
+    for p1 in nodes:
+        probs = [0]
+        for p2 in nodes:
+            if p1 == p2:
+                continue
+            d = sum((abs(b - a) for a, b in zip(p1, p2)))
+            if d <= p:
+                G.add_edge(p1, p2)
+            probs.append(d**-r)
+        cdf = list(accumulate(probs))
+        for _ in range(q):
+            target = nodes[bisect_left(cdf, seed.uniform(0, cdf[-1]))]
+            G.add_edge(p1, target)
+    return G
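
A sketch for `navigable_small_world_graph`; nodes are lattice-coordinate tuples:

    import networkx as nx

    G = nx.navigable_small_world_graph(4, p=1, q=2, r=2, dim=2, seed=11)
    assert len(G) == 16 and (0, 0) in G  # 4 x 4 grid plus random long-range arcs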


 @py_random_state(7)
 @nx._dispatchable(graphs=None, returns_graph=True)
-def thresholded_random_geometric_graph(n, radius, theta, dim=2, pos=None,
-    weight=None, p=2, seed=None, *, pos_name='pos', weight_name='weight'):
-    """Returns a thresholded random geometric graph in the unit cube.
+def thresholded_random_geometric_graph(
+    n,
+    radius,
+    theta,
+    dim=2,
+    pos=None,
+    weight=None,
+    p=2,
+    seed=None,
+    *,
+    pos_name="pos",
+    weight_name="weight",
+):
+    r"""Returns a thresholded random geometric graph in the unit cube.

     The thresholded random geometric graph [1] model places `n` nodes
     uniformly at random in the unit cube of dimensions `dim`. Each node
@@ -620,7 +808,7 @@ def thresholded_random_geometric_graph(n, radius, theta, dim=2, pos=None,
     (0, 0) and standard deviation 2

     If weights are not specified they are assigned to nodes by drawing randomly
-    from the exponential distribution with rate parameter :math:`\\lambda=1`.
+    from the exponential distribution with rate parameter :math:`\lambda=1`.
     To specify weights from a different distribution, use the `weight` keyword
     argument::

@@ -638,27 +826,48 @@ def thresholded_random_geometric_graph(n, radius, theta, dim=2, pos=None,
     .. [1] http://cole-maclean.github.io/blog/files/thesis.pdf

     """
-    pass
+    G = nx.empty_graph(n)
+    G.name = f"thresholded_random_geometric_graph({n}, {radius}, {theta}, {dim})"
+    # If no weights are provided, choose them from an exponential
+    # distribution.
+    if weight is None:
+        weight = {v: seed.expovariate(1) for v in G}
+    # If no positions are provided, choose uniformly random vectors in
+    # Euclidean space of the specified dimension.
+    if pos is None:
+        pos = {v: [seed.random() for i in range(dim)] for v in G}
+    # Store the weights and positions as node attributes.
+    nx.set_node_attributes(G, weight, weight_name)
+    nx.set_node_attributes(G, pos, pos_name)
+
+    edges = (
+        (u, v)
+        for u, v in _geometric_edges(G, radius, p, pos_name)
+        if weight[u] + weight[v] >= theta
+    )
+    G.add_edges_from(edges)
+    return G
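
A sketch for `thresholded_random_geometric_graph`; an edge requires both proximity (within `radius`) and a combined weight of at least `theta`:

    import networkx as nx

    G = nx.thresholded_random_geometric_graph(50, 0.3, 0.1, seed=23)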


 @py_random_state(5)
 @nx._dispatchable(graphs=None, returns_graph=True)
-def geometric_soft_configuration_graph(*, beta, n=None, gamma=None,
-    mean_degree=None, kappas=None, seed=None):
-    """Returns a random graph from the geometric soft configuration model.
+def geometric_soft_configuration_graph(
+    *, beta, n=None, gamma=None, mean_degree=None, kappas=None, seed=None
+):
+    r"""Returns a random graph from the geometric soft configuration model.

-    The $\\mathbb{S}^1$ model [1]_ is the geometric soft configuration model
+    The $\mathbb{S}^1$ model [1]_ is the geometric soft configuration model
     which is able to explain many fundamental features of real networks such as
     the small-world property, heterogeneous degree distributions, high levels of
     clustering, and self-similarity.

     In the geometric soft configuration model, a node $i$ is assigned two hidden
-    variables: a hidden degree $\\kappa_i$, quantifying its popularity, influence,
-    or importance, and an angular position $\\theta_i$ in a circle abstracting the
+    variables: a hidden degree $\kappa_i$, quantifying its popularity, influence,
+    or importance, and an angular position $\theta_i$ in a circle abstracting the
     similarity space, where angular distances between nodes are a proxy for their
     similarity. Focusing on the angular position, this model is often called
-    the $\\mathbb{S}^1$ model (a one-dimensional sphere). The circle's radius is
-    adjusted to $R = N/2\\pi$, where $N$ is the number of nodes, so that the density
+    the $\mathbb{S}^1$ model (a one-dimensional sphere). The circle's radius is
+    adjusted to $R = N/2\pi$, where $N$ is the number of nodes, so that the density
     is set to 1 without loss of generality.

     The connection probability between any pair of nodes increases with
@@ -666,40 +875,40 @@ def geometric_soft_configuration_graph(*, beta, n=None, gamma=None,
     and decreases with the angular distance between the two nodes.
     Specifically, nodes $i$ and $j$ are connected with the probability

-    $p_{ij} = \\frac{1}{1 + \\frac{d_{ij}^\\beta}{\\left(\\mu \\kappa_i \\kappa_j\\right)^{\\max(1, \\beta)}}}$
+    $p_{ij} = \frac{1}{1 + \frac{d_{ij}^\beta}{\left(\mu \kappa_i \kappa_j\right)^{\max(1, \beta)}}}$

-    where $d_{ij} = R\\Delta\\theta_{ij}$ is the arc length of the circle between
-    nodes $i$ and $j$ separated by an angular distance $\\Delta\\theta_{ij}$.
-    Parameters $\\mu$ and $\\beta$ (also called inverse temperature) control the
+    where $d_{ij} = R\Delta\theta_{ij}$ is the arc length of the circle between
+    nodes $i$ and $j$ separated by an angular distance $\Delta\theta_{ij}$.
+    Parameters $\mu$ and $\beta$ (also called inverse temperature) control the
     average degree and the clustering coefficient, respectively.

     It can be shown [2]_ that the model undergoes a structural phase transition
-    at $\\beta=1$ so that for $\\beta<1$ networks are unclustered in the thermodynamic
-    limit (when $N\\to \\infty$) whereas for $\\beta>1$ the ensemble generates
+    at $\beta=1$ so that for $\beta<1$ networks are unclustered in the thermodynamic
+    limit (when $N\to \infty$) whereas for $\beta>1$ the ensemble generates
     networks with finite clustering coefficient.

-    The $\\mathbb{S}^1$ model can be expressed as a purely geometric model
-    $\\mathbb{H}^2$ in the hyperbolic plane [3]_ by mapping the hidden degree of
+    The $\mathbb{S}^1$ model can be expressed as a purely geometric model
+    $\mathbb{H}^2$ in the hyperbolic plane [3]_ by mapping the hidden degree of
     each node into a radial coordinate as

-    $r_i = \\hat{R} - \\frac{2 \\max(1, \\beta)}{\\beta \\zeta} \\ln \\left(\\frac{\\kappa_i}{\\kappa_0}\\right)$
+    $r_i = \hat{R} - \frac{2 \max(1, \beta)}{\beta \zeta} \ln \left(\frac{\kappa_i}{\kappa_0}\right)$

-    where $\\hat{R}$ is the radius of the hyperbolic disk and $\\zeta$ is the curvature,
+    where $\hat{R}$ is the radius of the hyperbolic disk and $\zeta$ is the curvature,

-    $\\hat{R} = \\frac{2}{\\zeta} \\ln \\left(\\frac{N}{\\pi}\\right)
-    - \\frac{2\\max(1, \\beta)}{\\beta \\zeta} \\ln (\\mu \\kappa_0^2)$
+    $\hat{R} = \frac{2}{\zeta} \ln \left(\frac{N}{\pi}\right)
+    - \frac{2\max(1, \beta)}{\beta \zeta} \ln (\mu \kappa_0^2)$

     The connection probability then reads

-    $p_{ij} = \\frac{1}{1 + \\exp\\left({\\frac{\\beta\\zeta}{2} (x_{ij} - \\hat{R})}\\right)}$
+    $p_{ij} = \frac{1}{1 + \exp\left({\frac{\beta\zeta}{2} (x_{ij} - \hat{R})}\right)}$

     where

-    $x_{ij} = r_i + r_j + \\frac{2}{\\zeta} \\ln \\frac{\\Delta\\theta_{ij}}{2}$
+    $x_{ij} = r_i + r_j + \frac{2}{\zeta} \ln \frac{\Delta\theta_{ij}}{2}$

     is a good approximation of the hyperbolic distance between two nodes separated
-    by an angular distance $\\Delta\\theta_{ij}$ with radial coordinates $r_i$ and $r_j$.
-    For $\\beta > 1$, the curvature $\\zeta = 1$, for $\\beta < 1$, $\\zeta = \\beta^{-1}$.
+    by an angular distance $\Delta\theta_{ij}$ with radial coordinates $r_i$ and $r_j$.
+    For $\beta > 1$, the curvature $\zeta = 1$, for $\beta < 1$, $\zeta = \beta^{-1}$.


     Parameters
@@ -735,7 +944,7 @@ def geometric_soft_configuration_graph(*, beta, n=None, gamma=None,

         - ``kappa`` that represents the hidden degree.

-        - ``theta`` the position in the similarity space ($\\mathbb{S}^1$) which is
+        - ``theta`` the position in the similarity space ($\mathbb{S}^1$) which is
           also the angular position in the hyperbolic plane.

         - ``radius`` the radial position in the hyperbolic plane
@@ -748,7 +957,7 @@ def geometric_soft_configuration_graph(*, beta, n=None, gamma=None,

     >>> G = nx.geometric_soft_configuration_graph(beta=1.5, n=100, gamma=2.7, mean_degree=5)

-    Create a geometric soft configuration graph with 100 nodes. The $\\beta$ parameter
+    Create a geometric soft configuration graph with 100 nodes. The $\beta$ parameter
     is set to 1.5 and the exponent of the powerlaw distribution of the hidden
     degrees is 2.7 with mean value of 5.

@@ -757,8 +966,8 @@ def geometric_soft_configuration_graph(*, beta, n=None, gamma=None,
     >>> kappas = {i: 10 for i in range(100)}
     >>> G = nx.geometric_soft_configuration_graph(beta=2.5, kappas=kappas)

-    Create a geometric soft configuration graph with 100 nodes. The $\\beta$ parameter
-    is set to 2.5 and all nodes with hidden degree $\\kappa=10$.
+    Create a geometric soft configuration graph with 100 nodes. The $\beta$ parameter
+    is set to 2.5 and all nodes with hidden degree $\kappa=10$.


     References
@@ -773,4 +982,66 @@ def geometric_soft_configuration_graph(*, beta, n=None, gamma=None,
        Hyperbolic geometry of complex networks. Physical Review E, 82(3), 036106.

     """
-    pass
+    if beta <= 0:
+        raise nx.NetworkXError("The parameter beta cannot be smaller or equal to 0.")
+
+    if kappas is not None:
+        if not all((n is None, gamma is None, mean_degree is None)):
+            raise nx.NetworkXError(
+                "When kappas is input, n, gamma and mean_degree must not be."
+            )
+
+        n = len(kappas)
+        mean_degree = sum(kappas) / len(kappas)
+    else:
+        if any((n is None, gamma is None, mean_degree is None)):
+            raise nx.NetworkXError(
+                "Please provide either kappas, or all 3 of: n, gamma and mean_degree."
+            )
+
+        # Generate `n` hidden degrees from a powerlaw distribution
+        # with given exponent `gamma` and mean value `mean_degree`
+        gam_ratio = (gamma - 2) / (gamma - 1)
+        kappa_0 = mean_degree * gam_ratio * (1 - 1 / n) / (1 - 1 / n**gam_ratio)
+        base = 1 - 1 / n
+        power = 1 / (1 - gamma)
+        kappas = {i: kappa_0 * (1 - seed.random() * base) ** power for i in range(n)}
+
+    G = nx.Graph()
+    R = n / (2 * math.pi)
+
+    # Approximate values for mu in the thermodynamic limit (when n -> infinity)
+    if beta > 1:
+        mu = beta * math.sin(math.pi / beta) / (2 * math.pi * mean_degree)
+    elif beta == 1:
+        mu = 1 / (2 * mean_degree * math.log(n))
+    else:
+        mu = (1 - beta) / (2**beta * mean_degree * n ** (1 - beta))
+
+    # Generate random positions on a circle
+    thetas = {k: seed.uniform(0, 2 * math.pi) for k in kappas}
+
+    for u in kappas:
+        for v in list(G):
+            angle = math.pi - math.fabs(math.pi - math.fabs(thetas[u] - thetas[v]))
+            dij = math.pow(R * angle, beta)
+            mu_kappas = math.pow(mu * kappas[u] * kappas[v], max(1, beta))
+            p_ij = 1 / (1 + dij / mu_kappas)
+
+            # Create an edge with a certain connection probability
+            if seed.random() < p_ij:
+                G.add_edge(u, v)
+        G.add_node(u)
+
+    nx.set_node_attributes(G, thetas, "theta")
+    nx.set_node_attributes(G, kappas, "kappa")
+
+    # Map hidden degrees into the radial coordinates
+    zeta = 1 if beta > 1 else 1 / beta
+    kappa_min = min(kappas.values())
+    R_c = 2 * max(1, beta) / (beta * zeta)
+    R_hat = (2 / zeta) * math.log(n / math.pi) - R_c * math.log(mu * kappa_min)
+    radii = {node: R_hat - R_c * math.log(kappa) for node, kappa in kappas.items()}
+    nx.set_node_attributes(G, radii, "radius")
+
+    return G
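
A sketch for `geometric_soft_configuration_graph`; each node ends up with the three hidden-variable attributes set above:

    import networkx as nx

    G = nx.geometric_soft_configuration_graph(
        beta=1.5, n=100, gamma=2.7, mean_degree=5, seed=13
    )
    assert set(G.nodes[0]) == {"theta", "kappa", "radius"}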
diff --git a/networkx/generators/harary_graph.py b/networkx/generators/harary_graph.py
index f7a527567..591587d3a 100644
--- a/networkx/generators/harary_graph.py
+++ b/networkx/generators/harary_graph.py
@@ -14,9 +14,11 @@ References
        Proc. Nat. Acad. Sci. USA 48, 1142-1146, 1962.

 """
+
 import networkx as nx
 from networkx.exception import NetworkXError
-__all__ = ['hnm_harary_graph', 'hkn_harary_graph']
+
+__all__ = ["hnm_harary_graph", "hkn_harary_graph"]


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -63,7 +65,52 @@ def hnm_harary_graph(n, m, create_using=None):
     .. [2] Harary, F. "The Maximum Connectivity of a Graph."
        Proc. Nat. Acad. Sci. USA 48, 1142-1146, 1962.
     """
-    pass
+
+    if n < 1:
+        raise NetworkXError("The number of nodes must be >= 1!")
+    if m < n - 1:
+        raise NetworkXError("The number of edges must be >= n - 1 !")
+    if m > n * (n - 1) // 2:
+        raise NetworkXError("The number of edges must be <= n(n-1)/2")
+
+    # Construct an empty graph with n nodes first
+    H = nx.empty_graph(n, create_using)
+    # Get the floor of average node degree
+    d = 2 * m // n
+
+    # Test the parity of n and d
+    if (n % 2 == 0) or (d % 2 == 0):
+        # Start with a regular graph of d degrees
+        offset = d // 2
+        for i in range(n):
+            for j in range(1, offset + 1):
+                H.add_edge(i, (i - j) % n)
+                H.add_edge(i, (i + j) % n)
+        if d & 1:
+            # in case d is odd; n must be even in this case
+            half = n // 2
+            for i in range(half):
+                # add edges diagonally
+                H.add_edge(i, i + half)
+        # Get the remainder of 2*m modulo n
+        r = 2 * m % n
+        if r > 0:
+            # add remaining edges at offset+1
+            for i in range(r // 2):
+                H.add_edge(i, i + offset + 1)
+    else:
+        # Start with a regular graph of (d - 1) degrees
+        offset = (d - 1) // 2
+        for i in range(n):
+            for j in range(1, offset + 1):
+                H.add_edge(i, (i - j) % n)
+                H.add_edge(i, (i + j) % n)
+        half = n // 2
+        for i in range(m - n * offset):
+            # add the remaining m - n*offset edges between i and i+half
+            H.add_edge(i, (i + half) % n)
+
+    return H


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -109,4 +156,44 @@ def hkn_harary_graph(k, n, create_using=None):
     .. [2] Harary, F. "The Maximum Connectivity of a Graph."
       Proc. Nat. Acad. Sci. USA 48, 1142-1146, 1962.
     """
-    pass
+
+    if k < 1:
+        raise NetworkXError("The node connectivity must be >= 1!")
+    if n < k + 1:
+        raise NetworkXError("The number of nodes must be >= k+1 !")
+
+    # in case of connectivity 1, simply return the path graph
+    if k == 1:
+        H = nx.path_graph(n, create_using)
+        return H
+
+    # Construct an empty graph with n nodes first
+    H = nx.empty_graph(n, create_using)
+
+    # Test the parity of k and n
+    if (k % 2 == 0) or (n % 2 == 0):
+        # Construct a regular graph with k degrees
+        offset = k // 2
+        for i in range(n):
+            for j in range(1, offset + 1):
+                H.add_edge(i, (i - j) % n)
+                H.add_edge(i, (i + j) % n)
+        if k & 1:
+            # odd degree; n must be even in this case
+            half = n // 2
+            for i in range(half):
+                # add edges diagonally
+                H.add_edge(i, i + half)
+    else:
+        # Construct a regular graph with (k - 1) degrees
+        offset = (k - 1) // 2
+        for i in range(n):
+            for j in range(1, offset + 1):
+                H.add_edge(i, (i - j) % n)
+                H.add_edge(i, (i + j) % n)
+        half = n // 2
+        for i in range(half + 1):
+            # add half+1 edges between i and i+half
+            H.add_edge(i, (i + half) % n)
+
+    return H
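
A sketch for the two Harary constructions; the asserted values follow from the degree bookkeeping above (d = 2m // n = 4 gives a circulant with exactly m edges, and the H(k, n) construction attains node connectivity k):

    import networkx as nx

    G1 = nx.hnm_harary_graph(10, 20)  # n = 10 nodes, exactly m = 20 edges
    assert G1.number_of_edges() == 20
    G2 = nx.hkn_harary_graph(4, 10)   # node connectivity k = 4 on 10 nodes
    assert nx.node_connectivity(G2) == 4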
diff --git a/networkx/generators/internet_as_graphs.py b/networkx/generators/internet_as_graphs.py
index 00d4a0144..449d54376 100644
--- a/networkx/generators/internet_as_graphs.py
+++ b/networkx/generators/internet_as_graphs.py
@@ -1,7 +1,9 @@
 """Generates graphs resembling the Internet Autonomous System network"""
+
 import networkx as nx
 from networkx.utils import py_random_state
-__all__ = ['random_internet_as_graph']
+
+__all__ = ["random_internet_as_graph"]


 def uniform_int_from_avg(a, m, seed):
@@ -17,7 +19,18 @@ def uniform_int_from_avg(a, m, seed):
     X = X1 + X2; X1~U(a,floor(b)), X2~B(p)
     E[X] = E[X1] + E[X2] = (floor(b)+a)/2 + (b-floor(b))/2 = (b+a)/2 = m
     """
-    pass
+
+    from math import floor
+
+    assert m >= a
+    b = 2 * m - a
+    p = (b - floor(b)) / 2
+    X1 = round(seed.random() * (floor(b) - a) + a)
+    if seed.random() < p:
+        X2 = 1
+    else:
+        X2 = 0
+    return X1 + X2


 def choose_pref_attach(degs, seed):
@@ -38,7 +51,21 @@ def choose_pref_attach(degs, seed):
     v: object
         A key of degs or None if degs is empty
     """
-    pass
+
+    if len(degs) == 0:
+        return None
+    s = sum(degs.values())
+    if s == 0:
+        return seed.choice(list(degs.keys()))
+    v = seed.random() * s
+
+    nodes = list(degs.keys())
+    i = 0
+    acc = degs[nodes[i]]
+    while v > acc:
+        i += 1
+        acc += degs[nodes[i]]
+    return nodes[i]


 class AS_graph_generator:
@@ -65,20 +92,24 @@ class AS_graph_generator:
         BGP: The Role of Topology Growth," in IEEE Journal on Selected Areas
         in Communications, vol. 28, no. 8, pp. 1250-1261, October 2010.
         """
+
         self.seed = seed
-        self.n_t = min(n, round(self.seed.random() * 2 + 4))
-        self.n_m = round(0.15 * n)
-        self.n_cp = round(0.05 * n)
-        self.n_c = max(0, n - self.n_t - self.n_m - self.n_cp)
-        self.d_m = 2 + 2.5 * n / 10000
-        self.d_cp = 2 + 1.5 * n / 10000
-        self.d_c = 1 + 5 * n / 100000
-        self.p_m_m = 1 + 2 * n / 10000
-        self.p_cp_m = 0.2 + 2 * n / 10000
-        self.p_cp_cp = 0.05 + 2 * n / 100000
-        self.t_m = 0.375
-        self.t_cp = 0.375
-        self.t_c = 0.125
+        self.n_t = min(n, round(self.seed.random() * 2 + 4))  # num of T nodes
+        self.n_m = round(0.15 * n)  # number of M nodes
+        self.n_cp = round(0.05 * n)  # number of CP nodes
+        self.n_c = max(0, n - self.n_t - self.n_m - self.n_cp)  # number of C nodes
+
+        self.d_m = 2 + (2.5 * n) / 10000  # average multihoming degree for M nodes
+        self.d_cp = 2 + (1.5 * n) / 10000  # avg multihoming degree for CP nodes
+        self.d_c = 1 + (5 * n) / 100000  # average multihoming degree for C nodes
+
+        self.p_m_m = 1 + (2 * n) / 10000  # avg num of peer edges between M and M
+        self.p_cp_m = 0.2 + (2 * n) / 10000  # avg num of peer edges between CP, M
+        self.p_cp_cp = 0.05 + (2 * n) / 100000  # avg num of peer edges btwn CP, CP
+
+        self.t_m = 0.375  # probability M's provider is T
+        self.t_cp = 0.375  # probability CP's provider is T
+        self.t_c = 0.125  # probability C's provider is T

     def t_graph(self):
         """Generates the core mesh network of tier one nodes of a AS graph.
@@ -88,7 +119,25 @@ class AS_graph_generator:
         G: Networkx Graph
             Core network
         """
-        pass
+
+        self.G = nx.Graph()
+        for i in range(self.n_t):
+            self.G.add_node(i, type="T")
+            for r in self.regions:
+                self.regions[r].add(i)
+            for j in self.G.nodes():
+                if i != j:
+                    self.add_edge(i, j, "peer")
+            self.customers[i] = set()
+            self.providers[i] = set()
+        return self.G
+
+    def add_edge(self, i, j, kind):
+        if kind == "transit":
+            customer = str(i)
+        else:
+            customer = "none"
+        self.G.add_edge(i, j, type=kind, customer=customer)

     def choose_peer_pref_attach(self, node_list):
         """Pick a node with a probability weighted by its peer degree.
@@ -96,7 +145,11 @@ class AS_graph_generator:
         Pick a node from node_list with preferential attachment
         computed only on their peer degree
         """
-        pass
+
+        d = {}
+        for n in node_list:
+            d[n] = self.G.nodes[n]["peers"]
+        return choose_pref_attach(d, self.seed)

     def choose_node_pref_attach(self, node_list):
         """Pick a node with a probability weighted by its degree.
@@ -104,11 +157,18 @@ class AS_graph_generator:
         Pick a node from node_list with preferential attachment
         computed on their degree
         """
-        pass
+
+        degs = dict(self.G.degree(node_list))
+        return choose_pref_attach(degs, self.seed)

     def add_customer(self, i, j):
         """Keep the dictionaries 'customers' and 'providers' consistent."""
-        pass
+
+        self.customers[j].add(i)
+        self.providers[i].add(j)
+        for z in self.providers[j]:
+            self.customers[z].add(i)
+            self.providers[i].add(z)

     def add_node(self, i, kind, reg2prob, avg_deg, t_edge_prob):
         """Add a node and its customer transit edges to the graph.
@@ -133,7 +193,41 @@ class AS_graph_generator:
         i: object
             Identifier of the new node
         """
-        pass
+
+        regs = 1  # regions in which node resides
+        if self.seed.random() < reg2prob:  # node is in two regions
+            regs = 2
+        node_options = set()
+
+        self.G.add_node(i, type=kind, peers=0)
+        self.customers[i] = set()
+        self.providers[i] = set()
+        self.nodes[kind].add(i)
+        for r in self.seed.sample(list(self.regions), regs):
+            node_options = node_options.union(self.regions[r])
+            self.regions[r].add(i)
+
+        edge_num = uniform_int_from_avg(1, avg_deg, self.seed)
+
+        t_options = node_options.intersection(self.nodes["T"])
+        m_options = node_options.intersection(self.nodes["M"])
+        if i in m_options:
+            m_options.remove(i)
+        d = 0
+        while d < edge_num and (len(t_options) > 0 or len(m_options) > 0):
+            if len(m_options) == 0 or (
+                len(t_options) > 0 and self.seed.random() < t_edge_prob
+            ):  # add edge to a T node
+                j = self.choose_node_pref_attach(t_options)
+                t_options.remove(j)
+            else:
+                j = self.choose_node_pref_attach(m_options)
+                m_options.remove(j)
+            self.add_edge(i, j, "transit")
+            self.add_customer(i, j)
+            d += 1
+
+        return i

     def add_m_peering_link(self, m, to_kind):
         """Add a peering link between two middle tier (M) nodes.
@@ -152,7 +246,28 @@ class AS_graph_generator:
         -------
         success: boolean
         """
-        pass
+
+        # candidates are of type 'M' and are not customers of m
+        node_options = self.nodes["M"].difference(self.customers[m])
+        # candidates are not providers of m
+        node_options = node_options.difference(self.providers[m])
+        # remove self
+        if m in node_options:
+            node_options.remove(m)
+
+        # remove candidates we are already connected to
+        for j in self.G.neighbors(m):
+            if j in node_options:
+                node_options.remove(j)
+
+        if len(node_options) > 0:
+            j = self.choose_peer_pref_attach(node_options)
+            self.add_edge(m, j, "peer")
+            self.G.nodes[m]["peers"] += 1
+            self.G.nodes[j]["peers"] += 1
+            return True
+        else:
+            return False

     def add_cp_peering_link(self, cp, to_kind):
         """Add a peering link to a content provider (CP) node.
@@ -171,7 +286,35 @@ class AS_graph_generator:
         -------
         success: boolean
         """
-        pass
+
+        node_options = set()
+        for r in self.regions:  # options include nodes in the same region(s)
+            if cp in self.regions[r]:
+                node_options = node_options.union(self.regions[r])
+
+        # options are restricted to the indicated kind ('M' or 'CP')
+        node_options = self.nodes[to_kind].intersection(node_options)
+
+        # remove self
+        if cp in node_options:
+            node_options.remove(cp)
+
+        # remove nodes that are cp's providers
+        node_options = node_options.difference(self.providers[cp])
+
+        # remove nodes we are already connected to
+        for j in self.G.neighbors(cp):
+            if j in node_options:
+                node_options.remove(j)
+
+        if len(node_options) > 0:
+            j = self.seed.sample(list(node_options), 1)[0]
+            self.add_edge(cp, j, "peer")
+            self.G.nodes[cp]["peers"] += 1
+            self.G.nodes[j]["peers"] += 1
+            return True
+        else:
+            return False

     def graph_regions(self, rn):
         """Initializes AS network regions.
@@ -181,11 +324,28 @@ class AS_graph_generator:
         rn: integer
             Number of regions
         """
-        pass
+
+        self.regions = {}
+        for i in range(rn):
+            self.regions["REG" + str(i)] = set()

     def add_peering_links(self, from_kind, to_kind):
         """Utility function to add peering links among node groups."""
-        pass
+        peer_link_method = None
+        if from_kind == "M":
+            peer_link_method = self.add_m_peering_link
+            m = self.p_m_m
+        if from_kind == "CP":
+            peer_link_method = self.add_cp_peering_link
+            if to_kind == "M":
+                m = self.p_cp_m
+            else:
+                m = self.p_cp_cp
+
+        for i in self.nodes[from_kind]:
+            num = uniform_int_from_avg(0, m, self.seed)
+            for _ in range(num):
+                peer_link_method(i, to_kind)

     def generate(self):
         """Generates a random AS network graph as described in [1].
@@ -209,7 +369,31 @@ class AS_graph_generator:
         BGP: The Role of Topology Growth," in IEEE Journal on Selected Areas
         in Communications, vol. 28, no. 8, pp. 1250-1261, October 2010.
         """
-        pass
+
+        self.graph_regions(5)
+        self.customers = {}
+        self.providers = {}
+        self.nodes = {"T": set(), "M": set(), "CP": set(), "C": set()}
+
+        self.t_graph()
+        self.nodes["T"] = set(self.G.nodes())
+
+        i = len(self.nodes["T"])
+        for _ in range(self.n_m):
+            self.nodes["M"].add(self.add_node(i, "M", 0.2, self.d_m, self.t_m))
+            i += 1
+        for _ in range(self.n_cp):
+            self.nodes["CP"].add(self.add_node(i, "CP", 0.05, self.d_cp, self.t_cp))
+            i += 1
+        for _ in range(self.n_c):
+            self.nodes["C"].add(self.add_node(i, "C", 0, self.d_c, self.t_c))
+            i += 1
+
+        self.add_peering_links("M", "M")
+        self.add_peering_links("CP", "M")
+        self.add_peering_links("CP", "CP")
+
+        return self.G


 @py_random_state(1)
@@ -251,4 +435,7 @@ def random_internet_as_graph(n, seed=None):
        BGP: The Role of Topology Growth," in IEEE Journal on Selected Areas
        in Communications, vol. 28, no. 8, pp. 1250-1261, October 2010.
     """
-    pass
+
+    GG = AS_graph_generator(n, seed)
+    G = GG.generate()
+    return G
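
A sketch for `random_internet_as_graph`; every node is tagged with one of the four AS roles used by the generator above:

    import networkx as nx

    G = nx.random_internet_as_graph(200, seed=17)
    roles = {d["type"] for _, d in G.nodes(data=True)}
    assert roles <= {"T", "M", "CP", "C"}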
diff --git a/networkx/generators/intersection.py b/networkx/generators/intersection.py
index 9cab57581..2ed3a5fa3 100644
--- a/networkx/generators/intersection.py
+++ b/networkx/generators/intersection.py
@@ -3,8 +3,12 @@ Generators for random intersection graphs.
 """
 import networkx as nx
 from networkx.utils import py_random_state
-__all__ = ['uniform_random_intersection_graph',
-    'k_random_intersection_graph', 'general_random_intersection_graph']
+
+__all__ = [
+    "uniform_random_intersection_graph",
+    "k_random_intersection_graph",
+    "general_random_intersection_graph",
+]


 @py_random_state(3)
@@ -37,7 +41,10 @@ def uniform_random_intersection_graph(n, m, p, seed=None):
        An equivalence theorem relating the evolution of the g(n, m, p)
        and g(n, p) models. Random Struct. Algorithms 16, 2 (2000), 156–176.
     """
-    pass
+    from networkx.algorithms import bipartite
+
+    G = bipartite.random_graph(n, m, p, seed)
+    return nx.projected_graph(G, range(n))


 @py_random_state(3)
@@ -68,7 +75,12 @@ def k_random_intersection_graph(n, m, k, seed=None):
        Two models of random intersection graphs and their applications.
        Electronic Notes in Discrete Mathematics 10 (2001), 129--132.
     """
-    pass
+    G = nx.empty_graph(n + m)
+    mset = range(n, n + m)
+    for v in range(n):
+        targets = seed.sample(mset, k)
+        G.add_edges_from(zip([v] * len(targets), targets))
+    return nx.projected_graph(G, range(n))


 @py_random_state(3)
@@ -101,4 +113,12 @@ def general_random_intersection_graph(n, m, p, seed=None):
        J. Karhum¨aki, A. Lepist¨o, and D. Sannella, Eds., vol. 3142
        of Lecture Notes in Computer Science, Springer, pp. 1029–1040.
     """
-    pass
+    if len(p) != m:
+        raise ValueError("Probability list p must have m elements.")
+    G = nx.empty_graph(n + m)
+    mset = range(n, n + m)
+    for u in range(n):
+        for v, q in zip(mset, p):
+            if seed.random() < q:
+                G.add_edge(u, v)
+    return nx.projected_graph(G, range(n))
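
A sketch for the intersection-graph generators; only the n "left" nodes survive the bipartite projection:

    import networkx as nx

    G = nx.uniform_random_intersection_graph(10, 5, 0.3, seed=19)
    assert len(G) == 10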
diff --git a/networkx/generators/interval_graph.py b/networkx/generators/interval_graph.py
index 19bfd1716..2a3d76090 100644
--- a/networkx/generators/interval_graph.py
+++ b/networkx/generators/interval_graph.py
@@ -2,8 +2,10 @@
 Generators for interval graph.
 """
 from collections.abc import Sequence
+
 import networkx as nx
-__all__ = ['interval_graph']
+
+__all__ = ["interval_graph"]


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -43,4 +45,25 @@ def interval_graph(intervals):
         if `intervals` contains an interval (min1, max1)
         such that min1 > max1
     """
-    pass
+    intervals = list(intervals)
+    for interval in intervals:
+        if not (isinstance(interval, Sequence) and len(interval) == 2):
+            raise TypeError(
+                "Each interval must have length 2, and be a "
+                "collections.abc.Sequence such as tuple or list."
+            )
+        if interval[0] > interval[1]:
+            raise ValueError(f"Interval must have lower value first. Got {interval}")
+
+    graph = nx.Graph()
+
+    tupled_intervals = [tuple(interval) for interval in intervals]
+    graph.add_nodes_from(tupled_intervals)
+
+    while tupled_intervals:
+        min1, max1 = interval1 = tupled_intervals.pop()
+        for interval2 in tupled_intervals:
+            min2, max2 = interval2
+            if max1 >= min2 and max2 >= min1:
+                graph.add_edge(interval1, interval2)
+    return graph
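
For example, overlapping intervals become adjacent nodes:

    import networkx as nx

    intervals = [(1, 4), (3, 5), (4, 7), (6, 9)]
    G = nx.interval_graph(intervals)
    # (1, 4) and (4, 7) share the point 4, so they are adjacent too
    print(sorted(G.edges()))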
diff --git a/networkx/generators/joint_degree_seq.py b/networkx/generators/joint_degree_seq.py
index 28dd40509..c426df944 100644
--- a/networkx/generators/joint_degree_seq.py
+++ b/networkx/generators/joint_degree_seq.py
@@ -1,8 +1,14 @@
 """Generate graphs with a given joint degree and directed joint degree"""
+
 import networkx as nx
 from networkx.utils import py_random_state
-__all__ = ['is_valid_joint_degree', 'is_valid_directed_joint_degree',
-    'joint_degree_graph', 'directed_joint_degree_graph']
+
+__all__ = [
+    "is_valid_joint_degree",
+    "is_valid_directed_joint_degree",
+    "joint_degree_graph",
+    "directed_joint_degree_graph",
+]


 @nx._dispatchable(graphs=None)
@@ -45,7 +51,31 @@ def is_valid_joint_degree(joint_degrees):
        prescribed joint degree distribution", Journal of Experimental
        Algorithmics, 2012.
     """
-    pass
+
+    degree_count = {}
+    for k in joint_degrees:
+        if k > 0:
+            k_size = sum(joint_degrees[k].values()) / k
+            if not k_size.is_integer():
+                return False
+            degree_count[k] = k_size
+
+    for k in joint_degrees:
+        for l in joint_degrees[k]:
+            if not float(joint_degrees[k][l]).is_integer():
+                return False
+
+            if (k != l) and (joint_degrees[k][l] > degree_count[k] * degree_count[l]):
+                return False
+            elif k == l:
+                if joint_degrees[k][k] > degree_count[k] * (degree_count[k] - 1):
+                    return False
+                if joint_degrees[k][k] % 2 != 0:
+                    return False
+
+    # if all above conditions have been satisfied then the input
+    # joint degree is realizable as a simple graph.
+    return True


 def _neighbor_switch(G, w, unsat, h_node_residual, avoid_node_id=None):
@@ -77,7 +107,38 @@ def _neighbor_switch(G, w, unsat, h_node_residual, avoid_node_id=None):
     .. [1] M. Gjoka, B. Tillman, A. Markopoulou, "Construction of Simple
        Graphs with a Target Joint Degree Matrix and Beyond", IEEE Infocom, '15
     """
-    pass
+
+    if (avoid_node_id is None) or (h_node_residual[avoid_node_id] > 1):
+        # select unsaturated node w_prime that has the same degree as w
+        w_prime = next(iter(unsat))
+    else:
+        # assume that the node pair (v,w) has been selected for connection. if
+        # - neighbor_switch is called for node w,
+        # - nodes v and w have the same degree,
+        # - node v=avoid_node_id has only one stub left,
+        # then prevent v=avoid_node_id from being selected as w_prime.
+
+        iter_var = iter(unsat)
+        while True:
+            w_prime = next(iter_var)
+            if w_prime != avoid_node_id:
+                break
+
+    # select switch_node, a neighbor of w, that is not connected to w_prime
+    w_prime_neighbs = G[w_prime]  # slightly faster declaring this variable
+    for v in G[w]:
+        if (v not in w_prime_neighbs) and (v != w_prime):
+            switch_node = v
+            break
+
+    # remove edge (w,switch_node), add edge (w_prime,switch_node) and update
+    # data structures
+    G.remove_edge(w, switch_node)
+    G.add_edge(w_prime, switch_node)
+    h_node_residual[w] += 1
+    h_node_residual[w_prime] -= 1
+    if h_node_residual[w_prime] == 0:
+        unsat.remove(w_prime)


 @py_random_state(1)
@@ -138,7 +199,90 @@ def joint_degree_graph(joint_degrees, seed=None):
     >>> G = nx.joint_degree_graph(joint_degrees)
     >>>
     """
-    pass
+
+    if not is_valid_joint_degree(joint_degrees):
+        msg = "Input joint degree dict not realizable as a simple graph"
+        raise nx.NetworkXError(msg)
+
+    # compute degree count from joint_degrees
+    degree_count = {k: sum(l.values()) // k for k, l in joint_degrees.items() if k > 0}
+
+    # start with empty N-node graph
+    N = sum(degree_count.values())
+    G = nx.empty_graph(N)
+
+    # for a given degree group, keep the list of all node ids
+    h_degree_nodelist = {}
+
+    # for a given node, keep track of the remaining stubs
+    h_node_residual = {}
+
+    # populate h_degree_nodelist and h_node_residual
+    nodeid = 0
+    for degree, num_nodes in degree_count.items():
+        h_degree_nodelist[degree] = range(nodeid, nodeid + num_nodes)
+        for v in h_degree_nodelist[degree]:
+            h_node_residual[v] = degree
+        nodeid += int(num_nodes)
+
+    # iterate over every degree pair (k,l) and add the number of edges given
+    # for each pair
+    for k in joint_degrees:
+        for l in joint_degrees[k]:
+            # n_edges_add is the number of edges to add for the
+            # degree pair (k,l)
+            n_edges_add = joint_degrees[k][l]
+
+            if (n_edges_add > 0) and (k >= l):
+                # number of nodes with degree k and l
+                k_size = degree_count[k]
+                l_size = degree_count[l]
+
+                # k_nodes and l_nodes consist of all nodes of degree k and l
+                k_nodes = h_degree_nodelist[k]
+                l_nodes = h_degree_nodelist[l]
+
+                # k_unsat and l_unsat consist of nodes of degree k and l that
+                # are unsaturated (nodes that have at least 1 available stub)
+                k_unsat = {v for v in k_nodes if h_node_residual[v] > 0}
+
+                if k != l:
+                    l_unsat = {w for w in l_nodes if h_node_residual[w] > 0}
+                else:
+                    l_unsat = k_unsat
+                    n_edges_add = joint_degrees[k][l] // 2
+
+                while n_edges_add > 0:
+                    # randomly pick nodes v and w that have degrees k and l
+                    v = k_nodes[seed.randrange(k_size)]
+                    w = l_nodes[seed.randrange(l_size)]
+
+                    # if nodes v and w are disconnected then attempt to connect
+                    if not G.has_edge(v, w) and (v != w):
+                        # if node v has no free stubs then do neighbor switch
+                        if h_node_residual[v] == 0:
+                            _neighbor_switch(G, v, k_unsat, h_node_residual)
+
+                        # if node w has no free stubs then do neighbor switch
+                        if h_node_residual[w] == 0:
+                            if k != l:
+                                _neighbor_switch(G, w, l_unsat, h_node_residual)
+                            else:
+                                _neighbor_switch(
+                                    G, w, l_unsat, h_node_residual, avoid_node_id=v
+                                )
+
+                        # add edge (v, w) and update data structures
+                        G.add_edge(v, w)
+                        h_node_residual[v] -= 1
+                        h_node_residual[w] -= 1
+                        n_edges_add -= 1
+
+                        if h_node_residual[v] == 0:
+                            k_unsat.discard(v)
+                        if h_node_residual[w] == 0:
+                            l_unsat.discard(w)
+    return G


 @nx._dispatchable(graphs=None)
@@ -179,11 +323,39 @@ def is_valid_directed_joint_degree(in_degrees, out_degrees, nkk):
     [1] B. Tillman, A. Markopoulou, C. T. Butts & M. Gjoka,
         "Construction of Directed 2K Graphs". In Proc. of KDD 2017.
     """
-    pass
-
-
-def _directed_neighbor_switch(G, w, unsat, h_node_residual_out, chords,
-    h_partition_in, partition):
+    V = {}  # number of nodes with in/out degree.
+    forbidden = {}
+    if len(in_degrees) != len(out_degrees):
+        return False
+
+    for idx in range(len(in_degrees)):
+        i = in_degrees[idx]
+        o = out_degrees[idx]
+        V[(i, 0)] = V.get((i, 0), 0) + 1
+        V[(o, 1)] = V.get((o, 1), 0) + 1
+
+        forbidden[(o, i)] = forbidden.get((o, i), 0) + 1
+
+    S = {}  # number of edges going from in/out degree nodes.
+    for k in nkk:
+        for l in nkk[k]:
+            val = nkk[k][l]
+            if not float(val).is_integer():  # condition 1
+                return False
+
+            if val > 0:
+                S[(k, 1)] = S.get((k, 1), 0) + val
+                S[(l, 0)] = S.get((l, 0), 0) + val
+                # condition 3
+                if val + forbidden.get((k, l), 0) > V[(k, 1)] * V[(l, 0)]:
+                    return False
+
+    return all(S[s] / s[0] == V[s] for s in S)
+
+
+def _directed_neighbor_switch(
+    G, w, unsat, h_node_residual_out, chords, h_partition_in, partition
+):
     """Releases one free stub for node w, while preserving joint degree in G.

     Parameters
@@ -217,11 +389,36 @@ def _directed_neighbor_switch(G, w, unsat, h_node_residual_out, chords,
     [1] B. Tillman, A. Markopoulou, C. T. Butts & M. Gjoka,
         "Construction of Directed 2K Graphs". In Proc. of KDD 2017.
     """
-    pass
-
-
-def _directed_neighbor_switch_rev(G, w, unsat, h_node_residual_in, chords,
-    h_partition_out, partition):
+    w_prime = unsat.pop()
+    unsat.add(w_prime)
+    # select node t, a neighbor of w, that is not connected to w_prime
+    w_neighbs = list(G.successors(w))
+    # slightly faster declaring this variable
+    w_prime_neighbs = list(G.successors(w_prime))
+
+    for v in w_neighbs:
+        if (v not in w_prime_neighbs) and w_prime != v:
+            # removes (w,v), add (w_prime,v)  and update data structures
+            G.remove_edge(w, v)
+            G.add_edge(w_prime, v)
+
+            if h_partition_in[v] == partition:
+                chords.add((w, v))
+                chords.discard((w_prime, v))
+
+            h_node_residual_out[w] += 1
+            h_node_residual_out[w_prime] -= 1
+            if h_node_residual_out[w_prime] == 0:
+                unsat.remove(w_prime)
+            return None
+
+    # If neighbor switch didn't work, use unsaturated node
+    return w_prime
+
+
+def _directed_neighbor_switch_rev(
+    G, w, unsat, h_node_residual_in, chords, h_partition_out, partition
+):
     """The reverse of directed_neighbor_switch.

     Parameters
@@ -246,7 +443,29 @@ def _directed_neighbor_switch_rev(G, w, unsat, h_node_residual_in, chords,
     Same operation as directed_neighbor_switch except it handles this operation
     for incoming edges instead of outgoing.
     """
-    pass
+    w_prime = unsat.pop()
+    unsat.add(w_prime)
+    # slightly faster declaring these as variables.
+    w_neighbs = list(G.predecessors(w))
+    w_prime_neighbs = list(G.predecessors(w_prime))
+    # select node v, a neighbor of w, that is not connected to w_prime.
+    for v in w_neighbs:
+        if (v not in w_prime_neighbs) and w_prime != v:
+            # removes (v,w), add (v,w_prime) and update data structures.
+            G.remove_edge(v, w)
+            G.add_edge(v, w_prime)
+            if h_partition_out[v] == partition:
+                chords.add((v, w))
+                chords.discard((v, w_prime))
+
+            h_node_residual_in[w] += 1
+            h_node_residual_in[w_prime] -= 1
+            if h_node_residual_in[w_prime] == 0:
+                unsat.remove(w_prime)
+            return None
+
+    # If neighbor switch didn't work, use the unsaturated node.
+    return w_prime


 @py_random_state(3)
@@ -315,4 +534,131 @@ def directed_joint_degree_graph(in_degrees, out_degrees, nkk, seed=None):
     >>> G = nx.directed_joint_degree_graph(in_degrees, out_degrees, nkk)
     >>>
     """
-    pass
+    if not is_valid_directed_joint_degree(in_degrees, out_degrees, nkk):
+        msg = "Input is not realizable as a simple graph"
+        raise nx.NetworkXError(msg)
+
+    # start with an empty directed graph.
+    G = nx.DiGraph()
+
+    # for a given group, keep the list of all node ids.
+    h_degree_nodelist_in = {}
+    h_degree_nodelist_out = {}
+    # for a given group, keep the list of all unsaturated node ids.
+    h_degree_nodelist_in_unsat = {}
+    h_degree_nodelist_out_unsat = {}
+    # for a given node, keep track of the remaining stubs to be added.
+    h_node_residual_out = {}
+    h_node_residual_in = {}
+    # for a given node, keep track of the partition id.
+    h_partition_out = {}
+    h_partition_in = {}
+    # keep track of non-chords between pairs of partition ids.
+    non_chords = {}
+
+    # populate data structures
+    for idx, i in enumerate(in_degrees):
+        idx = int(idx)
+        if i > 0:
+            h_degree_nodelist_in.setdefault(i, [])
+            h_degree_nodelist_in_unsat.setdefault(i, set())
+            h_degree_nodelist_in[i].append(idx)
+            h_degree_nodelist_in_unsat[i].add(idx)
+            h_node_residual_in[idx] = i
+            h_partition_in[idx] = i
+
+    for idx, o in enumerate(out_degrees):
+        o = out_degrees[idx]
+        non_chords[(o, in_degrees[idx])] = non_chords.get((o, in_degrees[idx]), 0) + 1
+        idx = int(idx)
+        if o > 0:
+            h_degree_nodelist_out.setdefault(o, [])
+            h_degree_nodelist_out_unsat.setdefault(o, set())
+            h_degree_nodelist_out[o].append(idx)
+            h_degree_nodelist_out_unsat[o].add(idx)
+            h_node_residual_out[idx] = o
+            h_partition_out[idx] = o
+
+        G.add_node(idx)
+
+    nk_in = {}
+    nk_out = {}
+    for p in h_degree_nodelist_in:
+        nk_in[p] = len(h_degree_nodelist_in[p])
+    for p in h_degree_nodelist_out:
+        nk_out[p] = len(h_degree_nodelist_out[p])
+
+    # iterate over every degree pair (k,l) and add the number of edges given
+    # for each pair.
+    for k in nkk:
+        for l in nkk[k]:
+            n_edges_add = nkk[k][l]
+
+            if n_edges_add > 0:
+                # chords contains a random set of potential edges.
+                chords = set()
+
+                k_len = nk_out[k]
+                l_len = nk_in[l]
+                chords_sample = seed.sample(
+                    range(k_len * l_len), n_edges_add + non_chords.get((k, l), 0)
+                )
+
+                num = 0
+                while len(chords) < n_edges_add:
+                    i = h_degree_nodelist_out[k][chords_sample[num] % k_len]
+                    j = h_degree_nodelist_in[l][chords_sample[num] // k_len]
+                    num += 1
+                    if i != j:
+                        chords.add((i, j))
+
+                # k_unsat and l_unsat consist of nodes of in/out degree k and l
+                # that are unsaturated i.e. those nodes that have at least one
+                # available stub
+                k_unsat = h_degree_nodelist_out_unsat[k]
+                l_unsat = h_degree_nodelist_in_unsat[l]
+
+                while n_edges_add > 0:
+                    v, w = chords.pop()
+                    chords.add((v, w))
+
+                    # if node v has no free stubs then do neighbor switch.
+                    if h_node_residual_out[v] == 0:
+                        _v = _directed_neighbor_switch(
+                            G,
+                            v,
+                            k_unsat,
+                            h_node_residual_out,
+                            chords,
+                            h_partition_in,
+                            l,
+                        )
+                        if _v is not None:
+                            v = _v
+
+                    # if node w has no free stubs then do neighbor switch.
+                    if h_node_residual_in[w] == 0:
+                        _w = _directed_neighbor_switch_rev(
+                            G,
+                            w,
+                            l_unsat,
+                            h_node_residual_in,
+                            chords,
+                            h_partition_out,
+                            k,
+                        )
+                        if _w is not None:
+                            w = _w
+
+                    # add edge (v,w) and update data structures.
+                    G.add_edge(v, w)
+                    h_node_residual_out[v] -= 1
+                    h_node_residual_in[w] -= 1
+                    n_edges_add -= 1
+                    chords.discard((v, w))
+
+                    if h_node_residual_out[v] == 0:
+                        k_unsat.discard(v)
+                    if h_node_residual_in[w] == 0:
+                        l_unsat.discard(w)
+    return G
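
A small end-to-end sketch of the undirected path above, using a realizable
joint-degree dict (the same one as in the `joint_degree_graph` docstring
example):

    import networkx as nx

    joint_degrees = {
        1: {4: 1},
        2: {2: 2, 3: 2, 4: 2},
        3: {2: 2, 4: 1},
        4: {1: 1, 2: 2, 3: 1},
    }
    assert nx.is_valid_joint_degree(joint_degrees)
    G = nx.joint_degree_graph(joint_degrees, seed=0)
    print(nx.degree_histogram(G))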
diff --git a/networkx/generators/lattice.py b/networkx/generators/lattice.py
index c84792c4c..95e520d2c 100644
--- a/networkx/generators/lattice.py
+++ b/networkx/generators/lattice.py
@@ -12,16 +12,24 @@ be found about `Triangular Tiling`_, and `Square, Hex and Triangle Grids`_
 .. _Triangular Tiling: https://en.wikipedia.org/wiki/Triangular_tiling

 """
+
 from itertools import repeat
 from math import sqrt
+
 import networkx as nx
 from networkx.classes import set_node_attributes
 from networkx.exception import NetworkXError
 from networkx.generators.classic import cycle_graph, empty_graph, path_graph
 from networkx.relabel import relabel_nodes
 from networkx.utils import flatten, nodes_or_number, pairwise
-__all__ = ['grid_2d_graph', 'grid_graph', 'hypercube_graph',
-    'triangular_lattice_graph', 'hexagonal_lattice_graph']
+
+__all__ = [
+    "grid_2d_graph",
+    "grid_graph",
+    "hypercube_graph",
+    "triangular_lattice_graph",
+    "hexagonal_lattice_graph",
+]


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -52,7 +60,30 @@ def grid_2d_graph(m, n, periodic=False, create_using=None):
         The (possibly periodic) grid graph of the specified dimensions.

     """
-    pass
+    G = empty_graph(0, create_using)
+    row_name, rows = m
+    col_name, cols = n
+    G.add_nodes_from((i, j) for i in rows for j in cols)
+    G.add_edges_from(((i, j), (pi, j)) for pi, i in pairwise(rows) for j in cols)
+    G.add_edges_from(((i, j), (i, pj)) for i in rows for pj, j in pairwise(cols))
+
+    try:
+        periodic_r, periodic_c = periodic
+    except TypeError:
+        periodic_r = periodic_c = periodic
+
+    if periodic_r and len(rows) > 2:
+        first = rows[0]
+        last = rows[-1]
+        G.add_edges_from(((first, j), (last, j)) for j in cols)
+    if periodic_c and len(cols) > 2:
+        first = cols[0]
+        last = cols[-1]
+        G.add_edges_from(((i, first), (i, last)) for i in rows)
+    # both directions for directed
+    if G.is_directed():
+        G.add_edges_from((v, u) for u, v in G.edges())
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -93,7 +124,23 @@ def grid_graph(dim, periodic=False):
     >>> len(G)
     6
     """
-    pass
+    from networkx.algorithms.operators.product import cartesian_product
+
+    if not dim:
+        return empty_graph(0)
+
+    try:
+        func = (cycle_graph if p else path_graph for p in periodic)
+    except TypeError:
+        func = repeat(cycle_graph if periodic else path_graph)
+
+    G = next(func)(dim[0])
+    for current_dim in dim[1:]:
+        Gnew = next(func)(current_dim)
+        G = cartesian_product(Gnew, G)
+    # graph G is done but has labels of the form (1, (2, (3, 1))) so relabel
+    H = relabel_nodes(G, flatten)
+    return H


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -118,13 +165,16 @@ def hypercube_graph(n):
     NetworkX graph
         The hypercube graph of dimension *n*.
     """
-    pass
+    dim = n * [2]
+    G = grid_graph(dim)
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
-def triangular_lattice_graph(m, n, periodic=False, with_positions=True,
-    create_using=None):
-    """Returns the $m$ by $n$ triangular lattice graph.
+def triangular_lattice_graph(
+    m, n, periodic=False, with_positions=True, create_using=None
+):
+    r"""Returns the $m$ by $n$ triangular lattice graph.

     The `triangular lattice graph`_ is a two-dimensional `grid graph`_ in
     which each square unit has a diagonal edge (each grid unit has a chord).
@@ -141,7 +191,7 @@ def triangular_lattice_graph(m, n, periodic=False, with_positions=True,
     Positions of nodes are computed by default or if `with_positions` is True.
     The position of each node (embedded in a Euclidean plane) is stored in
     the graph using equilateral triangles with side length 1.
-    The height between rows of nodes is thus $\\sqrt(3)/2$.
+    The height between rows of nodes is thus $\sqrt(3)/2$.
     Nodes lie in the first quadrant with the node $(0, 0)$ at the origin.

     .. _triangular lattice graph: http://mathworld.wolfram.com/TriangularGrid.html
@@ -177,12 +227,54 @@ def triangular_lattice_graph(m, n, periodic=False, with_positions=True,
     NetworkX graph
         The *m* by *n* triangular lattice graph.
     """
-    pass
+    H = empty_graph(0, create_using)
+    if n == 0 or m == 0:
+        return H
+    if periodic:
+        if n < 5 or m < 3:
+            msg = f"m > 2 and n > 4 required for periodic. m={m}, n={n}"
+            raise NetworkXError(msg)
+
+    N = (n + 1) // 2  # number of nodes in row
+    rows = range(m + 1)
+    cols = range(N + 1)
+    # Make grid
+    H.add_edges_from(((i, j), (i + 1, j)) for j in rows for i in cols[:N])
+    H.add_edges_from(((i, j), (i, j + 1)) for j in rows[:m] for i in cols)
+    # add diagonals
+    H.add_edges_from(((i, j), (i + 1, j + 1)) for j in rows[1:m:2] for i in cols[:N])
+    H.add_edges_from(((i + 1, j), (i, j + 1)) for j in rows[:m:2] for i in cols[:N])
+    # identify boundary nodes if periodic
+    from networkx.algorithms.minors import contracted_nodes
+
+    if periodic is True:
+        for i in cols:
+            H = contracted_nodes(H, (i, 0), (i, m))
+        for j in rows[:m]:
+            H = contracted_nodes(H, (0, j), (N, j))
+    elif n % 2:
+        # remove extra nodes
+        H.remove_nodes_from((N, j) for j in rows[1::2])
+
+    # Add position node attributes
+    if with_positions:
+        ii = (i for i in cols for j in rows)
+        jj = (j for i in cols for j in rows)
+        xx = (0.5 * (j % 2) + i for i in cols for j in rows)
+        h = sqrt(3) / 2
+        if periodic:
+            yy = (h * j + 0.01 * i * i for i in cols for j in rows)
+        else:
+            yy = (h * j for i in cols for j in rows)
+        pos = {(i, j): (x, y) for i, j, x, y in zip(ii, jj, xx, yy) if (i, j) in H}
+        set_node_attributes(H, pos, "pos")
+    return H


 @nx._dispatchable(graphs=None, returns_graph=True)
-def hexagonal_lattice_graph(m, n, periodic=False, with_positions=True,
-    create_using=None):
+def hexagonal_lattice_graph(
+    m, n, periodic=False, with_positions=True, create_using=None
+):
     """Returns an `m` by `n` hexagonal lattice graph.

     The *hexagonal lattice graph* is a graph whose nodes and edges are
@@ -229,4 +321,47 @@ def hexagonal_lattice_graph(m, n, periodic=False, with_positions=True,
     NetworkX graph
         The *m* by *n* hexagonal lattice graph.
     """
-    pass
+    G = empty_graph(0, create_using)
+    if m == 0 or n == 0:
+        return G
+    if periodic and (n % 2 == 1 or m < 2 or n < 2):
+        msg = "periodic hexagonal lattice needs m > 1, n > 1 and even n"
+        raise NetworkXError(msg)
+
+    M = 2 * m  # twice as many nodes as hexagons vertically
+    rows = range(M + 2)
+    cols = range(n + 1)
+    # make lattice
+    col_edges = (((i, j), (i, j + 1)) for i in cols for j in rows[: M + 1])
+    row_edges = (((i, j), (i + 1, j)) for i in cols[:n] for j in rows if i % 2 == j % 2)
+    G.add_edges_from(col_edges)
+    G.add_edges_from(row_edges)
+    # Remove corner nodes with one edge
+    G.remove_node((0, M + 1))
+    G.remove_node((n, (M + 1) * (n % 2)))
+
+    # identify boundary nodes if periodic
+    from networkx.algorithms.minors import contracted_nodes
+
+    if periodic:
+        for i in cols[:n]:
+            G = contracted_nodes(G, (i, 0), (i, M))
+        for i in cols[1:]:
+            G = contracted_nodes(G, (i, 1), (i, M + 1))
+        for j in rows[1:M]:
+            G = contracted_nodes(G, (0, j), (n, j))
+        G.remove_node((n, M))
+
+    # calc position in embedded space
+    ii = (i for i in cols for j in rows)
+    jj = (j for i in cols for j in rows)
+    xx = (0.5 + i + i // 2 + (j % 2) * ((i % 2) - 0.5) for i in cols for j in rows)
+    h = sqrt(3) / 2
+    if periodic:
+        yy = (h * j + 0.01 * i * i for i in cols for j in rows)
+    else:
+        yy = (h * j for i in cols for j in rows)
+    # exclude nodes not in G
+    pos = {(i, j): (x, y) for i, j, x, y in zip(ii, jj, xx, yy) if (i, j) in G}
+    set_node_attributes(G, pos, "pos")
+    return G
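
Illustrative calls for the lattice generators in this file:

    import networkx as nx

    G = nx.grid_2d_graph(3, 4)              # 12 nodes labeled (i, j)
    T = nx.triangular_lattice_graph(2, 4)
    H = nx.hexagonal_lattice_graph(2, 3)
    pos = nx.get_node_attributes(H, "pos")  # embedded coordinates, e.g. for nx.draw
    print(len(G), len(T), len(H))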
diff --git a/networkx/generators/line.py b/networkx/generators/line.py
index dce0056f3..57f4d1692 100644
--- a/networkx/generators/line.py
+++ b/networkx/generators/line.py
@@ -2,15 +2,17 @@
 from collections import defaultdict
 from functools import partial
 from itertools import combinations
+
 import networkx as nx
 from networkx.utils import arbitrary_element
 from networkx.utils.decorators import not_implemented_for
-__all__ = ['line_graph', 'inverse_line_graph']
+
+__all__ = ["line_graph", "inverse_line_graph"]


 @nx._dispatchable(returns_graph=True)
 def line_graph(G, create_using=None):
-    """Returns the line graph of the graph or digraph `G`.
+    r"""Returns the line graph of the graph or digraph `G`.

     The line graph of a graph `G` has a node for each edge in `G` and an
     edge joining those nodes if the two edges in `G` share a common node. For
@@ -63,8 +65,8 @@ def line_graph(G, create_using=None):
     *Self-loops in undirected graphs*

     For an undirected graph `G` without multiple edges, each edge can be
-    written as a set `\\{u, v\\}`.  Its line graph `L` has the edges of `G` as
-    its nodes. If `x` and `y` are two nodes in `L`, then `\\{x, y\\}` is an edge
+    written as a set `\{u, v\}`.  Its line graph `L` has the edges of `G` as
+    its nodes. If `x` and `y` are two nodes in `L`, then `\{x, y\}` is an edge
     in `L` if and only if the intersection of `x` and `y` is nonempty. Thus,
     the set of all edges is determined by the set of all pairwise intersections
     of edges in `G`.
@@ -110,7 +112,11 @@ def line_graph(G, create_using=None):
       Academic Press Inc., pp. 271--305.

     """
-    pass
+    if G.is_directed():
+        L = _lg_directed(G, create_using=create_using)
+    else:
+        L = _lg_undirected(G, selfloops=False, create_using=create_using)
+    return L


 def _lg_directed(G, create_using=None):
@@ -129,7 +135,18 @@ def _lg_directed(G, create_using=None):
        Default is to use the same graph class as `G`.

     """
-    pass
+    L = nx.empty_graph(0, create_using, default=G.__class__)
+
+    # Create a graph specific edge function.
+    get_edges = partial(G.edges, keys=True) if G.is_multigraph() else G.edges
+
+    for from_node in get_edges():
+        # from_node is: (u,v) or (u,v,key)
+        L.add_node(from_node)
+        for to_node in get_edges(from_node[1]):
+            L.add_edge(from_node, to_node)
+
+    return L


 def _lg_undirected(G, selfloops=False, create_using=None):
@@ -156,11 +173,48 @@ def _lg_undirected(G, selfloops=False, create_using=None):
     produce self-loops.

     """
-    pass
+    L = nx.empty_graph(0, create_using, default=G.__class__)
+
+    # Graph specific functions for edges.
+    get_edges = partial(G.edges, keys=True) if G.is_multigraph() else G.edges
+
+    # Determine if we include self-loops or not.
+    shift = 0 if selfloops else 1

+    # Introduce numbering of nodes
+    node_index = {n: i for i, n in enumerate(G)}

-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+    # Lift canonical representation of nodes to edges in line graph
+    edge_key_function = lambda edge: (node_index[edge[0]], node_index[edge[1]])
+
+    edges = set()
+    for u in G:
+        # Label nodes as a sorted tuple of nodes in original graph.
+        # Decide on representation of {u, v} as (u, v) or (v, u) depending on node_index.
+        # -> This ensures a canonical representation and avoids comparing values of different types.
+        nodes = [tuple(sorted(x[:2], key=node_index.get)) + x[2:] for x in get_edges(u)]
+
+        if len(nodes) == 1:
+            # Then the edge will be an isolated node in L.
+            L.add_node(nodes[0])
+
+        # Add a clique of `nodes` to graph. To prevent double adding edges,
+        # especially important for multigraphs, we store the edges in
+        # canonical form in a set.
+        for i, a in enumerate(nodes):
+            edges.update(
+                [
+                    tuple(sorted((a, b), key=edge_key_function))
+                    for b in nodes[i + shift :]
+                ]
+            )
+
+    L.add_edges_from(edges)
+    return L
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable(returns_graph=True)
 def inverse_line_graph(G):
     """Returns the inverse line graph of graph G.
@@ -212,12 +266,61 @@ def inverse_line_graph(G):
        `DOI link <https://doi.org/10.1016/0020-0190(73)90029-X>`_

     """
-    pass
+    if G.number_of_nodes() == 0:
+        return nx.empty_graph(1)
+    elif G.number_of_nodes() == 1:
+        v = arbitrary_element(G)
+        a = (v, 0)
+        b = (v, 1)
+        H = nx.Graph([(a, b)])
+        return H
+    elif G.number_of_nodes() > 1 and G.number_of_edges() == 0:
+        msg = (
+            "inverse_line_graph() doesn't work on an edgeless graph. "
+            "Please use this function on each component separately."
+        )
+        raise nx.NetworkXError(msg)
+
+    if nx.number_of_selfloops(G) != 0:
+        msg = (
+            "A line graph as generated by NetworkX has no selfloops, so G has no "
+            "inverse line graph. Please remove the selfloops from G and try again."
+        )
+        raise nx.NetworkXError(msg)
+
+    starting_cell = _select_starting_cell(G)
+    P = _find_partition(G, starting_cell)
+    # count how many times each vertex appears in the partition set
+    P_count = {u: 0 for u in G.nodes}
+    for p in P:
+        for u in p:
+            P_count[u] += 1
+
+    if max(P_count.values()) > 2:
+        msg = "G is not a line graph (vertex found in more than two partition cells)"
+        raise nx.NetworkXError(msg)
+    W = tuple((u,) for u in P_count if P_count[u] == 1)
+    H = nx.Graph()
+    H.add_nodes_from(P)
+    H.add_nodes_from(W)
+    for a, b in combinations(H.nodes, 2):
+        if any(a_bit in b for a_bit in a):
+            H.add_edge(a, b)
+    return H


 def _triangles(G, e):
     """Return list of all triangles containing edge e"""
-    pass
+    u, v = e
+    if u not in G:
+        raise nx.NetworkXError(f"Vertex {u} not in graph")
+    if v not in G[u]:
+        raise nx.NetworkXError(f"Edge ({u}, {v}) not in graph")
+    triangle_list = []
+    for x in G[u]:
+        if x in G[v]:
+            triangle_list.append((u, v, x))
+    return triangle_list


 def _odd_triangle(G, T):
@@ -245,7 +348,19 @@ def _odd_triangle(G, T):
     triangle.

     """
-    pass
+    for u in T:
+        if u not in G.nodes():
+            raise nx.NetworkXError(f"Vertex {u} not in graph")
+    for e in list(combinations(T, 2)):
+        if e[0] not in G[e[1]]:
+            raise nx.NetworkXError(f"Edge ({e[0]}, {e[1]}) not in graph")
+
+    T_nbrs = defaultdict(int)
+    for t in T:
+        for v in G[t]:
+            if v not in T:
+                T_nbrs[v] += 1
+    return any(T_nbrs[v] in [1, 3] for v in T_nbrs)


 def _find_partition(G, starting_cell):
@@ -265,7 +380,32 @@ def _find_partition(G, starting_cell):
     NetworkXError
         If a cell is not a complete subgraph then G is not a line graph
     """
-    pass
+    G_partition = G.copy()
+    P = [starting_cell]  # partition set
+    G_partition.remove_edges_from(list(combinations(starting_cell, 2)))
+    # keep list of partitioned nodes which might have an edge in G_partition
+    partitioned_vertices = list(starting_cell)
+    while G_partition.number_of_edges() > 0:
+        # there are still edges left and so more cells to be made
+        u = partitioned_vertices.pop()
+        deg_u = len(G_partition[u])
+        if deg_u != 0:
+            # if u still has edges then we need to find its other cell
+            # this other cell must be a complete subgraph or else G is
+            # not a line graph
+            new_cell = [u] + list(G_partition[u])
+            for u in new_cell:
+                for v in new_cell:
+                    if (u != v) and (v not in G_partition[u]):
+                        msg = (
+                            "G is not a line graph "
+                            "(partition cell not a complete subgraph)"
+                        )
+                        raise nx.NetworkXError(msg)
+            P.append(tuple(new_cell))
+            G_partition.remove_edges_from(list(combinations(new_cell, 2)))
+            partitioned_vertices += new_cell
+    return P


 def _select_starting_cell(G, starting_edge=None):
@@ -292,4 +432,68 @@ def _select_starting_cell(G, starting_edge=None):
     specific starting edge. Note that the r, s notation for counting
     triangles is the same as in the Roussopoulos paper cited above.
     """
-    pass
+    if starting_edge is None:
+        e = arbitrary_element(G.edges())
+    else:
+        e = starting_edge
+        if e[0] not in G.nodes():
+            raise nx.NetworkXError(f"Vertex {e[0]} not in graph")
+        if e[1] not in G[e[0]]:
+            msg = f"starting_edge ({e[0]}, {e[1]}) is not in the Graph"
+            raise nx.NetworkXError(msg)
+    e_triangles = _triangles(G, e)
+    r = len(e_triangles)
+    if r == 0:
+        # there are no triangles containing e, so the starting cell is just e
+        starting_cell = e
+    elif r == 1:
+        # there is exactly one triangle, T, containing e. If the other 2 edges
+        # of T belong only to this triangle then T is the starting cell
+        T = e_triangles[0]
+        a, b, c = T
+        # ab was original edge so check the other 2 edges
+        ac_edges = len(_triangles(G, (a, c)))
+        bc_edges = len(_triangles(G, (b, c)))
+        if ac_edges == 1:
+            if bc_edges == 1:
+                starting_cell = T
+            else:
+                return _select_starting_cell(G, starting_edge=(b, c))
+        else:
+            return _select_starting_cell(G, starting_edge=(a, c))
+    else:
+        # r >= 2 so we need to count the number of odd triangles, s
+        s = 0
+        odd_triangles = []
+        for T in e_triangles:
+            if _odd_triangle(G, T):
+                s += 1
+                odd_triangles.append(T)
+        if r == 2 and s == 0:
+            # in this case either triangle works, so just use T
+            starting_cell = T
+        elif r - 1 <= s <= r:
+            # check if odd triangles containing e form complete subgraph
+            triangle_nodes = set()
+            for T in odd_triangles:
+                for x in T:
+                    triangle_nodes.add(x)
+
+            for u in triangle_nodes:
+                for v in triangle_nodes:
+                    if u != v and (v not in G[u]):
+                        msg = (
+                            "G is not a line graph (odd triangles "
+                            "do not form complete subgraph)"
+                        )
+                        raise nx.NetworkXError(msg)
+            # otherwise then we can use this as the starting cell
+            starting_cell = tuple(triangle_nodes)
+
+        else:
+            msg = (
+                "G is not a line graph (incorrect number of "
+                "odd triangles around starting edge)"
+            )
+            raise nx.NetworkXError(msg)
+    return starting_cell
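
Round-tripping a small graph through the two public functions (the result is
isomorphic, not identical, because `inverse_line_graph` relabels nodes):

    import networkx as nx

    G = nx.cycle_graph(5)
    L = nx.line_graph(G)           # nodes of L are the edges of G
    H = nx.inverse_line_graph(L)
    print(nx.is_isomorphic(G, H))  # True; C5 is its own line graph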
diff --git a/networkx/generators/mycielski.py b/networkx/generators/mycielski.py
index 0783e5d81..804b90369 100644
--- a/networkx/generators/mycielski.py
+++ b/networkx/generators/mycielski.py
@@ -2,16 +2,18 @@
 of graphs.

 """
+
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['mycielskian', 'mycielski_graph']
+
+__all__ = ["mycielskian", "mycielski_graph"]


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable(returns_graph=True)
 def mycielskian(G, iterations=1):
-    """Returns the Mycielskian of a simple, undirected graph G
+    r"""Returns the Mycielskian of a simple, undirected graph G

     The Mycielskian of a graph preserves the graph's triangle-free
     property while increasing the chromatic number by 1.
@@ -23,9 +25,9 @@ def mycielskian(G, iterations=1):

     Let :math:`V = {0, ..., n-1}`. Construct another vertex set
     :math:`U = {n, ..., 2n}` and a vertex, `w`.
-    Construct a new graph, `M`, with vertices :math:`U \\bigcup V \\bigcup w`.
-    For edges, :math:`(u, v) \\in E` add edges :math:`(u, v), (u, v + n)`, and
-    :math:`(u + n, v)` to M. Finally, for all vertices :math:`u \\in U`, add
+    Construct a new graph, `M`, with vertices :math:`U \bigcup V \bigcup w`.
+    For edges, :math:`(u, v) \in E` add edges :math:`(u, v), (u, v + n)`, and
+    :math:`(u + n, v)` to M. Finally, for all vertices :math:`u \in U`, add
     edge :math:`(u, w)` to M.

     The Mycielski Operation can be done multiple times by repeating the above
@@ -51,7 +53,19 @@ def mycielskian(G, iterations=1):
     Graph, node, and edge data are not necessarily propagated to the new graph.

     """
-    pass
+
+    M = nx.convert_node_labels_to_integers(G)
+
+    for i in range(iterations):
+        n = M.number_of_nodes()
+        M.add_nodes_from(range(n, 2 * n))
+        old_edges = list(M.edges())
+        M.add_edges_from((u, v + n) for u, v in old_edges)
+        M.add_edges_from((u + n, v) for u, v in old_edges)
+        M.add_node(2 * n)
+        M.add_edges_from((u + n, 2 * n) for u in range(n))
+
+    return M


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -85,4 +99,12 @@ def mycielski_graph(n):
     The remaining graphs are generated using the Mycielski operation.

     """
-    pass
+
+    if n < 1:
+        raise nx.NetworkXError("must satisfy n >= 1")
+
+    if n == 1:
+        return nx.empty_graph(1)
+
+    else:
+        return mycielskian(nx.path_graph(2), n - 2)
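
For instance, `mycielski_graph(4)` is the Grötzsch graph: 11 nodes, 20 edges,
triangle-free, chromatic number 4:

    import networkx as nx

    G = nx.mycielski_graph(4)
    print(G.number_of_nodes(), G.number_of_edges())  # 11 20
    assert sum(nx.triangles(G).values()) == 0        # still triangle-free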
diff --git a/networkx/generators/nonisomorphic_trees.py b/networkx/generators/nonisomorphic_trees.py
index a32533c07..9716cf338 100644
--- a/networkx/generators/nonisomorphic_trees.py
+++ b/networkx/generators/nonisomorphic_trees.py
@@ -6,12 +6,14 @@ lists in which the i-th element specifies the distance of vertex i to
 the root.

 """
-__all__ = ['nonisomorphic_trees', 'number_of_nonisomorphic_trees']
+
+__all__ = ["nonisomorphic_trees", "number_of_nonisomorphic_trees"]
+
 import networkx as nx


 @nx._dispatchable(graphs=None, returns_graph=True)
-def nonisomorphic_trees(order, create='graph'):
+def nonisomorphic_trees(order, create="graph"):
     """Generates lists of nonisomorphic trees

     Parameters
@@ -40,7 +42,34 @@ def nonisomorphic_trees(order, create='graph'):
        - ``create="graph"``: yields a list of `networkx.Graph` instances
        - ``create="matrix"``: yields a list of list-of-lists representing adjacency matrices
     """
-    pass
+
+    if order < 2:
+        raise ValueError("order must be at least 2")
+    # start at the path graph rooted at its center
+    layout = list(range(order // 2 + 1)) + list(range(1, (order + 1) // 2))
+
+    while layout is not None:
+        layout = _next_tree(layout)
+        if layout is not None:
+            if create == "graph":
+                yield _layout_to_graph(layout)
+            elif create == "matrix":
+                import warnings
+
+                warnings.warn(
+                    (
+                        "\n\nThe 'create=matrix' argument of nonisomorphic_trees\n"
+                        "is deprecated and will be removed in version 3.5.\n"
+                        "Use ``nx.to_numpy_array`` to convert graphs to adjacency "
+                        "matrices, e.g.::\n\n"
+                        "   [nx.to_numpy_array(G) for G in nx.nonisomorphic_trees(N)]"
+                    ),
+                    category=DeprecationWarning,
+                    stacklevel=2,
+                )
+
+                yield _layout_to_matrix(layout)
+            layout = _next_rooted_tree(layout)


 @nx._dispatchable(graphs=None)
@@ -60,34 +89,124 @@ def number_of_nonisomorphic_trees(order):
     ----------

     """
-    pass
+    return sum(1 for _ in nonisomorphic_trees(order))


 def _next_rooted_tree(predecessor, p=None):
     """One iteration of the Beyer-Hedetniemi algorithm."""
-    pass
+
+    if p is None:
+        p = len(predecessor) - 1
+        while predecessor[p] == 1:
+            p -= 1
+    if p == 0:
+        return None
+
+    q = p - 1
+    while predecessor[q] != predecessor[p] - 1:
+        q -= 1
+    result = list(predecessor)
+    for i in range(p, len(result)):
+        result[i] = result[i - p + q]
+    return result


 def _next_tree(candidate):
     """One iteration of the Wright, Richmond, Odlyzko and McKay
     algorithm."""
-    pass
+
+    # valid representation of a free tree if:
+    # there are at least two vertices at layer 1
+    # (this is always the case because we start at the path graph)
+    left, rest = _split_tree(candidate)
+
+    # and the left subtree of the root
+    # is less high than the tree with the left subtree removed
+    left_height = max(left)
+    rest_height = max(rest)
+    valid = rest_height >= left_height
+
+    if valid and rest_height == left_height:
+        # and, if left and rest are of the same height,
+        # if left does not encompass more vertices
+        if len(left) > len(rest):
+            valid = False
+        # and, if they have the same number of vertices,
+        # if left does not come after rest lexicographically
+        elif len(left) == len(rest) and left > rest:
+            valid = False
+
+    if valid:
+        return candidate
+    else:
+        # jump to the next valid free tree
+        p = len(left)
+        new_candidate = _next_rooted_tree(candidate, p)
+        if candidate[p] > 2:
+            new_left, new_rest = _split_tree(new_candidate)
+            new_left_height = max(new_left)
+            suffix = range(1, new_left_height + 2)
+            new_candidate[-len(suffix) :] = suffix
+        return new_candidate


 def _split_tree(layout):
     """Returns a tuple of two layouts, one containing the left
     subtree of the root vertex, and one containing the original tree
     with the left subtree removed."""
-    pass
+
+    one_found = False
+    m = None
+    for i in range(len(layout)):
+        if layout[i] == 1:
+            if one_found:
+                m = i
+                break
+            else:
+                one_found = True
+
+    if m is None:
+        m = len(layout)
+
+    left = [layout[i] - 1 for i in range(1, m)]
+    rest = [0] + [layout[i] for i in range(m, len(layout))]
+    return (left, rest)


 def _layout_to_matrix(layout):
     """Create the adjacency matrix for the tree specified by the
     given layout (level sequence)."""
-    pass
+
+    result = [[0] * len(layout) for i in range(len(layout))]
+    stack = []
+    for i in range(len(layout)):
+        i_level = layout[i]
+        if stack:
+            j = stack[-1]
+            j_level = layout[j]
+            while j_level >= i_level:
+                stack.pop()
+                j = stack[-1]
+                j_level = layout[j]
+            result[i][j] = result[j][i] = 1
+        stack.append(i)
+    return result


 def _layout_to_graph(layout):
     """Create a NetworkX Graph for the tree specified by the
     given layout(level sequence)"""
-    pass
+    G = nx.Graph()
+    stack = []
+    for i in range(len(layout)):
+        i_level = layout[i]
+        if stack:
+            j = stack[-1]
+            j_level = layout[j]
+            while j_level >= i_level:
+                stack.pop()
+                j = stack[-1]
+                j_level = layout[j]
+            G.add_edge(i, j)
+        stack.append(i)
+    return G
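
For example, there are exactly 6 nonisomorphic trees on 6 vertices:

    import networkx as nx

    print(nx.number_of_nonisomorphic_trees(6))  # 6
    for T in nx.nonisomorphic_trees(6):
        assert nx.is_tree(T) and len(T) == 6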
diff --git a/networkx/generators/random_clustered.py b/networkx/generators/random_clustered.py
index c607abb23..edf4b94b3 100644
--- a/networkx/generators/random_clustered.py
+++ b/networkx/generators/random_clustered.py
@@ -2,14 +2,14 @@
 """
 import networkx as nx
 from networkx.utils import py_random_state
-__all__ = ['random_clustered_graph']
+
+__all__ = ["random_clustered_graph"]


 @py_random_state(2)
 @nx._dispatchable(graphs=None, returns_graph=True)
-def random_clustered_graph(joint_degree_sequence, create_using=None, seed=None
-    ):
-    """Generate a random graph with the given joint independent edge degree and
+def random_clustered_graph(joint_degree_sequence, create_using=None, seed=None):
+    r"""Generate a random graph with the given joint independent edge degree and
     triangle degree sequence.

     This uses a configuration model-like approach to generate a random graph
@@ -17,7 +17,7 @@ def random_clustered_graph(joint_degree_sequence, create_using=None, seed=None
     the given joint degree sequence.

     The joint degree sequence is a list of pairs of integers of the form
-    $[(d_{1,i}, d_{1,t}), \\dotsc, (d_{n,i}, d_{n,t})]$. According to this list,
+    $[(d_{1,i}, d_{1,t}), \dotsc, (d_{n,i}, d_{n,t})]$. According to this list,
     vertex $u$ is a member of $d_{u,t}$ triangles and has $d_{u, i}$ other
     edges. The number $d_{u,t}$ is the *triangle degree* of $u$ and the number
     $d_{u,i}$ is the *independent edge degree*.
@@ -85,4 +85,33 @@ def random_clustered_graph(joint_degree_sequence, create_using=None, seed=None
     >>> G.remove_edges_from(nx.selfloop_edges(G))

     """
-    pass
+    # In Python 3, zip() returns an iterator. Make this into a list.
+    joint_degree_sequence = list(joint_degree_sequence)
+
+    N = len(joint_degree_sequence)
+    G = nx.empty_graph(N, create_using, default=nx.MultiGraph)
+    if G.is_directed():
+        raise nx.NetworkXError("Directed Graph not supported")
+
+    ilist = []
+    tlist = []
+    for n in G:
+        degrees = joint_degree_sequence[n]
+        for icount in range(degrees[0]):
+            ilist.append(n)
+        for tcount in range(degrees[1]):
+            tlist.append(n)
+
+    if len(ilist) % 2 != 0 or len(tlist) % 3 != 0:
+        raise nx.NetworkXError("Invalid degree sequence")
+
+    seed.shuffle(ilist)
+    seed.shuffle(tlist)
+    while ilist:
+        G.add_edge(ilist.pop(), ilist.pop())
+    while tlist:
+        n1 = tlist.pop()
+        n2 = tlist.pop()
+        n3 = tlist.pop()
+        G.add_edges_from([(n1, n2), (n1, n3), (n2, n3)])
+    return G
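
A short sketch of the workflow from the docstring above (sequence values are
illustrative; independent-edge degrees must sum to an even number and
triangle degrees to a multiple of three):

    import networkx as nx

    deg = [(1, 0), (1, 0), (1, 0), (2, 1), (1, 1), (0, 1)]
    G = nx.random_clustered_graph(deg, seed=7)
    # collapse the multigraph and drop self-loops, as the docstring suggests
    G = nx.Graph(G)
    G.remove_edges_from(nx.selfloop_edges(G))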
diff --git a/networkx/generators/random_graphs.py b/networkx/generators/random_graphs.py
index d8339ed31..b6f5ebf62 100644
--- a/networkx/generators/random_graphs.py
+++ b/networkx/generators/random_graphs.py
@@ -2,21 +2,38 @@
 Generators for random graphs.

 """
+
 import itertools
 import math
 from collections import defaultdict
+
 import networkx as nx
 from networkx.utils import py_random_state
+
 from .classic import complete_graph, empty_graph, path_graph, star_graph
 from .degree_seq import degree_sequence_tree
-__all__ = ['fast_gnp_random_graph', 'gnp_random_graph',
-    'dense_gnm_random_graph', 'gnm_random_graph', 'erdos_renyi_graph',
-    'binomial_graph', 'newman_watts_strogatz_graph', 'watts_strogatz_graph',
-    'connected_watts_strogatz_graph', 'random_regular_graph',
-    'barabasi_albert_graph', 'dual_barabasi_albert_graph',
-    'extended_barabasi_albert_graph', 'powerlaw_cluster_graph',
-    'random_lobster', 'random_shell_graph', 'random_powerlaw_tree',
-    'random_powerlaw_tree_sequence', 'random_kernel_graph']
+
+__all__ = [
+    "fast_gnp_random_graph",
+    "gnp_random_graph",
+    "dense_gnm_random_graph",
+    "gnm_random_graph",
+    "erdos_renyi_graph",
+    "binomial_graph",
+    "newman_watts_strogatz_graph",
+    "watts_strogatz_graph",
+    "connected_watts_strogatz_graph",
+    "random_regular_graph",
+    "barabasi_albert_graph",
+    "dual_barabasi_albert_graph",
+    "extended_barabasi_albert_graph",
+    "powerlaw_cluster_graph",
+    "random_lobster",
+    "random_shell_graph",
+    "random_powerlaw_tree",
+    "random_powerlaw_tree_sequence",
+    "random_kernel_graph",
+]


 @py_random_state(2)
@@ -57,7 +74,38 @@ def fast_gnp_random_graph(n, p, seed=None, directed=False):
        "Efficient generation of large random networks",
        Phys. Rev. E, 71, 036113, 2005.
     """
-    pass
+    G = empty_graph(n)
+
+    if p <= 0 or p >= 1:
+        return nx.gnp_random_graph(n, p, seed=seed, directed=directed)
+
+    lp = math.log(1.0 - p)
+
+    if directed:
+        G = nx.DiGraph(G)
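+        # This first pass adds arcs (w, v); the undirected pass after this
+        # branch adds arcs (v, w), so each ordered pair is sampled
+        # independently with probability p. Self-loops cannot occur because
+        # w < v whenever an edge is added.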
+        v = 1
+        w = -1
+        while v < n:
+            lr = math.log(1.0 - seed.random())
+            w = w + 1 + int(lr / lp)
+            while w >= v and v < n:
+                w = w - v
+                v = v + 1
+            if v < n:
+                G.add_edge(w, v)
+
+    # Nodes in graph are from 0,n-1 (start with v as the second node index).
+    v = 1
+    w = -1
+    while v < n:
+        lr = math.log(1.0 - seed.random())
+        w = w + 1 + int(lr / lp)
+        while w >= v and v < n:
+            w = w - v
+            v = v + 1
+        if v < n:
+            G.add_edge(v, w)
+    return G


 @py_random_state(2)
@@ -102,9 +150,25 @@ def gnp_random_graph(n, p, seed=None, directed=False):
     .. [1] P. Erdős and A. Rényi, On Random Graphs, Publ. Math. 6, 290 (1959).
     .. [2] E. N. Gilbert, Random Graphs, Ann. Math. Stat., 30, 1141 (1959).
     """
-    pass
-
-
+    if directed:
+        edges = itertools.permutations(range(n), 2)
+        G = nx.DiGraph()
+    else:
+        edges = itertools.combinations(range(n), 2)
+        G = nx.Graph()
+    G.add_nodes_from(range(n))
+    if p <= 0:
+        return G
+    if p >= 1:
+        return complete_graph(n, create_using=G)
+
+    for e in edges:
+        if seed.random() < p:
+            G.add_edge(*e)
+    return G
+
+
+# add some aliases to common names
 binomial_graph = gnp_random_graph
 erdos_renyi_graph = gnp_random_graph

@@ -145,7 +209,30 @@ def dense_gnm_random_graph(n, m, seed=None):
     .. [1] Donald E. Knuth, The Art of Computer Programming,
         Volume 2/Seminumerical algorithms, Third Edition, Addison-Wesley, 1997.
     """
-    pass
+    mmax = n * (n - 1) // 2
+    if m >= mmax:
+        G = complete_graph(n)
+    else:
+        G = empty_graph(n)
+
+    if n == 1 or m >= mmax:
+        return G
+
+    u = 0
+    v = 1
+    t = 0
+    k = 0
+    while True:
+        if seed.randrange(mmax - t) < m - k:
+            G.add_edge(u, v)
+            k += 1
+            if k == m:
+                return G
+        t += 1
+        v += 1
+        if v == n:  # go to next row of adjacency matrix
+            u += 1
+            v = u + 1


 @py_random_state(2)
@@ -176,7 +263,32 @@ def gnm_random_graph(n, m, seed=None, directed=False):
     dense_gnm_random_graph

     """
-    pass
+    if directed:
+        G = nx.DiGraph()
+    else:
+        G = nx.Graph()
+    G.add_nodes_from(range(n))
+
+    if n == 1:
+        return G
+    max_edges = n * (n - 1)
+    if not directed:
+        max_edges /= 2.0
+    if m >= max_edges:
+        return complete_graph(n, create_using=G)
+
+    nlist = list(G)
+    edge_count = 0
+    while edge_count < m:
+        # generate random edge,u,v
+        u = seed.choice(nlist)
+        v = seed.choice(nlist)
+        if u == v or G.has_edge(u, v):
+            continue
+        else:
+            G.add_edge(u, v)
+            edge_count = edge_count + 1
+    return G


 @py_random_state(3)
@@ -218,7 +330,36 @@ def newman_watts_strogatz_graph(n, k, p, seed=None):
        Physics Letters A, 263, 341, 1999.
        https://doi.org/10.1016/S0375-9601(99)00757-4
     """
-    pass
+    if k > n:
+        raise nx.NetworkXError("k > n, choose smaller k or larger n")
+
+    # If k == n, the returned graph is a complete graph
+    if k == n:
+        return nx.complete_graph(n)
+
+    G = empty_graph(n)
+    nlist = list(G.nodes())
+    fromv = nlist
+    # connect the k/2 neighbors
+    for j in range(1, k // 2 + 1):
+        tov = fromv[j:] + fromv[0:j]  # the first j are now last
+        for i in range(len(fromv)):
+            G.add_edge(fromv[i], tov[i])
+    # for each edge u-v, with probability p, randomly select existing
+    # node w and add new edge u-w
+    e = list(G.edges())
+    for u, v in e:
+        if seed.random() < p:
+            w = seed.choice(nlist)
+            # no self-loops and reject if edge u-w exists
+            # is that the correct NWS model?
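+            # while-else: the new edge is added only if the loop exits without break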
+            while w == u or G.has_edge(u, w):
+                w = seed.choice(nlist)
+                if G.degree(u) >= n - 1:
+                    break  # skip this rewiring
+            else:
+                G.add_edge(u, w)
+    return G


 @py_random_state(3)
@@ -263,7 +404,37 @@ def watts_strogatz_graph(n, k, p, seed=None):
        Collective dynamics of small-world networks,
        Nature, 393, pp. 440--442, 1998.
     """
-    pass
+    if k > n:
+        raise nx.NetworkXError("k>n, choose smaller k or larger n")
+
+    # If k == n, the graph is complete, not a Watts-Strogatz graph
+    if k == n:
+        return nx.complete_graph(n)
+
+    G = nx.Graph()
+    nodes = list(range(n))  # nodes are labeled 0 to n-1
+    # connect each node to k/2 neighbors
+    for j in range(1, k // 2 + 1):
+        targets = nodes[j:] + nodes[0:j]  # first j nodes are now last in list
+        G.add_edges_from(zip(nodes, targets))
+    # rewire edges from each node
+    # loop over all nodes in order (label) and neighbors in order (distance)
+    # no self loops or multiple edges allowed
+    for j in range(1, k // 2 + 1):  # outer loop is neighbors
+        targets = nodes[j:] + nodes[0:j]  # first j nodes are now last in list
+        # inner loop in node order
+        for u, v in zip(nodes, targets):
+            if seed.random() < p:
+                w = seed.choice(nodes)
+                # Enforce no self-loops or multiple edges
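+                # while-else: rewire only if the loop exits without break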
+                while w == u or G.has_edge(u, w):
+                    w = seed.choice(nodes)
+                    if G.degree(u) >= n - 1:
+                        break  # skip this rewiring
+                else:
+                    G.remove_edge(u, v)
+                    G.add_edge(u, w)
+    return G


 @py_random_state(4)
@@ -311,13 +482,18 @@ def connected_watts_strogatz_graph(n, k, p, tries=100, seed=None):
        Collective dynamics of small-world networks,
        Nature, 393, pp. 440--442, 1998.
     """
-    pass
+    for i in range(tries):
+        # seed is an RNG so should change sequence each call
+        G = watts_strogatz_graph(n, k, p, seed)
+        if nx.is_connected(G):
+            return G
+    raise nx.NetworkXError("Maximum number of tries exceeded")


 @py_random_state(2)
 @nx._dispatchable(graphs=None, returns_graph=True)
 def random_regular_graph(d, n, seed=None):
-    """Returns a random $d$-regular graph on $n$ nodes.
+    r"""Returns a random $d$-regular graph on $n$ nodes.

     A regular graph is a graph where each node has the same number of neighbors.

@@ -328,7 +504,7 @@ def random_regular_graph(d, n, seed=None):
     d : int
       The degree of each node.
     n : integer
-      The number of nodes. The value of $n \\times d$ must be even.
+      The number of nodes. The value of $n \times d$ must be even.
     seed : integer, random_state, or None (default)
         Indicator of random number generation state.
         See :ref:`Randomness<randomness>`.
@@ -339,13 +515,13 @@ def random_regular_graph(d, n, seed=None):

     Kim and Vu's paper [2]_ shows that this algorithm samples in an
     asymptotically uniform way from the space of random graphs when
-    $d = O(n^{1 / 3 - \\epsilon})$.
+    $d = O(n^{1 / 3 - \epsilon})$.

     Raises
     ------

     NetworkXError
-        If $n \\times d$ is odd or $d$ is greater than or equal to $n$.
+        If $n \times d$ is odd or $d$ is greater than or equal to $n$.

     References
     ----------
@@ -360,7 +536,74 @@ def random_regular_graph(d, n, seed=None):
        San Diego, CA, USA, pp 213--222, 2003.
        http://portal.acm.org/citation.cfm?id=780542.780576
     """
-    pass
+    if (n * d) % 2 != 0:
+        raise nx.NetworkXError("n * d must be even")
+
+    if not 0 <= d < n:
+        raise nx.NetworkXError("the 0 <= d < n inequality must be satisfied")
+
+    if d == 0:
+        return empty_graph(n)
+
+    def _suitable(edges, potential_edges):
+        # Helper subroutine to check if there are suitable edges remaining
+        # If False, the generation of the graph has failed
+        if not potential_edges:
+            return True
+        for s1 in potential_edges:
+            for s2 in potential_edges:
+                # Two iterators on the same dictionary are guaranteed
+                # to visit it in the same order if there are no
+                # intervening modifications.
+                if s1 == s2:
+                    # Only need to consider s1-s2 pair one time
+                    break
+                if s1 > s2:
+                    s1, s2 = s2, s1
+                if (s1, s2) not in edges:
+                    return True
+        return False
+
+    def _try_creation():
+        # Attempt to create an edge set
+
+        edges = set()
+        stubs = list(range(n)) * d
+
+        while stubs:
+            potential_edges = defaultdict(lambda: 0)
+            seed.shuffle(stubs)
+            stubiter = iter(stubs)
+            for s1, s2 in zip(stubiter, stubiter):
+                if s1 > s2:
+                    s1, s2 = s2, s1
+                if s1 != s2 and ((s1, s2) not in edges):
+                    edges.add((s1, s2))
+                else:
+                    potential_edges[s1] += 1
+                    potential_edges[s2] += 1
+
+            if not _suitable(edges, potential_edges):
+                return None  # failed to find suitable edge set
+
+            stubs = [
+                node
+                for node, potential in potential_edges.items()
+                for _ in range(potential)
+            ]
+        return edges
+
+    # Even though a suitable edge set exists,
+    # the generation of such a set is not guaranteed.
+    # Try repeatedly to find one.
+    edges = _try_creation()
+    while edges is None:
+        edges = _try_creation()
+
+    G = nx.Graph()
+    G.add_edges_from(edges)
+
+    return G
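+
+# The pairing model consumes every stub, so each node ends with degree
+# exactly d.  A doctest-style sketch:
+#
+#   >>> G = nx.random_regular_graph(3, 10, seed=0)
+#   >>> {deg for _, deg in G.degree()}
+#   {3}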


 def _random_subset(seq, m, rng):
@@ -371,7 +614,11 @@ def _random_subset(seq, m, rng):

     Note: rng is a random.Random or numpy.random.RandomState instance.
     """
-    pass
+    targets = set()
+    while len(targets) < m:
+        x = rng.choice(seq)
+        targets.add(x)
+    return targets
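+
+# E.g. `_random_subset(repeated_nodes, m, rng)` returns exactly m distinct
+# elements; repeats in `seq` weight the draw, which is what implements
+# preferential attachment in the generators below.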


 @py_random_state(2)
@@ -412,7 +659,39 @@ def barabasi_albert_graph(n, m, seed=None, initial_graph=None):
     .. [1] A. L. Barabási and R. Albert "Emergence of scaling in
        random networks", Science 286, pp 509-512, 1999.
     """
-    pass
+
+    if m < 1 or m >= n:
+        raise nx.NetworkXError(
+            f"Barabási–Albert network must have m >= 1 and m < n, m = {m}, n = {n}"
+        )
+
+    if initial_graph is None:
+        # Default initial graph: star graph on m + 1 nodes
+        G = star_graph(m)
+    else:
+        if len(initial_graph) < m or len(initial_graph) > n:
+            raise nx.NetworkXError(
+                f"Barabási–Albert initial graph needs between m={m} and n={n} nodes"
+            )
+        G = initial_graph.copy()
+
+    # List of existing nodes, with nodes repeated once for each adjacent edge
+    repeated_nodes = [n for n, d in G.degree() for _ in range(d)]
+    # Start adding the other n - m0 nodes.
+    source = len(G)
+    while source < n:
+        # Now choose m unique nodes from the existing nodes
+        # Pick uniformly from repeated_nodes (preferential attachment)
+        targets = _random_subset(repeated_nodes, m, seed)
+        # Add edges to m nodes from the source.
+        G.add_edges_from(zip([source] * m, targets))
+        # Add one node to the list for each new edge just created.
+        repeated_nodes.extend(targets)
+        # And the new node "source" has m edges to add to the list.
+        repeated_nodes.extend([source] * m)
+
+        source += 1
+    return G
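+
+# With the default star initial graph the edge count is deterministic:
+# m star edges plus m per each of the n - m - 1 remaining nodes, i.e.
+# m * (n - m).  A doctest-style sketch:
+#
+#   >>> G = nx.barabasi_albert_graph(100, 2, seed=3)
+#   >>> G.number_of_edges() == 2 * (100 - 2)
+#   True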


 @py_random_state(4)
@@ -458,7 +737,61 @@ def dual_barabasi_albert_graph(n, m1, m2, p, seed=None, initial_graph=None):
     ----------
     .. [1] N. Moshiri "The dual-Barabasi-Albert model", arXiv:1810.10538.
     """
-    pass
+
+    if m1 < 1 or m1 >= n:
+        raise nx.NetworkXError(
+            f"Dual Barabási–Albert must have m1 >= 1 and m1 < n, m1 = {m1}, n = {n}"
+        )
+    if m2 < 1 or m2 >= n:
+        raise nx.NetworkXError(
+            f"Dual Barabási–Albert must have m2 >= 1 and m2 < n, m2 = {m2}, n = {n}"
+        )
+    if p < 0 or p > 1:
+        raise nx.NetworkXError(
+            f"Dual Barabási–Albert network must have 0 <= p <= 1, p = {p}"
+        )
+
+    # For simplicity, if p == 0 or 1, just return BA
+    if p == 1:
+        return barabasi_albert_graph(n, m1, seed)
+    elif p == 0:
+        return barabasi_albert_graph(n, m2, seed)
+
+    if initial_graph is None:
+        # Default initial graph: star graph on max(m1, m2) + 1 nodes
+        G = star_graph(max(m1, m2))
+    else:
+        if len(initial_graph) < max(m1, m2) or len(initial_graph) > n:
+            raise nx.NetworkXError(
+                f"Barabási–Albert initial graph must have between "
+                f"max(m1, m2) = {max(m1, m2)} and n = {n} nodes"
+            )
+        G = initial_graph.copy()
+
+    # Target nodes for new edges
+    targets = list(G)
+    # List of existing nodes, with nodes repeated once for each adjacent edge
+    repeated_nodes = [n for n, d in G.degree() for _ in range(d)]
+    # Start adding the remaining nodes.
+    source = len(G)
+    while source < n:
+        # Pick which m to use (m1 or m2)
+        if seed.random() < p:
+            m = m1
+        else:
+            m = m2
+        # Now choose m unique nodes from the existing nodes
+        # Pick uniformly from repeated_nodes (preferential attachment)
+        targets = _random_subset(repeated_nodes, m, seed)
+        # Add edges to m nodes from the source.
+        G.add_edges_from(zip([source] * m, targets))
+        # Add one node to the list for each new edge just created.
+        repeated_nodes.extend(targets)
+        # And the new node "source" has m edges to add to the list.
+        repeated_nodes.extend([source] * m)
+
+        source += 1
+    return G
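+
+# Each growth step draws m1 targets with probability p and m2 otherwise, so a
+# new node adds p * m1 + (1 - p) * m2 edges in expectation.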


 @py_random_state(4)
@@ -511,7 +844,115 @@ def extended_barabasi_albert_graph(n, m, p, q, seed=None):
        Topology of evolving networks: local events and universality
        Physical review letters, 85(24), 5234.
     """
-    pass
+    if m < 1 or m >= n:
+        msg = f"Extended Barabasi-Albert network needs m>=1 and m<n, m={m}, n={n}"
+        raise nx.NetworkXError(msg)
+    if p + q >= 1:
+        msg = f"Extended Barabasi-Albert network needs p + q <= 1, p={p}, q={q}"
+        raise nx.NetworkXError(msg)
+
+    # Add m initial nodes (m0 in barabasi-speak)
+    G = empty_graph(m)
+
+    # List of nodes to represent the preferential attachment random selection.
+    # At the creation of the graph, all nodes are added to the list
+    # so that even nodes that are not connected have a chance to get selected,
+    # for rewiring and adding of edges.
+    # With each new edge, nodes at the ends of the edge are added to the list.
+    attachment_preference = []
+    attachment_preference.extend(range(m))
+
+    # Start adding the other n-m nodes. The first node is m.
+    new_node = m
+    while new_node < n:
+        a_probability = seed.random()
+
+        # Degree of a node in a complete graph on the current nodes, and the
+        # number of edges such a complete graph would have
+        clique_degree = len(G) - 1
+        clique_size = (len(G) * clique_degree) / 2
+
+        # Adding m new edges, if there is room to add them
+        if a_probability < p and G.size() <= clique_size - m:
+            # Select the nodes where an edge can be added
+            eligible_nodes = [nd for nd, deg in G.degree() if deg < clique_degree]
+            for i in range(m):
+                # Choosing a random source node from eligible_nodes
+                src_node = seed.choice(eligible_nodes)
+
+                # Picking a possible node that is not 'src_node' or
+                # neighbor with 'src_node', with preferential attachment
+                prohibited_nodes = list(G[src_node])
+                prohibited_nodes.append(src_node)
+                # This will raise an exception if the sequence is empty
+                dest_node = seed.choice(
+                    [nd for nd in attachment_preference if nd not in prohibited_nodes]
+                )
+                # Adding the new edge
+                G.add_edge(src_node, dest_node)
+
+                # Appending both nodes to add to their preferential attachment
+                attachment_preference.append(src_node)
+                attachment_preference.append(dest_node)
+
+                # Adjusting the eligible nodes. Degree may be saturated.
+                if G.degree(src_node) == clique_degree:
+                    eligible_nodes.remove(src_node)
+                if G.degree(dest_node) == clique_degree and dest_node in eligible_nodes:
+                    eligible_nodes.remove(dest_node)
+
+        # Rewiring m edges, if there are enough edges
+        elif p <= a_probability < (p + q) and m <= G.size() < clique_size:
+            # Selecting nodes that have at least 1 edge but that are not
+            # fully connected to ALL other nodes (center of star).
+            # These nodes are the pivot nodes of the edges to rewire
+            eligible_nodes = [nd for nd, deg in G.degree() if 0 < deg < clique_degree]
+            for i in range(m):
+                # Choosing a random source node
+                node = seed.choice(eligible_nodes)
+
+                # The available nodes do have a neighbor at least.
+                nbr_nodes = list(G[node])
+
+                # Choosing the other end that will get detached
+                src_node = seed.choice(nbr_nodes)
+
+                # Picking a target node that is not 'node' or
+                # neighbor with 'node', with preferential attachment
+                nbr_nodes.append(node)
+                dest_node = seed.choice(
+                    [nd for nd in attachment_preference if nd not in nbr_nodes]
+                )
+                # Rewire
+                G.remove_edge(node, src_node)
+                G.add_edge(node, dest_node)
+
+                # Adjusting the preferential attachment list
+                attachment_preference.remove(src_node)
+                attachment_preference.append(dest_node)
+
+                # Adjusting the eligible nodes.
+                # nodes may be saturated or isolated.
+                if G.degree(src_node) == 0 and src_node in eligible_nodes:
+                    eligible_nodes.remove(src_node)
+                if dest_node in eligible_nodes:
+                    if G.degree(dest_node) == clique_degree:
+                        eligible_nodes.remove(dest_node)
+                else:
+                    if G.degree(dest_node) == 1:
+                        eligible_nodes.append(dest_node)
+
+        # Adding new node with m edges
+        else:
+            # Select the edges' nodes by preferential attachment
+            targets = _random_subset(attachment_preference, m, seed)
+            G.add_edges_from(zip([new_node] * m, targets))
+
+            # Add one node to the list for each new edge just created.
+            attachment_preference.extend(targets)
+            # Add the new node m + 1 times: once per new edge, plus one
+            # baseline entry for the node itself (matching the initial nodes,
+            # which each start with one entry despite having degree zero)
+            attachment_preference.extend([new_node] * (m + 1))
+            new_node += 1
+    return G
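+
+# With p = q = 0 every iteration takes the growth branch, reducing the model
+# to plain preferential attachment.  A doctest-style sketch:
+#
+#   >>> G = nx.extended_barabasi_albert_graph(100, 2, 0, 0, seed=5)
+#   >>> len(G)
+#   100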


 @py_random_state(3)
@@ -562,7 +1003,46 @@ def powerlaw_cluster_graph(n, m, p, seed=None):
        "Growing scale-free networks with tunable clustering",
        Phys. Rev. E, 65, 026107, 2002.
     """
-    pass
+
+    if m < 1 or n < m:
+        raise nx.NetworkXError(f"NetworkXError must have m>1 and m<n, m={m},n={n}")
+
+    if p > 1 or p < 0:
+        raise nx.NetworkXError(f"NetworkXError p must be in [0,1], p={p}")
+
+    G = empty_graph(m)  # add m initial nodes (m0 in barabasi-speak)
+    repeated_nodes = list(G.nodes())  # list of existing nodes to sample from
+    # with nodes repeated once for each adjacent edge
+    source = m  # next node is m
+    while source < n:  # Now add the remaining n - m nodes
+        possible_targets = _random_subset(repeated_nodes, m, seed)
+        # do one preferential attachment for new node
+        target = possible_targets.pop()
+        G.add_edge(source, target)
+        repeated_nodes.append(target)  # add one node to list for each new link
+        count = 1
+        while count < m:  # add m-1 more new links
+            if seed.random() < p:  # clustering step: add triangle
+                neighborhood = [
+                    nbr
+                    for nbr in G.neighbors(target)
+                    if not G.has_edge(source, nbr) and nbr != source
+                ]
+                if neighborhood:  # if there is a neighbor without a link
+                    nbr = seed.choice(neighborhood)
+                    G.add_edge(source, nbr)  # add triangle
+                    repeated_nodes.append(nbr)
+                    count = count + 1
+                    continue  # go to top of while loop
+            # else do preferential attachment step if above fails
+            target = possible_targets.pop()
+            G.add_edge(source, target)
+            repeated_nodes.append(target)
+            count = count + 1
+
+        repeated_nodes.extend([source] * m)  # add source node to list m times
+        source += 1
+    return G
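+
+# Each new node performs m attachment/triangle steps, so the graph grows to
+# n nodes with at most m * (n - m) edges; average clustering is typically
+# well above that of the comparable Barabási–Albert graph.  Sketch:
+#
+#   >>> G = nx.powerlaw_cluster_graph(100, 2, 0.5, seed=11)
+#   >>> len(G)
+#   100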


 @py_random_state(3)
@@ -596,7 +1076,24 @@ def random_lobster(n, p1, p2, seed=None):
     NetworkXError
         If `p1` or `p2` parameters are >= 1 because the while loops would never finish.
     """
-    pass
+    p1, p2 = abs(p1), abs(p2)
+    if any(p >= 1 for p in [p1, p2]):
+        raise nx.NetworkXError("Probability values for `p1` and `p2` must both be < 1.")
+
+    # a necessary ingredient in any self-respecting graph library
+    llen = int(2 * seed.random() * n + 0.5)
+    L = path_graph(llen)
+    # build caterpillar: add edges to path graph with probability p1
+    current_node = llen - 1
+    for n in range(llen):
+        while seed.random() < p1:  # add fuzzy caterpillar parts
+            current_node += 1
+            L.add_edge(n, current_node)
+            cat_node = current_node
+            while seed.random() < p2:  # add crunchy lobster bits
+                current_node += 1
+                L.add_edge(cat_node, current_node)
+    return L  # voila, un lobster!
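+
+# The result is a tree by construction (every added edge attaches a fresh
+# node); removing leaves twice recovers the backbone path.  Sketch, assuming
+# the randomly sized backbone is nonempty:
+#
+#   >>> G = nx.random_lobster(10, 0.5, 0.5, seed=2)
+#   >>> nx.is_tree(G)
+#   True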


 @py_random_state(1)
@@ -624,7 +1121,37 @@ def random_shell_graph(constructor, seed=None):
     >>> G = nx.random_shell_graph(constructor)

     """
-    pass
+    G = empty_graph(0)
+
+    glist = []
+    intra_edges = []
+    nnodes = 0
+    # create gnm graphs for each shell
+    for n, m, d in constructor:
+        inter_edges = int(m * d)
+        intra_edges.append(m - inter_edges)
+        g = nx.convert_node_labels_to_integers(
+            gnm_random_graph(n, inter_edges, seed=seed), first_label=nnodes
+        )
+        glist.append(g)
+        nnodes += n
+        G = nx.operators.union(G, g)
+
+    # connect the shells randomly
+    for gi in range(len(glist) - 1):
+        nlist1 = list(glist[gi])
+        nlist2 = list(glist[gi + 1])
+        total_edges = intra_edges[gi]
+        edge_count = 0
+        while edge_count < total_edges:
+            u = seed.choice(nlist1)
+            v = seed.choice(nlist2)
+            if u == v or G.has_edge(u, v):
+                continue
+            else:
+                G.add_edge(u, v)
+                edge_count = edge_count + 1
+    return G
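+
+# Note the bookkeeping above: each (n, m, d) triple yields a shell with n
+# nodes and int(m * d) internal edges, leaving m - int(m * d) edges to
+# connect that shell to the next one.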


 @py_random_state(2)
@@ -658,7 +1185,10 @@ def random_powerlaw_tree(n, gamma=3, seed=None, tries=100):
     edges is one smaller than the number of nodes).

     """
-    pass
+    # This call may raise a NetworkXError if the number of tries is exceeded.
+    seq = random_powerlaw_tree_sequence(n, gamma=gamma, seed=seed, tries=tries)
+    G = degree_sequence_tree(seq)
+    return G


 @py_random_state(2)
@@ -692,17 +1222,40 @@ def random_powerlaw_tree_sequence(n, gamma=3, seed=None, tries=100):
     edges is one smaller than the number of nodes).

     """
-    pass
+    # get trial sequence
+    z = nx.utils.powerlaw_sequence(n, exponent=gamma, seed=seed)
+    # round to integer values in the range [0,n]
+    zseq = [min(n, max(round(s), 0)) for s in z]
+
+    # another sequence to swap values from
+    z = nx.utils.powerlaw_sequence(tries, exponent=gamma, seed=seed)
+    # round to integer values in the range [0,n]
+    swap = [min(n, max(round(s), 0)) for s in z]
+
+    for deg in swap:
+        # If this degree sequence can be the degree sequence of a tree, return
+        # it. It can be a tree if the number of edges is one fewer than the
+        # number of nodes, or in other words, `n - sum(zseq) / 2 == 1`. We
+        # use an equivalent condition below that avoids floating point
+        # operations.
+        if 2 * n - sum(zseq) == 2:
+            return zseq
+        index = seed.randint(0, n - 1)
+        zseq[index] = swap.pop()
+
+    raise nx.NetworkXError(
+        f"Exceeded max ({tries}) attempts for a valid tree sequence."
+    )
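+
+# Any returned sequence satisfies sum(zseq) == 2 * (n - 1), the degree sum of
+# a tree on n nodes, so degree_sequence_tree() above can always realize it.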


 @py_random_state(3)
 @nx._dispatchable(graphs=None, returns_graph=True)
 def random_kernel_graph(n, kernel_integral, kernel_root=None, seed=None):
-    """Returns an random graph based on the specified kernel.
+    r"""Returns an random graph based on the specified kernel.

     The algorithm chooses each of the $[n(n-1)]/2$ possible edges with
-    probability specified by a kernel $\\kappa(x,y)$ [1]_.  The kernel
-    $\\kappa(x,y)$ must be a symmetric (in $x,y$), non-negative,
+    probability specified by a kernel $\kappa(x,y)$ [1]_.  The kernel
+    $\kappa(x,y)$ must be a symmetric (in $x,y$), non-negative,
     bounded function.

     Parameters
@@ -710,8 +1263,8 @@ def random_kernel_graph(n, kernel_integral, kernel_root=None, seed=None):
     n : int
         The number of nodes
     kernel_integral : function
-        Function that returns the definite integral of the kernel $\\kappa(x,y)$,
-        $F(y,a,b) := \\int_a^b \\kappa(x,y)dx$
+        Function that returns the definite integral of the kernel $\kappa(x,y)$,
+        $F(y,a,b) := \int_a^b \kappa(x,y)dx$
     kernel_root: function (optional)
         Function that returns the root $b$ of the equation $F(y,a,b) = r$.
         If None, the root is found using :func:`scipy.optimize.brentq`
@@ -732,7 +1285,7 @@ def random_kernel_graph(n, kernel_integral, kernel_root=None, seed=None):
     Examples
     --------
     Generate an Erdős–Rényi random graph $G(n,c/n)$, with kernel
-    $\\kappa(x,y)=c$ where $c$ is the mean expected degree.
+    $\kappa(x,y)=c$ where $c$ is the mean expected degree.

     >>> def integral(u, w, z):
     ...     return c * (z - w)
@@ -756,4 +1309,23 @@ def random_kernel_graph(n, kernel_integral, kernel_root=None, seed=None):
        "Fast Generation of Sparse Random Kernel Graphs".
        PLoS ONE 10(9): e0135177, 2015. doi:10.1371/journal.pone.0135177
     """
-    pass
+    if kernel_root is None:
+        import scipy as sp
+
+        def kernel_root(y, a, r):
+            def my_function(b):
+                return kernel_integral(y, a, b) - r
+
+            return sp.optimize.brentq(my_function, a, 1)
+
+    graph = nx.Graph()
+    graph.add_nodes_from(range(n))
+    (i, j) = (1, 1)
+    while i < n:
+        r = -math.log(1 - seed.random())  # (1-seed.random()) in (0, 1]
+        if kernel_integral(i / n, j / n, 1) <= r:
+            i, j = i + 1, i + 1
+        else:
+            j = math.ceil(n * kernel_root(i / n, j / n, r))
+            graph.add_edge(i - 1, j - 1)
+    return graph
diff --git a/networkx/generators/small.py b/networkx/generators/small.py
index a019a4826..acd2fbc7a 100644
--- a/networkx/generators/small.py
+++ b/networkx/generators/small.py
@@ -2,18 +2,43 @@
 Various small and named graphs, together with some compact generators.

 """
-__all__ = ['LCF_graph', 'bull_graph', 'chvatal_graph', 'cubical_graph',
-    'desargues_graph', 'diamond_graph', 'dodecahedral_graph',
-    'frucht_graph', 'heawood_graph', 'hoffman_singleton_graph',
-    'house_graph', 'house_x_graph', 'icosahedral_graph',
-    'krackhardt_kite_graph', 'moebius_kantor_graph', 'octahedral_graph',
-    'pappus_graph', 'petersen_graph', 'sedgewick_maze_graph',
-    'tetrahedral_graph', 'truncated_cube_graph',
-    'truncated_tetrahedron_graph', 'tutte_graph']
+
+__all__ = [
+    "LCF_graph",
+    "bull_graph",
+    "chvatal_graph",
+    "cubical_graph",
+    "desargues_graph",
+    "diamond_graph",
+    "dodecahedral_graph",
+    "frucht_graph",
+    "heawood_graph",
+    "hoffman_singleton_graph",
+    "house_graph",
+    "house_x_graph",
+    "icosahedral_graph",
+    "krackhardt_kite_graph",
+    "moebius_kantor_graph",
+    "octahedral_graph",
+    "pappus_graph",
+    "petersen_graph",
+    "sedgewick_maze_graph",
+    "tetrahedral_graph",
+    "truncated_cube_graph",
+    "truncated_tetrahedron_graph",
+    "tutte_graph",
+]
+
 from functools import wraps
+
 import networkx as nx
 from networkx.exception import NetworkXError
-from networkx.generators.classic import complete_graph, cycle_graph, empty_graph, path_graph
+from networkx.generators.classic import (
+    complete_graph,
+    cycle_graph,
+    empty_graph,
+    path_graph,
+)


 def _raise_on_directed(func):
@@ -22,7 +47,16 @@ def _raise_on_directed(func):
     NetworkX exception when `create_using` is a DiGraph (class or instance) for
     graph generators that do not support directed outputs.
     """
-    pass
+
+    @wraps(func)
+    def wrapper(*args, **kwargs):
+        if kwargs.get("create_using") is not None:
+            G = nx.empty_graph(create_using=kwargs["create_using"])
+            if G.is_directed():
+                raise NetworkXError("Directed Graph not supported")
+        return func(*args, **kwargs)
+
+    return wrapper


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -77,7 +111,33 @@ def LCF_graph(n, shift_list, repeats, create_using=None):
     .. [1] https://en.wikipedia.org/wiki/LCF_notation

     """
-    pass
+    if n <= 0:
+        return empty_graph(0, create_using)
+
+    # start with the n-cycle
+    G = cycle_graph(n, create_using)
+    if G.is_directed():
+        raise NetworkXError("Directed Graph not supported")
+    G.name = "LCF_graph"
+    nodes = sorted(G)
+
+    n_extra_edges = repeats * len(shift_list)
+    # edges are added n_extra_edges times
+    # (not all of these need be new)
+    if n_extra_edges < 1:
+        return G
+
+    for i in range(n_extra_edges):
+        shift = shift_list[i % len(shift_list)]  # cycle through shift_list
+        v1 = nodes[i % n]  # cycle repeatedly through nodes
+        v2 = nodes[(i + shift) % n]
+        G.add_edge(v1, v2)
+    return G
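+
+# E.g. LCF_graph(14, [5, -5], 7, None) yields the Heawood graph; several of
+# the named generators below are defined this way.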
+
+
+# -------------------------------------------------------------------------------
+#   Various small and named graphs
+# -------------------------------------------------------------------------------


 @_raise_on_directed
@@ -106,7 +166,12 @@ def bull_graph(create_using=None):
     .. [1] https://en.wikipedia.org/wiki/Bull_graph.

     """
-    pass
+    G = nx.from_dict_of_lists(
+        {0: [1, 2], 1: [0, 2, 3], 2: [0, 1, 4], 3: [1], 4: [2]},
+        create_using=create_using,
+    )
+    G.name = "Bull Graph"
+    return G


 @_raise_on_directed
@@ -135,7 +200,23 @@ def chvatal_graph(create_using=None):
     .. [2] https://mathworld.wolfram.com/ChvatalGraph.html

     """
-    pass
+    G = nx.from_dict_of_lists(
+        {
+            0: [1, 4, 6, 9],
+            1: [2, 5, 7],
+            2: [3, 6, 8],
+            3: [4, 7, 9],
+            4: [5, 8],
+            5: [10, 11],
+            6: [10, 11],
+            7: [8, 11],
+            8: [10],
+            9: [10, 11],
+        },
+        create_using=create_using,
+    )
+    G.name = "Chvatal Graph"
+    return G


 @_raise_on_directed
@@ -165,7 +246,21 @@ def cubical_graph(create_using=None):
     .. [1] https://en.wikipedia.org/wiki/Cube#Cubical_graph

     """
-    pass
+    G = nx.from_dict_of_lists(
+        {
+            0: [1, 3, 4],
+            1: [0, 2, 7],
+            2: [1, 3, 6],
+            3: [0, 2, 5],
+            4: [0, 5, 7],
+            5: [3, 4, 6],
+            6: [2, 5, 7],
+            7: [1, 4, 6],
+        },
+        create_using=create_using,
+    )
+    G.name = "Platonic Cubical Graph"
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -193,7 +288,9 @@ def desargues_graph(create_using=None):
     .. [1] https://en.wikipedia.org/wiki/Desargues_graph
     .. [2] https://mathworld.wolfram.com/DesarguesGraph.html
     """
-    pass
+    G = LCF_graph(20, [5, -5, 9, -9], 5, create_using)
+    G.name = "Desargues Graph"
+    return G


 @_raise_on_directed
@@ -219,7 +316,11 @@ def diamond_graph(create_using=None):
     ----------
     .. [1] https://mathworld.wolfram.com/DiamondGraph.html
     """
-    pass
+    G = nx.from_dict_of_lists(
+        {0: [1, 2], 1: [0, 2, 3], 2: [0, 1, 3], 3: [1, 2]}, create_using=create_using
+    )
+    G.name = "Diamond Graph"
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -248,7 +349,9 @@ def dodecahedral_graph(create_using=None):
     .. [2] https://mathworld.wolfram.com/DodecahedralGraph.html

     """
-    pass
+    G = LCF_graph(20, [10, 7, 4, -4, -7, 10, -4, 7, -7, 4], 2, create_using)
+    G.name = "Dodecahedral Graph"
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -277,7 +380,25 @@ def frucht_graph(create_using=None):
     .. [2] https://mathworld.wolfram.com/FruchtGraph.html

     """
-    pass
+    G = cycle_graph(7, create_using)
+    G.add_edges_from(
+        [
+            [0, 7],
+            [1, 7],
+            [2, 8],
+            [3, 9],
+            [4, 9],
+            [5, 10],
+            [6, 10],
+            [7, 11],
+            [8, 11],
+            [8, 9],
+            [10, 11],
+        ]
+    )
+
+    G.name = "Frucht Graph"
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -309,7 +430,9 @@ def heawood_graph(create_using=None):
     .. [3] https://www.win.tue.nl/~aeb/graphs/Heawood.html

     """
-    pass
+    G = LCF_graph(14, [5, -5], 7, create_using)
+    G.name = "Heawood Graph"
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -341,7 +464,18 @@ def hoffman_singleton_graph():
     .. [3] https://en.wikipedia.org/wiki/Hoffman%E2%80%93Singleton_graph

     """
-    pass
+    G = nx.Graph()
+    for i in range(5):
+        for j in range(5):
+            G.add_edge(("pentagon", i, j), ("pentagon", i, (j - 1) % 5))
+            G.add_edge(("pentagon", i, j), ("pentagon", i, (j + 1) % 5))
+            G.add_edge(("pentagram", i, j), ("pentagram", i, (j - 2) % 5))
+            G.add_edge(("pentagram", i, j), ("pentagram", i, (j + 2) % 5))
+            for k in range(5):
+                G.add_edge(("pentagon", i, j), ("pentagram", k, (i * k + j) % 5))
+    G = nx.convert_node_labels_to_integers(G)
+    G.name = "Hoffman-Singleton Graph"
+    return G
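+
+# Sanity check: the construction yields the 7-regular Hoffman-Singleton graph
+# on 50 nodes with 175 edges.
+#
+#   >>> G = hoffman_singleton_graph()
+#   >>> len(G), G.number_of_edges()
+#   (50, 175)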


 @_raise_on_directed
@@ -367,7 +501,12 @@ def house_graph(create_using=None):
     ----------
     .. [1] https://mathworld.wolfram.com/HouseGraph.html
     """
-    pass
+    G = nx.from_dict_of_lists(
+        {0: [1, 2], 1: [0, 3], 2: [0, 3, 4], 3: [1, 2, 4], 4: [2, 3]},
+        create_using=create_using,
+    )
+    G.name = "House Graph"
+    return G


 @_raise_on_directed
@@ -394,7 +533,10 @@ def house_x_graph(create_using=None):
     ----------
     .. [1] https://mathworld.wolfram.com/HouseGraph.html
     """
-    pass
+    G = house_graph(create_using)
+    G.add_edges_from([(0, 3), (1, 2)])
+    G.name = "House-with-X-inside Graph"
+    return G


 @_raise_on_directed
@@ -421,7 +563,23 @@ def icosahedral_graph(create_using=None):
     ----------
     .. [1] https://mathworld.wolfram.com/IcosahedralGraph.html
     """
-    pass
+    G = nx.from_dict_of_lists(
+        {
+            0: [1, 5, 7, 8, 11],
+            1: [2, 5, 6, 8],
+            2: [3, 6, 8, 9],
+            3: [4, 6, 9, 10],
+            4: [5, 6, 10, 11],
+            5: [6, 11],
+            7: [8, 9, 10, 11],
+            8: [9],
+            9: [10],
+            10: [11],
+        },
+        create_using=create_using,
+    )
+    G.name = "Platonic Icosahedral Graph"
+    return G


 @_raise_on_directed
@@ -456,7 +614,23 @@ def krackhardt_kite_graph(create_using=None):
        35 (2): 342–369. doi:10.2307/2393394. JSTOR 2393394. June 1990.

     """
-    pass
+    G = nx.from_dict_of_lists(
+        {
+            0: [1, 2, 3, 5],
+            1: [0, 3, 4, 6],
+            2: [0, 3, 5],
+            3: [0, 1, 2, 4, 5, 6],
+            4: [1, 3, 6],
+            5: [0, 2, 3, 6, 7],
+            6: [1, 3, 4, 5, 7],
+            7: [5, 6, 8],
+            8: [7, 9],
+            9: [8],
+        },
+        create_using=create_using,
+    )
+    G.name = "Krackhardt Kite Social Network"
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -483,7 +657,9 @@ def moebius_kantor_graph(create_using=None):
     .. [1] https://en.wikipedia.org/wiki/M%C3%B6bius%E2%80%93Kantor_graph

     """
-    pass
+    G = LCF_graph(16, [5, -5], 8, create_using)
+    G.name = "Moebius-Kantor Graph"
+    return G


 @_raise_on_directed
@@ -514,7 +690,12 @@ def octahedral_graph(create_using=None):
     .. [2] https://en.wikipedia.org/wiki/Tur%C3%A1n_graph#Special_cases

     """
-    pass
+    G = nx.from_dict_of_lists(
+        {0: [1, 2, 3, 4], 1: [2, 3, 5], 2: [4, 5], 3: [4, 5], 4: [5]},
+        create_using=create_using,
+    )
+    G.name = "Platonic Octahedral Graph"
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -535,7 +716,9 @@ def pappus_graph():
     ----------
     .. [1] https://en.wikipedia.org/wiki/Pappus_graph
     """
-    pass
+    G = LCF_graph(18, [5, 7, -7, 7, -7, -5], 3)
+    G.name = "Pappus Graph"
+    return G


 @_raise_on_directed
@@ -564,7 +747,23 @@ def petersen_graph(create_using=None):
     .. [1] https://en.wikipedia.org/wiki/Petersen_graph
     .. [2] https://www.win.tue.nl/~aeb/drg/graphs/Petersen.html
     """
-    pass
+    G = nx.from_dict_of_lists(
+        {
+            0: [1, 4, 5],
+            1: [0, 2, 6],
+            2: [1, 3, 7],
+            3: [2, 4, 8],
+            4: [3, 0, 9],
+            5: [0, 7, 8],
+            6: [1, 8, 9],
+            7: [2, 5, 9],
+            8: [3, 5, 6],
+            9: [4, 6, 7],
+        },
+        create_using=create_using,
+    )
+    G.name = "Petersen Graph"
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -590,7 +789,14 @@ def sedgewick_maze_graph(create_using=None):
     ----------
     .. [1] Figure 18.2, Chapter 18, Graph Algorithms (3rd Ed), Sedgewick
     """
-    pass
+    G = empty_graph(0, create_using)
+    G.add_nodes_from(range(8))
+    G.add_edges_from([[0, 2], [0, 7], [0, 5]])
+    G.add_edges_from([[1, 7], [2, 6]])
+    G.add_edges_from([[3, 4], [3, 5]])
+    G.add_edges_from([[4, 5], [4, 7], [4, 6]])
+    G.name = "Sedgewick Maze"
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -617,7 +823,9 @@ def tetrahedral_graph(create_using=None):
     .. [1] https://en.wikipedia.org/wiki/Tetrahedron#Tetrahedral_graph

     """
-    pass
+    G = complete_graph(4, create_using)
+    G.name = "Platonic Tetrahedral Graph"
+    return G


 @_raise_on_directed
@@ -647,7 +855,36 @@ def truncated_cube_graph(create_using=None):
     .. [2] https://www.coolmath.com/reference/polyhedra-truncated-cube

     """
-    pass
+    G = nx.from_dict_of_lists(
+        {
+            0: [1, 2, 4],
+            1: [11, 14],
+            2: [3, 4],
+            3: [6, 8],
+            4: [5],
+            5: [16, 18],
+            6: [7, 8],
+            7: [10, 12],
+            8: [9],
+            9: [17, 20],
+            10: [11, 12],
+            11: [14],
+            12: [13],
+            13: [21, 22],
+            14: [15],
+            15: [19, 23],
+            16: [17, 18],
+            17: [20],
+            18: [19],
+            19: [23],
+            20: [21],
+            21: [22],
+            22: [23],
+        },
+        create_using=create_using,
+    )
+    G.name = "Truncated Cube Graph"
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -674,7 +911,10 @@ def truncated_tetrahedron_graph(create_using=None):
     .. [1] https://en.wikipedia.org/wiki/Truncated_tetrahedron

     """
-    pass
+    G = path_graph(12, create_using)
+    G.add_edges_from([(0, 2), (0, 9), (1, 6), (3, 11), (4, 11), (5, 7), (8, 10)])
+    G.name = "Truncated Tetrahedron Graph"
+    return G


 @_raise_on_directed
@@ -704,4 +944,50 @@ def tutte_graph(create_using=None):
     ----------
     .. [1] https://en.wikipedia.org/wiki/Tutte_graph
     """
-    pass
+    G = nx.from_dict_of_lists(
+        {
+            0: [1, 2, 3],
+            1: [4, 26],
+            2: [10, 11],
+            3: [18, 19],
+            4: [5, 33],
+            5: [6, 29],
+            6: [7, 27],
+            7: [8, 14],
+            8: [9, 38],
+            9: [10, 37],
+            10: [39],
+            11: [12, 39],
+            12: [13, 35],
+            13: [14, 15],
+            14: [34],
+            15: [16, 22],
+            16: [17, 44],
+            17: [18, 43],
+            18: [45],
+            19: [20, 45],
+            20: [21, 41],
+            21: [22, 23],
+            22: [40],
+            23: [24, 27],
+            24: [25, 32],
+            25: [26, 31],
+            26: [33],
+            27: [28],
+            28: [29, 32],
+            29: [30],
+            30: [31, 33],
+            31: [32],
+            34: [35, 38],
+            35: [36],
+            36: [37, 39],
+            37: [38],
+            40: [41, 44],
+            41: [42],
+            42: [43, 45],
+            43: [44],
+        },
+        create_using=create_using,
+    )
+    G.name = "Tutte's Graph"
+    return G
diff --git a/networkx/generators/social.py b/networkx/generators/social.py
index b8c273f72..6f150e2fc 100644
--- a/networkx/generators/social.py
+++ b/networkx/generators/social.py
@@ -2,8 +2,13 @@
 Famous social networks.
 """
 import networkx as nx
-__all__ = ['karate_club_graph', 'davis_southern_women_graph',
-    'florentine_families_graph', 'les_miserables_graph']
+
+__all__ = [
+    "karate_club_graph",
+    "davis_southern_women_graph",
+    "florentine_families_graph",
+    "les_miserables_graph",
+]


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -31,7 +36,61 @@ def karate_club_graph():
        "An Information Flow Model for Conflict and Fission in Small Groups."
        *Journal of Anthropological Research*, 33, 452--473, (1977).
     """
-    pass
+    # Create the set of all members, and the members of each club.
+    all_members = set(range(34))
+    club1 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 16, 17, 19, 21}
+    # club2 = all_members - club1
+
+    G = nx.Graph()
+    G.add_nodes_from(all_members)
+    G.name = "Zachary's Karate Club"
+
+    zacharydat = """\
+0 4 5 3 3 3 3 2 2 0 2 3 2 3 0 0 0 2 0 2 0 2 0 0 0 0 0 0 0 0 0 2 0 0
+4 0 6 3 0 0 0 4 0 0 0 0 0 5 0 0 0 1 0 2 0 2 0 0 0 0 0 0 0 0 2 0 0 0
+5 6 0 3 0 0 0 4 5 1 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 2 2 0 0 0 3 0
+3 3 3 0 0 0 0 3 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+3 0 0 0 0 0 2 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+3 0 0 0 0 0 5 0 0 0 3 0 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+3 0 0 0 2 5 0 0 0 0 0 0 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+2 4 4 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+2 0 5 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 0 4 3
+0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2
+2 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+3 5 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 2
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 4
+0 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+2 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 2
+2 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 1
+2 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 5 0 4 0 2 0 0 5 4
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0 3 0 0 0 2 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 5 2 0 0 0 0 0 0 7 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 4 0 0 0 2
+0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 4 3 0 0 0 0 0 0 0 0 4
+0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0 2
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 0 0 4 0 0 0 0 0 3 2
+0 2 0 0 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 3
+2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 7 0 0 2 0 0 0 4 4
+0 0 2 0 0 0 0 0 3 0 0 0 0 0 3 3 0 0 1 0 3 0 2 5 0 0 0 0 0 4 3 4 0 5
+0 0 0 0 0 0 0 0 4 2 0 0 0 3 2 4 0 0 2 1 1 0 3 4 0 0 2 4 2 2 3 4 5 0"""
+
+    for row, line in enumerate(zacharydat.split("\n")):
+        thisrow = [int(b) for b in line.split()]
+        for col, entry in enumerate(thisrow):
+            if entry >= 1:
+                G.add_edge(row, col, weight=entry)
+
+    # Add the name of each member's club as a node attribute.
+    for v in G:
+        G.nodes[v]["club"] = "Mr. Hi" if v in club1 else "Officer"
+    return G
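+
+# The resulting weighted graph has the familiar 34 nodes and 78 edges:
+#
+#   >>> G = karate_club_graph()
+#   >>> len(G), G.number_of_edges()
+#   (34, 78)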


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -45,7 +104,144 @@ def davis_southern_women_graph():
     .. [1] A. Davis, Gardner, B. B., Gardner, M. R., 1941. Deep South.
         University of Chicago Press, Chicago, IL.
     """
-    pass
+    G = nx.Graph()
+    # Top nodes
+    women = [
+        "Evelyn Jefferson",
+        "Laura Mandeville",
+        "Theresa Anderson",
+        "Brenda Rogers",
+        "Charlotte McDowd",
+        "Frances Anderson",
+        "Eleanor Nye",
+        "Pearl Oglethorpe",
+        "Ruth DeSand",
+        "Verne Sanderson",
+        "Myra Liddel",
+        "Katherina Rogers",
+        "Sylvia Avondale",
+        "Nora Fayette",
+        "Helen Lloyd",
+        "Dorothy Murchison",
+        "Olivia Carleton",
+        "Flora Price",
+    ]
+    G.add_nodes_from(women, bipartite=0)
+    # Bottom nodes
+    events = [
+        "E1",
+        "E2",
+        "E3",
+        "E4",
+        "E5",
+        "E6",
+        "E7",
+        "E8",
+        "E9",
+        "E10",
+        "E11",
+        "E12",
+        "E13",
+        "E14",
+    ]
+    G.add_nodes_from(events, bipartite=1)
+
+    G.add_edges_from(
+        [
+            ("Evelyn Jefferson", "E1"),
+            ("Evelyn Jefferson", "E2"),
+            ("Evelyn Jefferson", "E3"),
+            ("Evelyn Jefferson", "E4"),
+            ("Evelyn Jefferson", "E5"),
+            ("Evelyn Jefferson", "E6"),
+            ("Evelyn Jefferson", "E8"),
+            ("Evelyn Jefferson", "E9"),
+            ("Laura Mandeville", "E1"),
+            ("Laura Mandeville", "E2"),
+            ("Laura Mandeville", "E3"),
+            ("Laura Mandeville", "E5"),
+            ("Laura Mandeville", "E6"),
+            ("Laura Mandeville", "E7"),
+            ("Laura Mandeville", "E8"),
+            ("Theresa Anderson", "E2"),
+            ("Theresa Anderson", "E3"),
+            ("Theresa Anderson", "E4"),
+            ("Theresa Anderson", "E5"),
+            ("Theresa Anderson", "E6"),
+            ("Theresa Anderson", "E7"),
+            ("Theresa Anderson", "E8"),
+            ("Theresa Anderson", "E9"),
+            ("Brenda Rogers", "E1"),
+            ("Brenda Rogers", "E3"),
+            ("Brenda Rogers", "E4"),
+            ("Brenda Rogers", "E5"),
+            ("Brenda Rogers", "E6"),
+            ("Brenda Rogers", "E7"),
+            ("Brenda Rogers", "E8"),
+            ("Charlotte McDowd", "E3"),
+            ("Charlotte McDowd", "E4"),
+            ("Charlotte McDowd", "E5"),
+            ("Charlotte McDowd", "E7"),
+            ("Frances Anderson", "E3"),
+            ("Frances Anderson", "E5"),
+            ("Frances Anderson", "E6"),
+            ("Frances Anderson", "E8"),
+            ("Eleanor Nye", "E5"),
+            ("Eleanor Nye", "E6"),
+            ("Eleanor Nye", "E7"),
+            ("Eleanor Nye", "E8"),
+            ("Pearl Oglethorpe", "E6"),
+            ("Pearl Oglethorpe", "E8"),
+            ("Pearl Oglethorpe", "E9"),
+            ("Ruth DeSand", "E5"),
+            ("Ruth DeSand", "E7"),
+            ("Ruth DeSand", "E8"),
+            ("Ruth DeSand", "E9"),
+            ("Verne Sanderson", "E7"),
+            ("Verne Sanderson", "E8"),
+            ("Verne Sanderson", "E9"),
+            ("Verne Sanderson", "E12"),
+            ("Myra Liddel", "E8"),
+            ("Myra Liddel", "E9"),
+            ("Myra Liddel", "E10"),
+            ("Myra Liddel", "E12"),
+            ("Katherina Rogers", "E8"),
+            ("Katherina Rogers", "E9"),
+            ("Katherina Rogers", "E10"),
+            ("Katherina Rogers", "E12"),
+            ("Katherina Rogers", "E13"),
+            ("Katherina Rogers", "E14"),
+            ("Sylvia Avondale", "E7"),
+            ("Sylvia Avondale", "E8"),
+            ("Sylvia Avondale", "E9"),
+            ("Sylvia Avondale", "E10"),
+            ("Sylvia Avondale", "E12"),
+            ("Sylvia Avondale", "E13"),
+            ("Sylvia Avondale", "E14"),
+            ("Nora Fayette", "E6"),
+            ("Nora Fayette", "E7"),
+            ("Nora Fayette", "E9"),
+            ("Nora Fayette", "E10"),
+            ("Nora Fayette", "E11"),
+            ("Nora Fayette", "E12"),
+            ("Nora Fayette", "E13"),
+            ("Nora Fayette", "E14"),
+            ("Helen Lloyd", "E7"),
+            ("Helen Lloyd", "E8"),
+            ("Helen Lloyd", "E10"),
+            ("Helen Lloyd", "E11"),
+            ("Helen Lloyd", "E12"),
+            ("Dorothy Murchison", "E8"),
+            ("Dorothy Murchison", "E9"),
+            ("Olivia Carleton", "E9"),
+            ("Olivia Carleton", "E11"),
+            ("Flora Price", "E9"),
+            ("Flora Price", "E11"),
+        ]
+    )
+    G.graph["top"] = women
+    G.graph["bottom"] = events
+    return G
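+
+# The graph is bipartite: 18 women ("top") and 14 events ("bottom") joined by
+# 89 attendance edges, matching the original Deep South data.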


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -58,7 +254,28 @@ def florentine_families_graph():
       Cumulated social roles: The duality of persons and their algebras,
        Social Networks, Volume 8, Issue 3, September 1986, Pages 215-256
     """
-    pass
+    G = nx.Graph()
+    G.add_edge("Acciaiuoli", "Medici")
+    G.add_edge("Castellani", "Peruzzi")
+    G.add_edge("Castellani", "Strozzi")
+    G.add_edge("Castellani", "Barbadori")
+    G.add_edge("Medici", "Barbadori")
+    G.add_edge("Medici", "Ridolfi")
+    G.add_edge("Medici", "Tornabuoni")
+    G.add_edge("Medici", "Albizzi")
+    G.add_edge("Medici", "Salviati")
+    G.add_edge("Salviati", "Pazzi")
+    G.add_edge("Peruzzi", "Strozzi")
+    G.add_edge("Peruzzi", "Bischeri")
+    G.add_edge("Strozzi", "Ridolfi")
+    G.add_edge("Strozzi", "Bischeri")
+    G.add_edge("Ridolfi", "Tornabuoni")
+    G.add_edge("Tornabuoni", "Guadagni")
+    G.add_edge("Albizzi", "Ginori")
+    G.add_edge("Albizzi", "Guadagni")
+    G.add_edge("Bischeri", "Guadagni")
+    G.add_edge("Guadagni", "Lamberteschi")
+    return G
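+
+# 15 families joined by 20 marriage ties; the traditionally isolated Pucci
+# family is omitted.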


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -71,4 +288,259 @@ def les_miserables_graph():
        The Stanford GraphBase: a platform for combinatorial computing,
       pp. 74-87. New York: ACM Press.
     """
-    pass
+    G = nx.Graph()
+    G.add_edge("Napoleon", "Myriel", weight=1)
+    G.add_edge("MlleBaptistine", "Myriel", weight=8)
+    G.add_edge("MmeMagloire", "Myriel", weight=10)
+    G.add_edge("MmeMagloire", "MlleBaptistine", weight=6)
+    G.add_edge("CountessDeLo", "Myriel", weight=1)
+    G.add_edge("Geborand", "Myriel", weight=1)
+    G.add_edge("Champtercier", "Myriel", weight=1)
+    G.add_edge("Cravatte", "Myriel", weight=1)
+    G.add_edge("Count", "Myriel", weight=2)
+    G.add_edge("OldMan", "Myriel", weight=1)
+    G.add_edge("Valjean", "Labarre", weight=1)
+    G.add_edge("Valjean", "MmeMagloire", weight=3)
+    G.add_edge("Valjean", "MlleBaptistine", weight=3)
+    G.add_edge("Valjean", "Myriel", weight=5)
+    G.add_edge("Marguerite", "Valjean", weight=1)
+    G.add_edge("MmeDeR", "Valjean", weight=1)
+    G.add_edge("Isabeau", "Valjean", weight=1)
+    G.add_edge("Gervais", "Valjean", weight=1)
+    G.add_edge("Listolier", "Tholomyes", weight=4)
+    G.add_edge("Fameuil", "Tholomyes", weight=4)
+    G.add_edge("Fameuil", "Listolier", weight=4)
+    G.add_edge("Blacheville", "Tholomyes", weight=4)
+    G.add_edge("Blacheville", "Listolier", weight=4)
+    G.add_edge("Blacheville", "Fameuil", weight=4)
+    G.add_edge("Favourite", "Tholomyes", weight=3)
+    G.add_edge("Favourite", "Listolier", weight=3)
+    G.add_edge("Favourite", "Fameuil", weight=3)
+    G.add_edge("Favourite", "Blacheville", weight=4)
+    G.add_edge("Dahlia", "Tholomyes", weight=3)
+    G.add_edge("Dahlia", "Listolier", weight=3)
+    G.add_edge("Dahlia", "Fameuil", weight=3)
+    G.add_edge("Dahlia", "Blacheville", weight=3)
+    G.add_edge("Dahlia", "Favourite", weight=5)
+    G.add_edge("Zephine", "Tholomyes", weight=3)
+    G.add_edge("Zephine", "Listolier", weight=3)
+    G.add_edge("Zephine", "Fameuil", weight=3)
+    G.add_edge("Zephine", "Blacheville", weight=3)
+    G.add_edge("Zephine", "Favourite", weight=4)
+    G.add_edge("Zephine", "Dahlia", weight=4)
+    G.add_edge("Fantine", "Tholomyes", weight=3)
+    G.add_edge("Fantine", "Listolier", weight=3)
+    G.add_edge("Fantine", "Fameuil", weight=3)
+    G.add_edge("Fantine", "Blacheville", weight=3)
+    G.add_edge("Fantine", "Favourite", weight=4)
+    G.add_edge("Fantine", "Dahlia", weight=4)
+    G.add_edge("Fantine", "Zephine", weight=4)
+    G.add_edge("Fantine", "Marguerite", weight=2)
+    G.add_edge("Fantine", "Valjean", weight=9)
+    G.add_edge("MmeThenardier", "Fantine", weight=2)
+    G.add_edge("MmeThenardier", "Valjean", weight=7)
+    G.add_edge("Thenardier", "MmeThenardier", weight=13)
+    G.add_edge("Thenardier", "Fantine", weight=1)
+    G.add_edge("Thenardier", "Valjean", weight=12)
+    G.add_edge("Cosette", "MmeThenardier", weight=4)
+    G.add_edge("Cosette", "Valjean", weight=31)
+    G.add_edge("Cosette", "Tholomyes", weight=1)
+    G.add_edge("Cosette", "Thenardier", weight=1)
+    G.add_edge("Javert", "Valjean", weight=17)
+    G.add_edge("Javert", "Fantine", weight=5)
+    G.add_edge("Javert", "Thenardier", weight=5)
+    G.add_edge("Javert", "MmeThenardier", weight=1)
+    G.add_edge("Javert", "Cosette", weight=1)
+    G.add_edge("Fauchelevent", "Valjean", weight=8)
+    G.add_edge("Fauchelevent", "Javert", weight=1)
+    G.add_edge("Bamatabois", "Fantine", weight=1)
+    G.add_edge("Bamatabois", "Javert", weight=1)
+    G.add_edge("Bamatabois", "Valjean", weight=2)
+    G.add_edge("Perpetue", "Fantine", weight=1)
+    G.add_edge("Simplice", "Perpetue", weight=2)
+    G.add_edge("Simplice", "Valjean", weight=3)
+    G.add_edge("Simplice", "Fantine", weight=2)
+    G.add_edge("Simplice", "Javert", weight=1)
+    G.add_edge("Scaufflaire", "Valjean", weight=1)
+    G.add_edge("Woman1", "Valjean", weight=2)
+    G.add_edge("Woman1", "Javert", weight=1)
+    G.add_edge("Judge", "Valjean", weight=3)
+    G.add_edge("Judge", "Bamatabois", weight=2)
+    G.add_edge("Champmathieu", "Valjean", weight=3)
+    G.add_edge("Champmathieu", "Judge", weight=3)
+    G.add_edge("Champmathieu", "Bamatabois", weight=2)
+    G.add_edge("Brevet", "Judge", weight=2)
+    G.add_edge("Brevet", "Champmathieu", weight=2)
+    G.add_edge("Brevet", "Valjean", weight=2)
+    G.add_edge("Brevet", "Bamatabois", weight=1)
+    G.add_edge("Chenildieu", "Judge", weight=2)
+    G.add_edge("Chenildieu", "Champmathieu", weight=2)
+    G.add_edge("Chenildieu", "Brevet", weight=2)
+    G.add_edge("Chenildieu", "Valjean", weight=2)
+    G.add_edge("Chenildieu", "Bamatabois", weight=1)
+    G.add_edge("Cochepaille", "Judge", weight=2)
+    G.add_edge("Cochepaille", "Champmathieu", weight=2)
+    G.add_edge("Cochepaille", "Brevet", weight=2)
+    G.add_edge("Cochepaille", "Chenildieu", weight=2)
+    G.add_edge("Cochepaille", "Valjean", weight=2)
+    G.add_edge("Cochepaille", "Bamatabois", weight=1)
+    G.add_edge("Pontmercy", "Thenardier", weight=1)
+    G.add_edge("Boulatruelle", "Thenardier", weight=1)
+    G.add_edge("Eponine", "MmeThenardier", weight=2)
+    G.add_edge("Eponine", "Thenardier", weight=3)
+    G.add_edge("Anzelma", "Eponine", weight=2)
+    G.add_edge("Anzelma", "Thenardier", weight=2)
+    G.add_edge("Anzelma", "MmeThenardier", weight=1)
+    G.add_edge("Woman2", "Valjean", weight=3)
+    G.add_edge("Woman2", "Cosette", weight=1)
+    G.add_edge("Woman2", "Javert", weight=1)
+    G.add_edge("MotherInnocent", "Fauchelevent", weight=3)
+    G.add_edge("MotherInnocent", "Valjean", weight=1)
+    G.add_edge("Gribier", "Fauchelevent", weight=2)
+    G.add_edge("MmeBurgon", "Jondrette", weight=1)
+    G.add_edge("Gavroche", "MmeBurgon", weight=2)
+    G.add_edge("Gavroche", "Thenardier", weight=1)
+    G.add_edge("Gavroche", "Javert", weight=1)
+    G.add_edge("Gavroche", "Valjean", weight=1)
+    G.add_edge("Gillenormand", "Cosette", weight=3)
+    G.add_edge("Gillenormand", "Valjean", weight=2)
+    G.add_edge("Magnon", "Gillenormand", weight=1)
+    G.add_edge("Magnon", "MmeThenardier", weight=1)
+    G.add_edge("MlleGillenormand", "Gillenormand", weight=9)
+    G.add_edge("MlleGillenormand", "Cosette", weight=2)
+    G.add_edge("MlleGillenormand", "Valjean", weight=2)
+    G.add_edge("MmePontmercy", "MlleGillenormand", weight=1)
+    G.add_edge("MmePontmercy", "Pontmercy", weight=1)
+    G.add_edge("MlleVaubois", "MlleGillenormand", weight=1)
+    G.add_edge("LtGillenormand", "MlleGillenormand", weight=2)
+    G.add_edge("LtGillenormand", "Gillenormand", weight=1)
+    G.add_edge("LtGillenormand", "Cosette", weight=1)
+    G.add_edge("Marius", "MlleGillenormand", weight=6)
+    G.add_edge("Marius", "Gillenormand", weight=12)
+    G.add_edge("Marius", "Pontmercy", weight=1)
+    G.add_edge("Marius", "LtGillenormand", weight=1)
+    G.add_edge("Marius", "Cosette", weight=21)
+    G.add_edge("Marius", "Valjean", weight=19)
+    G.add_edge("Marius", "Tholomyes", weight=1)
+    G.add_edge("Marius", "Thenardier", weight=2)
+    G.add_edge("Marius", "Eponine", weight=5)
+    G.add_edge("Marius", "Gavroche", weight=4)
+    G.add_edge("BaronessT", "Gillenormand", weight=1)
+    G.add_edge("BaronessT", "Marius", weight=1)
+    G.add_edge("Mabeuf", "Marius", weight=1)
+    G.add_edge("Mabeuf", "Eponine", weight=1)
+    G.add_edge("Mabeuf", "Gavroche", weight=1)
+    G.add_edge("Enjolras", "Marius", weight=7)
+    G.add_edge("Enjolras", "Gavroche", weight=7)
+    G.add_edge("Enjolras", "Javert", weight=6)
+    G.add_edge("Enjolras", "Mabeuf", weight=1)
+    G.add_edge("Enjolras", "Valjean", weight=4)
+    G.add_edge("Combeferre", "Enjolras", weight=15)
+    G.add_edge("Combeferre", "Marius", weight=5)
+    G.add_edge("Combeferre", "Gavroche", weight=6)
+    G.add_edge("Combeferre", "Mabeuf", weight=2)
+    G.add_edge("Prouvaire", "Gavroche", weight=1)
+    G.add_edge("Prouvaire", "Enjolras", weight=4)
+    G.add_edge("Prouvaire", "Combeferre", weight=2)
+    G.add_edge("Feuilly", "Gavroche", weight=2)
+    G.add_edge("Feuilly", "Enjolras", weight=6)
+    G.add_edge("Feuilly", "Prouvaire", weight=2)
+    G.add_edge("Feuilly", "Combeferre", weight=5)
+    G.add_edge("Feuilly", "Mabeuf", weight=1)
+    G.add_edge("Feuilly", "Marius", weight=1)
+    G.add_edge("Courfeyrac", "Marius", weight=9)
+    G.add_edge("Courfeyrac", "Enjolras", weight=17)
+    G.add_edge("Courfeyrac", "Combeferre", weight=13)
+    G.add_edge("Courfeyrac", "Gavroche", weight=7)
+    G.add_edge("Courfeyrac", "Mabeuf", weight=2)
+    G.add_edge("Courfeyrac", "Eponine", weight=1)
+    G.add_edge("Courfeyrac", "Feuilly", weight=6)
+    G.add_edge("Courfeyrac", "Prouvaire", weight=3)
+    G.add_edge("Bahorel", "Combeferre", weight=5)
+    G.add_edge("Bahorel", "Gavroche", weight=5)
+    G.add_edge("Bahorel", "Courfeyrac", weight=6)
+    G.add_edge("Bahorel", "Mabeuf", weight=2)
+    G.add_edge("Bahorel", "Enjolras", weight=4)
+    G.add_edge("Bahorel", "Feuilly", weight=3)
+    G.add_edge("Bahorel", "Prouvaire", weight=2)
+    G.add_edge("Bahorel", "Marius", weight=1)
+    G.add_edge("Bossuet", "Marius", weight=5)
+    G.add_edge("Bossuet", "Courfeyrac", weight=12)
+    G.add_edge("Bossuet", "Gavroche", weight=5)
+    G.add_edge("Bossuet", "Bahorel", weight=4)
+    G.add_edge("Bossuet", "Enjolras", weight=10)
+    G.add_edge("Bossuet", "Feuilly", weight=6)
+    G.add_edge("Bossuet", "Prouvaire", weight=2)
+    G.add_edge("Bossuet", "Combeferre", weight=9)
+    G.add_edge("Bossuet", "Mabeuf", weight=1)
+    G.add_edge("Bossuet", "Valjean", weight=1)
+    G.add_edge("Joly", "Bahorel", weight=5)
+    G.add_edge("Joly", "Bossuet", weight=7)
+    G.add_edge("Joly", "Gavroche", weight=3)
+    G.add_edge("Joly", "Courfeyrac", weight=5)
+    G.add_edge("Joly", "Enjolras", weight=5)
+    G.add_edge("Joly", "Feuilly", weight=5)
+    G.add_edge("Joly", "Prouvaire", weight=2)
+    G.add_edge("Joly", "Combeferre", weight=5)
+    G.add_edge("Joly", "Mabeuf", weight=1)
+    G.add_edge("Joly", "Marius", weight=2)
+    G.add_edge("Grantaire", "Bossuet", weight=3)
+    G.add_edge("Grantaire", "Enjolras", weight=3)
+    G.add_edge("Grantaire", "Combeferre", weight=1)
+    G.add_edge("Grantaire", "Courfeyrac", weight=2)
+    G.add_edge("Grantaire", "Joly", weight=2)
+    G.add_edge("Grantaire", "Gavroche", weight=1)
+    G.add_edge("Grantaire", "Bahorel", weight=1)
+    G.add_edge("Grantaire", "Feuilly", weight=1)
+    G.add_edge("Grantaire", "Prouvaire", weight=1)
+    G.add_edge("MotherPlutarch", "Mabeuf", weight=3)
+    G.add_edge("Gueulemer", "Thenardier", weight=5)
+    G.add_edge("Gueulemer", "Valjean", weight=1)
+    G.add_edge("Gueulemer", "MmeThenardier", weight=1)
+    G.add_edge("Gueulemer", "Javert", weight=1)
+    G.add_edge("Gueulemer", "Gavroche", weight=1)
+    G.add_edge("Gueulemer", "Eponine", weight=1)
+    G.add_edge("Babet", "Thenardier", weight=6)
+    G.add_edge("Babet", "Gueulemer", weight=6)
+    G.add_edge("Babet", "Valjean", weight=1)
+    G.add_edge("Babet", "MmeThenardier", weight=1)
+    G.add_edge("Babet", "Javert", weight=2)
+    G.add_edge("Babet", "Gavroche", weight=1)
+    G.add_edge("Babet", "Eponine", weight=1)
+    G.add_edge("Claquesous", "Thenardier", weight=4)
+    G.add_edge("Claquesous", "Babet", weight=4)
+    G.add_edge("Claquesous", "Gueulemer", weight=4)
+    G.add_edge("Claquesous", "Valjean", weight=1)
+    G.add_edge("Claquesous", "MmeThenardier", weight=1)
+    G.add_edge("Claquesous", "Javert", weight=1)
+    G.add_edge("Claquesous", "Eponine", weight=1)
+    G.add_edge("Claquesous", "Enjolras", weight=1)
+    G.add_edge("Montparnasse", "Javert", weight=1)
+    G.add_edge("Montparnasse", "Babet", weight=2)
+    G.add_edge("Montparnasse", "Gueulemer", weight=2)
+    G.add_edge("Montparnasse", "Claquesous", weight=2)
+    G.add_edge("Montparnasse", "Valjean", weight=1)
+    G.add_edge("Montparnasse", "Gavroche", weight=1)
+    G.add_edge("Montparnasse", "Eponine", weight=1)
+    G.add_edge("Montparnasse", "Thenardier", weight=1)
+    G.add_edge("Toussaint", "Cosette", weight=2)
+    G.add_edge("Toussaint", "Javert", weight=1)
+    G.add_edge("Toussaint", "Valjean", weight=1)
+    G.add_edge("Child1", "Gavroche", weight=2)
+    G.add_edge("Child2", "Gavroche", weight=2)
+    G.add_edge("Child2", "Child1", weight=3)
+    G.add_edge("Brujon", "Babet", weight=3)
+    G.add_edge("Brujon", "Gueulemer", weight=3)
+    G.add_edge("Brujon", "Thenardier", weight=3)
+    G.add_edge("Brujon", "Gavroche", weight=1)
+    G.add_edge("Brujon", "Eponine", weight=1)
+    G.add_edge("Brujon", "Claquesous", weight=1)
+    G.add_edge("Brujon", "Montparnasse", weight=1)
+    G.add_edge("MmeHucheloup", "Bossuet", weight=1)
+    G.add_edge("MmeHucheloup", "Joly", weight=1)
+    G.add_edge("MmeHucheloup", "Grantaire", weight=1)
+    G.add_edge("MmeHucheloup", "Bahorel", weight=1)
+    G.add_edge("MmeHucheloup", "Courfeyrac", weight=1)
+    G.add_edge("MmeHucheloup", "Gavroche", weight=1)
+    G.add_edge("MmeHucheloup", "Enjolras", weight=1)
+    return G
diff --git a/networkx/generators/spectral_graph_forge.py b/networkx/generators/spectral_graph_forge.py
index a06be505e..3f4ee3ed9 100644
--- a/networkx/generators/spectral_graph_forge.py
+++ b/networkx/generators/spectral_graph_forge.py
@@ -1,12 +1,15 @@
 """Generates graphs with a given eigenvector structure"""
+
+
 import networkx as nx
 from networkx.utils import np_random_state
-__all__ = ['spectral_graph_forge']
+
+__all__ = ["spectral_graph_forge"]


 @np_random_state(3)
 @nx._dispatchable(returns_graph=True)
-def spectral_graph_forge(G, alpha, transformation='identity', seed=None):
+def spectral_graph_forge(G, alpha, transformation="identity", seed=None):
     """Returns a random simple graph with spectrum resembling that of `G`

     This algorithm, called Spectral Graph Forge (SGF), computes the
@@ -77,4 +80,42 @@ def spectral_graph_forge(G, alpha, transformation='identity', seed=None):
     >>> H = nx.spectral_graph_forge(G, 0.3)
     >>>
     """
-    pass
+    import numpy as np
+    import scipy as sp
+
+    available_transformations = ["identity", "modularity"]
+    alpha = np.clip(alpha, 0, 1)
+    A = nx.to_numpy_array(G)
+    n = A.shape[1]
+    level = round(n * alpha)
+
+    if transformation not in available_transformations:
+        msg = f"{transformation!r} is not a valid transformation. "
+        msg += f"Transformations: {available_transformations}"
+        raise nx.NetworkXError(msg)
+
+    K = np.ones((1, n)) @ A
+
+    B = A
+    if transformation == "modularity":
+        B -= K.T @ K / K.sum()
+
+    # Compute low-rank approximation of B
+    evals, evecs = np.linalg.eigh(B)
+    k = np.argsort(np.abs(evals))[::-1]  # indices of evals in descending order
+    evecs[:, k[np.arange(level, n)]] = 0  # set smallest eigenvectors to 0
+    B = evecs @ np.diag(evals) @ evecs.T
+
+    if transformation == "modularity":
+        B += K.T @ K / K.sum()
+
+    B = np.clip(B, 0, 1)
+    np.fill_diagonal(B, 0)
+
+    for i in range(n - 1):
+        B[i, i + 1 :] = sp.stats.bernoulli.rvs(B[i, i + 1 :], random_state=seed)
+        B[i + 1 :, i] = np.transpose(B[i, i + 1 :])
+
+    H = nx.from_numpy_array(B)
+
+    return H
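
A minimal usage sketch of the restored generator (assuming NumPy and SciPy are installed, as the implementation requires):

import networkx as nx

G = nx.karate_club_graph()
# Keep roughly half of the leading eigenvectors; seed for reproducibility
H = nx.spectral_graph_forge(G, 0.5, transformation="modularity", seed=42)
print(H.number_of_nodes())  # 34, same node count as G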
diff --git a/networkx/generators/stochastic.py b/networkx/generators/stochastic.py
index bd13bb17b..f53e23154 100644
--- a/networkx/generators/stochastic.py
+++ b/networkx/generators/stochastic.py
@@ -2,16 +2,19 @@
 graph.

 """
+
 import networkx as nx
 from networkx.classes import DiGraph, MultiDiGraph
 from networkx.utils import not_implemented_for
-__all__ = ['stochastic_graph']
+
+__all__ = ["stochastic_graph"]


-@not_implemented_for('undirected')
-@nx._dispatchable(edge_attrs='weight', mutates_input={'not copy': 1},
-    returns_graph=True)
-def stochastic_graph(G, copy=True, weight='weight'):
+@not_implemented_for("undirected")
+@nx._dispatchable(
+    edge_attrs="weight", mutates_input={"not copy": 1}, returns_graph=True
+)
+def stochastic_graph(G, copy=True, weight="weight"):
     """Returns a right-stochastic representation of directed graph `G`.

     A right-stochastic graph is a weighted digraph in which for each
@@ -36,4 +39,16 @@ def stochastic_graph(G, copy=True, weight='weight'):
         has a weight, it must be a positive number.

     """
-    pass
+    if copy:
+        G = MultiDiGraph(G) if G.is_multigraph() else DiGraph(G)
+    # There is a tradeoff here: the dictionary of node degrees may
+    # require a lot of memory, whereas making a call to `G.out_degree`
+    # inside the loop may be costly in computation time.
+    degree = dict(G.out_degree(weight=weight))
+    for u, v, d in G.edges(data=True):
+        if degree[u] == 0:
+            d[weight] = 0
+        else:
+            d[weight] = d.get(weight, 1) / degree[u]
+    nx._clear_cache(G)
+    return G
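
A quick sketch of the normalization this performs: each out-edge weight is divided by the total out-weight of its source node.

import networkx as nx

G = nx.DiGraph()
G.add_edge("a", "b", weight=1.0)
G.add_edge("a", "c", weight=3.0)
S = nx.stochastic_graph(G)
# Out-weights of "a" now sum to 1: 0.25 and 0.75
print(sorted(d["weight"] for _, _, d in S.out_edges("a", data=True)))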
diff --git a/networkx/generators/sudoku.py b/networkx/generators/sudoku.py
index 15a3bd74c..f288ed24d 100644
--- a/networkx/generators/sudoku.py
+++ b/networkx/generators/sudoku.py
@@ -40,9 +40,11 @@ References
 .. [3] Wikipedia contributors. "Glossary of Sudoku." Wikipedia, The Free
     Encyclopedia, 3 Dec. 2019. Web. 22 Dec. 2019.
 """
+
 import networkx as nx
 from networkx.exception import NetworkXError
-__all__ = ['sudoku_graph']
+
+__all__ = ["sudoku_graph"]


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -88,4 +90,42 @@ def sudoku_graph(n=3):
     .. [3] Wikipedia contributors. "Glossary of Sudoku." Wikipedia, The Free
        Encyclopedia, 3 Dec. 2019. Web. 22 Dec. 2019.
     """
-    pass
+
+    if n < 0:
+        raise NetworkXError("The order must be greater than or equal to zero.")
+
+    n2 = n * n
+    n3 = n2 * n
+    n4 = n3 * n
+
+    # Construct an empty graph with n^4 nodes
+    G = nx.empty_graph(n4)
+
+    # A Sudoku graph of order 0 or 1 has no edges
+    if n < 2:
+        return G
+
+    # Add edges for cells in the same row
+    for row_no in range(n2):
+        row_start = row_no * n2
+        for j in range(1, n2):
+            for i in range(j):
+                G.add_edge(row_start + i, row_start + j)
+
+    # Add edges for cells in the same column
+    for col_no in range(n2):
+        for j in range(col_no, n4, n2):
+            for i in range(col_no, j, n2):
+                G.add_edge(i, j)
+
+    # Add edges for cells in the same box
+    for band_no in range(n):
+        for stack_no in range(n):
+            box_start = n3 * band_no + n * stack_no
+            for j in range(1, n2):
+                for i in range(j):
+                    u = box_start + (i % n) + n2 * (i // n)
+                    v = box_start + (j % n) + n2 * (j // n)
+                    G.add_edge(u, v)
+
+    return G
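
As a sanity check on the construction, every cell of a standard 9x9 board conflicts with 8 row mates, 8 column mates, and 4 remaining box mates:

import networkx as nx

G = nx.sudoku_graph(3)
print(G.number_of_nodes(), G.number_of_edges())  # 81 810
print(G.degree(0))  # 20 = 8 + 8 + 4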
diff --git a/networkx/generators/time_series.py b/networkx/generators/time_series.py
index a63f37bed..6cf54b016 100644
--- a/networkx/generators/time_series.py
+++ b/networkx/generators/time_series.py
@@ -2,8 +2,10 @@
 Time Series Graphs
 """
 import itertools
+
 import networkx as nx
-__all__ = ['visibility_graph']
+
+__all__ = ["visibility_graph"]


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -49,4 +51,23 @@ def visibility_graph(series):
            National Academy of Sciences 105, no. 13 (2008): 4972-4975.
            https://www.pnas.org/doi/10.1073/pnas.0709247105
     """
-    pass
+
+    # Sequential values are always connected
+    G = nx.path_graph(len(series))
+    nx.set_node_attributes(G, dict(enumerate(series)), "value")
+
+    # Check all combinations of nodes in the series
+    for (n1, t1), (n2, t2) in itertools.combinations(enumerate(series), 2):
+        # Check whether any intermediate value obstructs the line of sight
+        slope = (t2 - t1) / (n2 - n1)
+        offset = t2 - slope * n2
+
+        obstructed = any(
+            t >= slope * n + offset
+            for n, t in enumerate(series[n1 + 1 : n2], start=n1 + 1)
+        )
+
+        if not obstructed:
+            G.add_edge(n1, n2)
+
+    return G
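
A small example of the visibility rule on a toy series; a pair of samples is connected when the straight line between them stays strictly above all intermediate samples:

import networkx as nx

series = [2, 1, 3, 2, 4]
G = nx.visibility_graph(series)
print(sorted(G.edges))
# [(0, 1), (0, 2), (1, 2), (2, 3), (2, 4), (3, 4)]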
diff --git a/networkx/generators/trees.py b/networkx/generators/trees.py
index f0abaf2ca..c1b0d7935 100644
--- a/networkx/generators/trees.py
+++ b/networkx/generators/trees.py
@@ -27,15 +27,25 @@ trees and forests uniformly at random. A rooted tree is a tree
 with a designated root node. A rooted forest is a disjoint union
 of rooted trees.
 """
+
 import warnings
 from collections import Counter, defaultdict
 from math import comb, factorial
+
 import networkx as nx
 from networkx.utils import py_random_state
-__all__ = ['prefix_tree', 'prefix_tree_recursive', 'random_tree',
-    'random_labeled_tree', 'random_labeled_rooted_tree',
-    'random_labeled_rooted_forest', 'random_unlabeled_tree',
-    'random_unlabeled_rooted_tree', 'random_unlabeled_rooted_forest']
+
+__all__ = [
+    "prefix_tree",
+    "prefix_tree_recursive",
+    "random_tree",
+    "random_labeled_tree",
+    "random_labeled_rooted_tree",
+    "random_labeled_rooted_forest",
+    "random_unlabeled_tree",
+    "random_unlabeled_rooted_tree",
+    "random_unlabeled_rooted_forest",
+]


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -129,7 +139,46 @@ def prefix_tree(paths):
         >>> sorted(recovered)
         ['ab', 'abs', 'ad']
     """
-    pass
+
+    def get_children(parent, paths):
+        children = defaultdict(list)
+        # Populate dictionary with key(s) as the child/children of the root and
+        # value(s) as the remaining paths of the corresponding child/children
+        for path in paths:
+            # If path is empty, we add an edge to the NIL node.
+            if not path:
+                tree.add_edge(parent, NIL)
+                continue
+            child, *rest = path
+            # `child` may exist as the head of more than one path in `paths`.
+            children[child].append(rest)
+        return children
+
+    # Initialize the prefix tree with a root node and a nil node.
+    tree = nx.DiGraph()
+    root = 0
+    tree.add_node(root, source=None)
+    NIL = -1
+    tree.add_node(NIL, source="NIL")
+    children = get_children(root, paths)
+    stack = [(root, iter(children.items()))]
+    while stack:
+        parent, remaining_children = stack[-1]
+        try:
+            child, remaining_paths = next(remaining_children)
+        # Pop item off stack if there are no remaining children
+        except StopIteration:
+            stack.pop()
+            continue
+        # We relabel each child with an unused name.
+        new_name = len(tree) - 1
+        # The "source" node attribute stores the original node name.
+        tree.add_node(new_name, source=child)
+        tree.add_edge(parent, new_name)
+        children = get_children(new_name, remaining_paths)
+        stack.append((new_name, iter(children.items())))
+
+    return tree


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -226,7 +275,52 @@ def prefix_tree_recursive(paths):
         >>> sorted(recovered)
         ['ab', 'abs', 'ad']
     """
-    pass
+
+    def _helper(paths, root, tree):
+        """Recursively create a trie from the given list of paths.
+
+        `paths` is a list of paths, each of which is itself a list of
+        nodes, relative to the given `root` (but not including it). This
+        list of paths will be interpreted as a tree-like structure, in
+        which two paths that share a prefix represent two branches of
+        the tree with the same initial segment.
+
+        `root` is the parent of the node at index 0 in each path.
+
+        `tree` is the "accumulator", the :class:`networkx.DiGraph`
+        representing the branching to which the new nodes and edges will
+        be added.
+
+        """
+        # For each path, remove the first node and make it a child of root.
+        # Any remaining paths then get processed recursively.
+        children = defaultdict(list)
+        for path in paths:
+            # If path is empty, we add an edge to the NIL node.
+            if not path:
+                tree.add_edge(root, NIL)
+                continue
+            child, *rest = path
+            # `child` may exist as the head of more than one path in `paths`.
+            children[child].append(rest)
+        # Add a node for each child, connect root, recurse to remaining paths
+        for child, remaining_paths in children.items():
+            # We relabel each child with an unused name.
+            new_name = len(tree) - 1
+            # The "source" node attribute stores the original node name.
+            tree.add_node(new_name, source=child)
+            tree.add_edge(root, new_name)
+            _helper(remaining_paths, new_name, tree)
+
+    # Initialize the prefix tree with a root node and a nil node.
+    tree = nx.DiGraph()
+    root = 0
+    tree.add_node(root, source=None)
+    NIL = -1
+    tree.add_node(NIL, source="NIL")
+    # Populate the tree.
+    _helper(paths, root, tree)
+    return tree
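
Both variants build the same trie; the round-trip from the docstring can be reproduced directly:

import networkx as nx

T = nx.prefix_tree(["ab", "abs", "ad"])
root, NIL = 0, -1
recovered = set()
for parent in T.predecessors(NIL):
    word = []
    while parent != root:
        word.append(T.nodes[parent]["source"])
        parent = next(T.predecessors(parent))
    recovered.add("".join(reversed(word)))
print(sorted(recovered))  # ['ab', 'abs', 'ad']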


 @py_random_state(1)
@@ -297,10 +391,41 @@ def random_tree(n, seed=None, create_using=None):
             │           └─╼ 5
             └─╼ 9
     """
-    pass
-
-
-@py_random_state('seed')
+    warnings.warn(
+        (
+            "\n\nrandom_tree is deprecated and will be removed in NX v3.4\n"
+            "Use random_labeled_tree instead."
+        ),
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    if n == 0:
+        raise nx.NetworkXPointlessConcept("the null graph is not a tree")
+    # Cannot create a Prüfer sequence unless `n` is at least two.
+    if n == 1:
+        utree = nx.empty_graph(1, create_using)
+    else:
+        sequence = [seed.choice(range(n)) for i in range(n - 2)]
+        utree = nx.from_prufer_sequence(sequence)
+
+    if create_using is None:
+        tree = utree
+    else:
+        tree = nx.empty_graph(0, create_using)
+        if tree.is_directed():
+            # Use an arbitrary root node and DFS to define edge directions
+            edges = nx.dfs_edges(utree, source=0)
+        else:
+            edges = utree.edges
+
+        # Populate the specified graph type
+        tree.add_nodes_from(utree.nodes)
+        tree.add_edges_from(edges)
+
+    return tree
+
+
+@py_random_state("seed")
 @nx._dispatchable(graphs=None, returns_graph=True)
 def random_labeled_tree(n, *, seed=None):
     """Returns a labeled tree on `n` nodes chosen uniformly at random.
@@ -328,10 +453,15 @@ def random_labeled_tree(n, *, seed=None):
     NetworkXPointlessConcept
         If `n` is zero (because the null graph is not a tree).
     """
-    pass
+    # Cannot create a Prüfer sequence unless `n` is at least two.
+    if n == 0:
+        raise nx.NetworkXPointlessConcept("the null graph is not a tree")
+    if n == 1:
+        return nx.empty_graph(1)
+    return nx.from_prufer_sequence([seed.choice(range(n)) for i in range(n - 2)])
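
Usage is straightforward; the result is uniform over the n^(n-2) labeled trees:

import networkx as nx

T = nx.random_labeled_tree(8, seed=42)
print(nx.is_tree(T), T.number_of_edges())  # True 7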


-@py_random_state('seed')
+@py_random_state("seed")
 @nx._dispatchable(graphs=None, returns_graph=True)
 def random_labeled_rooted_tree(n, *, seed=None):
     """Returns a labeled rooted tree with `n` nodes.
@@ -363,10 +493,12 @@ def random_labeled_rooted_tree(n, *, seed=None):
     NetworkXPointlessConcept
         If `n` is zero (because the null graph is not a tree).
     """
-    pass
+    t = random_labeled_tree(n, seed=seed)
+    t.graph["root"] = seed.randint(0, n - 1)
+    return t


-@py_random_state('seed')
+@py_random_state("seed")
 @nx._dispatchable(graphs=None, returns_graph=True)
 def random_labeled_rooted_forest(n, *, seed=None):
     """Returns a labeled rooted forest with `n` nodes.
@@ -397,7 +529,57 @@ def random_labeled_rooted_forest(n, *, seed=None):
         Naturwissenschaften an der Formal- und Naturwissenschaftlichen
         Fakultät der Universität Wien. Wien, May 2000.
     """
-    pass
+
+    # Select the number of roots by iterating over the cumulative count of trees
+    # with at most k roots
+    def _select_k(n, seed):
+        r = seed.randint(0, (n + 1) ** (n - 1) - 1)
+        cum_sum = 0
+        for k in range(1, n):
+            cum_sum += (factorial(n - 1) * n ** (n - k)) // (
+                factorial(k - 1) * factorial(n - k)
+            )
+            if r < cum_sum:
+                return k
+
+        return n
+
+    F = nx.empty_graph(n)
+    if n == 0:
+        F.graph["roots"] = set()
+        return F
+    # Select the number of roots k
+    k = _select_k(n, seed)
+    if k == n:
+        F.graph["roots"] = set(range(n))
+        return F  # Nothing to do
+    # Select the roots
+    roots = seed.sample(range(n), k)
+    # Nonroots
+    p = set(range(n)).difference(roots)
+    # Coding sequence
+    N = [seed.randint(0, n - 1) for i in range(n - k - 1)]
+    # Multiset of elements in N also in p
+    degree = Counter([x for x in N if x in p])
+    # Iterator over the elements of p with degree zero
+    iterator = iter(x for x in p if degree[x] == 0)
+    u = last = next(iterator)
+    # This loop is identical to that for Prüfer sequences,
+    # except that we can draw nodes only from p
+    for v in N:
+        F.add_edge(u, v)
+        degree[v] -= 1
+        if v < last and degree[v] == 0:
+            u = v
+        else:
+            last = u = next(iterator)
+
+    F.add_edge(u, roots[0])
+    F.graph["roots"] = set(roots)
+    return F
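
The roots chosen for the sampled forest are recorded in the graph attribute, for example:

import networkx as nx

F = nx.random_labeled_rooted_forest(10, seed=7)
print(nx.number_of_nodes(F), nx.is_forest(F))
print(F.graph["roots"])  # one root per tree component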
+
+
+# The following functions support generation of unlabeled trees and forests.


 def _to_nx(edges, n_nodes, root=None, roots=None):
@@ -423,7 +605,13 @@ def _to_nx(edges, n_nodes, root=None, roots=None):
     :class:`networkx.Graph`
         The graph with `n_nodes` nodes and edges given by `edges`.
     """
-    pass
+    G = nx.empty_graph(n_nodes)
+    G.add_edges_from(edges)
+    if root is not None:
+        G.graph["root"] = root
+    if roots is not None:
+        G.graph["roots"] = roots
+    return G


 def _num_rooted_trees(n, cache_trees):
@@ -444,7 +632,18 @@ def _num_rooted_trees(n, cache_trees):
     int
         The number of unlabeled rooted trees with `n` nodes.
     """
-    pass
+    for n_i in range(len(cache_trees), n + 1):
+        cache_trees.append(
+            sum(
+                [
+                    d * cache_trees[n_i - j * d] * cache_trees[d]
+                    for d in range(1, n_i)
+                    for j in range(1, (n_i - 1) // d + 1)
+                ]
+            )
+            // (n_i - 1)
+        )
+    return cache_trees[n]


 def _select_jd_trees(n, cache_trees, seed):
@@ -475,7 +674,17 @@ def _select_jd_trees(n, cache_trees, seed):
         Academic Press, 1978.
         https://doi.org/10.1016/C2013-0-11243-3
     """
-    pass
+    p = seed.randint(0, _num_rooted_trees(n, cache_trees) * (n - 1) - 1)
+    cumsum = 0
+    for d in range(n - 1, 0, -1):
+        for j in range(1, (n - 1) // d + 1):
+            cumsum += (
+                d
+                * _num_rooted_trees(n - j * d, cache_trees)
+                * _num_rooted_trees(d, cache_trees)
+            )
+            if p < cumsum:
+                return (j, d)


 def _random_unlabeled_rooted_tree(n, cache_trees, seed):
@@ -508,10 +717,26 @@ def _random_unlabeled_rooted_tree(n, cache_trees, seed):
         Academic Press, 1978.
         https://doi.org/10.1016/C2013-0-11243-3
     """
-    pass
-
-
-@py_random_state('seed')
+    if n == 1:
+        edges, n_nodes = [], 1
+        return edges, n_nodes
+    if n == 2:
+        edges, n_nodes = [(0, 1)], 2
+        return edges, n_nodes
+
+    j, d = _select_jd_trees(n, cache_trees, seed)
+    t1, t1_nodes = _random_unlabeled_rooted_tree(n - j * d, cache_trees, seed)
+    t2, t2_nodes = _random_unlabeled_rooted_tree(d, cache_trees, seed)
+    t12 = [(0, t2_nodes * i + t1_nodes) for i in range(j)]
+    t1.extend(t12)
+    for _ in range(j):
+        t1.extend((n1 + t1_nodes, n2 + t1_nodes) for n1, n2 in t2)
+        t1_nodes += t2_nodes
+
+    return t1, t1_nodes
+
+
+@py_random_state("seed")
 @nx._dispatchable(graphs=None, returns_graph=True)
 def random_unlabeled_rooted_tree(n, *, number_of_trees=None, seed=None):
     """Returns a number of unlabeled rooted trees uniformly at random
@@ -557,7 +782,15 @@ def random_unlabeled_rooted_tree(n, *, number_of_trees=None, seed=None):
         Academic Press, 1978.
         https://doi.org/10.1016/C2013-0-11243-3
     """
-    pass
+    if n == 0:
+        raise nx.NetworkXPointlessConcept("the null graph is not a tree")
+    cache_trees = [0, 1]  # initial cache of number of rooted trees
+    if number_of_trees is None:
+        return _to_nx(*_random_unlabeled_rooted_tree(n, cache_trees, seed), root=0)
+    return [
+        _to_nx(*_random_unlabeled_rooted_tree(n, cache_trees, seed), root=0)
+        for i in range(number_of_trees)
+    ]


 def _num_rooted_forests(n, q, cache_forests):
@@ -589,7 +822,20 @@ def _num_rooted_forests(n, q, cache_forests):
         Journal of Algorithms 2.2 (1981): 204-207.
         https://doi.org/10.1016/0196-6774(81)90021-3
     """
-    pass
+    for n_i in range(len(cache_forests), n + 1):
+        q_i = min(n_i, q)
+        cache_forests.append(
+            sum(
+                [
+                    d * cache_forests[n_i - j * d] * cache_forests[d - 1]
+                    for d in range(1, q_i + 1)
+                    for j in range(1, n_i // d + 1)
+                ]
+            )
+            // n_i
+        )
+
+    return cache_forests[n]


 def _select_jd_forests(n, q, cache_forests, seed):
@@ -618,7 +864,17 @@ def _select_jd_forests(n, q, cache_forests, seed):
         Journal of Algorithms 2.2 (1981): 204-207.
         https://doi.org/10.1016/0196-6774(81)90021-3
     """
-    pass
+    p = seed.randint(0, _num_rooted_forests(n, q, cache_forests) * n - 1)
+    cumsum = 0
+    for d in range(q, 0, -1):
+        for j in range(1, n // d + 1):
+            cumsum += (
+                d
+                * _num_rooted_forests(n - j * d, q, cache_forests)
+                * _num_rooted_forests(d - 1, q, cache_forests)
+            )
+            if p < cumsum:
+                return (j, d)


 def _random_unlabeled_rooted_forest(n, q, cache_trees, cache_forests, seed):
@@ -650,13 +906,24 @@ def _random_unlabeled_rooted_forest(n, q, cache_trees, cache_forests, seed):
         Journal of Algorithms 2.2 (1981): 204-207.
         https://doi.org/10.1016/0196-6774(81)90021-3
     """
-    pass
-
-
-@py_random_state('seed')
+    if n == 0:
+        return ([], 0, [])
+
+    j, d = _select_jd_forests(n, q, cache_forests, seed)
+    t1, t1_nodes, r1 = _random_unlabeled_rooted_forest(
+        n - j * d, q, cache_trees, cache_forests, seed
+    )
+    t2, t2_nodes = _random_unlabeled_rooted_tree(d, cache_trees, seed)
+    for _ in range(j):
+        r1.append(t1_nodes)
+        t1.extend((n1 + t1_nodes, n2 + t1_nodes) for n1, n2 in t2)
+        t1_nodes += t2_nodes
+    return t1, t1_nodes, r1
+
+
+@py_random_state("seed")
 @nx._dispatchable(graphs=None, returns_graph=True)
-def random_unlabeled_rooted_forest(n, *, q=None, number_of_forests=None,
-    seed=None):
+def random_unlabeled_rooted_forest(n, *, q=None, number_of_forests=None, seed=None):
     """Returns a forest or list of forests selected at random.

     Returns one or more (depending on `number_of_forests`)
@@ -703,7 +970,27 @@ def random_unlabeled_rooted_forest(n, *, q=None, number_of_forests=None,
         Journal of Algorithms 2.2 (1981): 204-207.
         https://doi.org/10.1016/0196-6774(81)90021-3
     """
-    pass
+    if q is None:
+        q = n
+    if q == 0 and n != 0:
+        raise ValueError("q must be a positive integer if n is positive.")
+
+    cache_trees = [0, 1]  # initial cache of number of rooted trees
+    cache_forests = [1]  # initial cache of number of rooted forests
+
+    if number_of_forests is None:
+        g, nodes, rs = _random_unlabeled_rooted_forest(
+            n, q, cache_trees, cache_forests, seed
+        )
+        return _to_nx(g, nodes, roots=set(rs))
+
+    res = []
+    for i in range(number_of_forests):
+        g, nodes, rs = _random_unlabeled_rooted_forest(
+            n, q, cache_trees, cache_forests, seed
+        )
+        res.append(_to_nx(g, nodes, roots=set(rs)))
+    return res


 def _num_trees(n, cache_trees):
@@ -723,7 +1010,15 @@ def _num_trees(n, cache_trees):
     int
         The number of unlabeled trees with `n` nodes.
     """
-    pass
+    r = _num_rooted_trees(n, cache_trees) - sum(
+        [
+            _num_rooted_trees(j, cache_trees) * _num_rooted_trees(n - j, cache_trees)
+            for j in range(1, n // 2 + 1)
+        ]
+    )
+    if n % 2 == 0:
+        r += comb(_num_rooted_trees(n // 2, cache_trees) + 1, 2)
+    return r


 def _bicenter(n, cache, seed):
@@ -751,7 +1046,14 @@ def _bicenter(n, cache, seed):
         Journal of Algorithms 2.2 (1981): 204-207.
         https://doi.org/10.1016/0196-6774(81)90021-3
     """
-    pass
+    t, t_nodes = _random_unlabeled_rooted_tree(n // 2, cache, seed)
+    if seed.randint(0, _num_rooted_trees(n // 2, cache)) == 0:
+        t2, t2_nodes = t, t_nodes
+    else:
+        t2, t2_nodes = _random_unlabeled_rooted_tree(n // 2, cache, seed)
+    t.extend([(n1 + (n // 2), n2 + (n // 2)) for n1, n2 in t2])
+    t.append((0, n // 2))
+    return t, t_nodes + t2_nodes


 def _random_unlabeled_tree(n, cache_trees, cache_forests, seed):
@@ -781,10 +1083,22 @@ def _random_unlabeled_tree(n, cache_trees, cache_forests, seed):
         Journal of Algorithms 2.2 (1981): 204-207.
         https://doi.org/10.1016/0196-6774(81)90021-3
     """
-    pass
-
-
-@py_random_state('seed')
+    if n % 2 == 1:
+        p = 0
+    else:
+        p = comb(_num_rooted_trees(n // 2, cache_trees) + 1, 2)
+    if seed.randint(0, _num_trees(n, cache_trees) - 1) < p:
+        return _bicenter(n, cache_trees, seed)
+    else:
+        f, n_f, r = _random_unlabeled_rooted_forest(
+            n - 1, (n - 1) // 2, cache_trees, cache_forests, seed
+        )
+        for i in r:
+            f.append((i, n_f))
+        return f, n_f + 1
+
+
+@py_random_state("seed")
 @nx._dispatchable(graphs=None, returns_graph=True)
 def random_unlabeled_tree(n, *, number_of_trees=None, seed=None):
     """Returns a tree or list of trees chosen randomly.
@@ -828,4 +1142,15 @@ def random_unlabeled_tree(n, *, number_of_trees=None, seed=None):
         Journal of Algorithms 2.2 (1981): 204-207.
         https://doi.org/10.1016/0196-6774(81)90021-3
     """
-    pass
+    if n == 0:
+        raise nx.NetworkXPointlessConcept("the null graph is not a tree")
+
+    cache_trees = [0, 1]  # initial cache of number of rooted trees
+    cache_forests = [1]  # initial cache of number of rooted forests
+    if number_of_trees is None:
+        return _to_nx(*_random_unlabeled_tree(n, cache_trees, cache_forests, seed))
+    else:
+        return [
+            _to_nx(*_random_unlabeled_tree(n, cache_trees, cache_forests, seed))
+            for i in range(number_of_trees)
+        ]
diff --git a/networkx/generators/triads.py b/networkx/generators/triads.py
index fe4c0e896..5d380588b 100644
--- a/networkx/generators/triads.py
+++ b/networkx/generators/triads.py
@@ -1,17 +1,36 @@
+# See https://github.com/networkx/networkx/pull/1474
+# Copyright 2011 Reya Group <http://www.reyagroup.com>
+# Copyright 2011 Alex Levenson <alex@isnotinvain.com>
+# Copyright 2011 Diederik van Liere <diederik.vanliere@rotman.utoronto.ca>
 """Functions that generate the triad graphs, that is, the possible
 digraphs on three nodes.

 """
 import networkx as nx
 from networkx.classes import DiGraph
-__all__ = ['triad_graph']
-TRIAD_EDGES = {'003': [], '012': ['ab'], '102': ['ab', 'ba'], '021D': ['ba',
-    'bc'], '021U': ['ab', 'cb'], '021C': ['ab', 'bc'], '111D': ['ac', 'ca',
-    'bc'], '111U': ['ac', 'ca', 'cb'], '030T': ['ab', 'cb', 'ac'], '030C':
-    ['ba', 'cb', 'ac'], '201': ['ab', 'ba', 'ac', 'ca'], '120D': ['bc',
-    'ba', 'ac', 'ca'], '120U': ['ab', 'cb', 'ac', 'ca'], '120C': ['ab',
-    'bc', 'ac', 'ca'], '210': ['ab', 'bc', 'cb', 'ac', 'ca'], '300': ['ab',
-    'ba', 'bc', 'cb', 'ac', 'ca']}
+
+__all__ = ["triad_graph"]
+
+#: Dictionary mapping triad name to list of directed edges in the
+#: digraph representation of that triad (with nodes 'a', 'b', and 'c').
+TRIAD_EDGES = {
+    "003": [],
+    "012": ["ab"],
+    "102": ["ab", "ba"],
+    "021D": ["ba", "bc"],
+    "021U": ["ab", "cb"],
+    "021C": ["ab", "bc"],
+    "111D": ["ac", "ca", "bc"],
+    "111U": ["ac", "ca", "cb"],
+    "030T": ["ab", "cb", "ac"],
+    "030C": ["ba", "cb", "ac"],
+    "201": ["ab", "ba", "ac", "ca"],
+    "120D": ["bc", "ba", "ac", "ca"],
+    "120U": ["ab", "cb", "ac", "ca"],
+    "120C": ["ab", "bc", "ac", "ca"],
+    "210": ["ab", "bc", "cb", "ac", "ca"],
+    "300": ["ab", "ba", "bc", "cb", "ac", "ca"],
+}


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -63,4 +82,12 @@ def triad_graph(triad_name):
     triadic_census

     """
-    pass
+    if triad_name not in TRIAD_EDGES:
+        raise ValueError(
+            f'unknown triad name "{triad_name}"; use one of the triad names'
+            " in the TRIAD_NAMES constant"
+        )
+    G = DiGraph()
+    G.add_nodes_from("abc")
+    G.add_edges_from(TRIAD_EDGES[triad_name])
+    return G
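
Each two-character string in TRIAD_EDGES is consumed as a (u, v) pair, so for example:

import networkx as nx

G = nx.triad_graph("030T")
print(sorted(G.edges))  # [('a', 'b'), ('a', 'c'), ('c', 'b')]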
diff --git a/networkx/lazy_imports.py b/networkx/lazy_imports.py
index b2ed7a12b..396404ba3 100644
--- a/networkx/lazy_imports.py
+++ b/networkx/lazy_imports.py
@@ -4,7 +4,8 @@ import inspect
 import os
 import sys
 import types
-__all__ = ['attach', '_lazy_import']
+
+__all__ = ["attach", "_lazy_import"]


 def attach(module_name, submodules=None, submod_attrs=None):
@@ -45,28 +46,55 @@ def attach(module_name, submodules=None, submod_attrs=None):
     __getattr__, __dir__, __all__

     """
-    pass
+    if submod_attrs is None:
+        submod_attrs = {}
+
+    if submodules is None:
+        submodules = set()
+    else:
+        submodules = set(submodules)
+
+    attr_to_modules = {
+        attr: mod for mod, attrs in submod_attrs.items() for attr in attrs
+    }
+
+    __all__ = list(submodules | attr_to_modules.keys())
+
+    def __getattr__(name):
+        if name in submodules:
+            return importlib.import_module(f"{module_name}.{name}")
+        elif name in attr_to_modules:
+            submod = importlib.import_module(f"{module_name}.{attr_to_modules[name]}")
+            return getattr(submod, name)
+        else:
+            raise AttributeError(f"No {module_name} attribute {name}")

+    def __dir__():
+        return __all__

-class DelayedImportErrorModule(types.ModuleType):
+    if os.environ.get("EAGER_IMPORT", ""):
+        for attr in set(attr_to_modules.keys()) | submodules:
+            __getattr__(attr)
+
+    return __getattr__, __dir__, list(__all__)
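
A sketch of how a package `__init__.py` might wire this up (the package and attribute names here are hypothetical):

# hypothetical mypkg/__init__.py
from networkx.lazy_imports import attach

__getattr__, __dir__, __all__ = attach(
    __name__,
    submodules={"subpkg"},
    submod_attrs={"utils": ["helper_a", "helper_b"]},
)
# `mypkg.helper_a` now imports mypkg.utils only on first access.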

+
+class DelayedImportErrorModule(types.ModuleType):
     def __init__(self, frame_data, *args, **kwargs):
         self.__frame_data = frame_data
         super().__init__(*args, **kwargs)

     def __getattr__(self, x):
-        if x in ('__class__', '__file__', '__frame_data'):
+        if x in ("__class__", "__file__", "__frame_data"):
             super().__getattr__(x)
         else:
             fd = self.__frame_data
             raise ModuleNotFoundError(
-                f"""No module named '{fd['spec']}'
-
-This error is lazily reported, having originally occurred in
-  File {fd['filename']}, line {fd['lineno']}, in {fd['function']}
-
-----> {''.join(fd['code_context'] or '').strip()}"""
-                )
+                f"No module named '{fd['spec']}'\n\n"
+                "This error is lazily reported, having originally occurred in\n"
+                f'  File {fd["filename"]}, line {fd["lineno"]}, in {fd["function"]}\n\n'
+                f'----> {"".join(fd["code_context"] or "").strip()}'
+            )


 def _lazy_import(fullname):
@@ -129,4 +157,32 @@ def _lazy_import(fullname):
         Actual loading of the module occurs upon first attribute request.

     """
-    pass
+    try:
+        return sys.modules[fullname]
+    except KeyError:
+        pass
+
+    # Not previously loaded -- look it up
+    spec = importlib.util.find_spec(fullname)
+
+    if spec is None:
+        try:
+            parent = inspect.stack()[1]
+            frame_data = {
+                "spec": fullname,
+                "filename": parent.filename,
+                "lineno": parent.lineno,
+                "function": parent.function,
+                "code_context": parent.code_context,
+            }
+            return DelayedImportErrorModule(frame_data, "DelayedImportErrorModule")
+        finally:
+            del parent
+
+    module = importlib.util.module_from_spec(spec)
+    sys.modules[fullname] = module
+
+    loader = importlib.util.LazyLoader(spec.loader)
+    loader.exec_module(module)
+
+    return module
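
And a corresponding sketch for the module-level helper; the module body executes only on first attribute access:

from networkx.lazy_imports import _lazy_import

np = _lazy_import("numpy")  # cheap: nothing is executed yet
print(np.arange(3))         # numpy actually loads here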
diff --git a/networkx/linalg/algebraicconnectivity.py b/networkx/linalg/algebraicconnectivity.py
index 78ad0d311..870b4ec5f 100644
--- a/networkx/linalg/algebraicconnectivity.py
+++ b/networkx/linalg/algebraicconnectivity.py
@@ -2,10 +2,20 @@
 Algebraic connectivity and Fiedler vectors of undirected graphs.
 """
 from functools import partial
+
 import networkx as nx
-from networkx.utils import not_implemented_for, np_random_state, reverse_cuthill_mckee_ordering
-__all__ = ['algebraic_connectivity', 'fiedler_vector', 'spectral_ordering',
-    'spectral_bisection']
+from networkx.utils import (
+    not_implemented_for,
+    np_random_state,
+    reverse_cuthill_mckee_ordering,
+)
+
+__all__ = [
+    "algebraic_connectivity",
+    "fiedler_vector",
+    "spectral_ordering",
+    "spectral_bisection",
+]


 class _PCGSolver:
@@ -28,6 +38,42 @@ class _PCGSolver:
         self._A = A
         self._M = M

+    def solve(self, B, tol):
+        import numpy as np
+
+        # Densifying step - can this be kept sparse?
+        B = np.asarray(B)
+        X = np.ndarray(B.shape, order="F")
+        for j in range(B.shape[1]):
+            X[:, j] = self._solve(B[:, j], tol)
+        return X
+
+    def _solve(self, b, tol):
+        import numpy as np
+        import scipy as sp
+
+        A = self._A
+        M = self._M
+        tol *= sp.linalg.blas.dasum(b)
+        # Initialize.
+        x = np.zeros(b.shape)
+        r = b.copy()
+        z = M(r)
+        rz = sp.linalg.blas.ddot(r, z)
+        p = z.copy()
+        # Iterate.
+        while True:
+            Ap = A(p)
+            alpha = rz / sp.linalg.blas.ddot(p, Ap)
+            x = sp.linalg.blas.daxpy(p, x, a=alpha)
+            r = sp.linalg.blas.daxpy(Ap, r, a=-alpha)
+            if sp.linalg.blas.dasum(r) < tol:
+                return x
+            z = M(r)
+            beta = sp.linalg.blas.ddot(r, z)
+            beta, rz = beta / rz, beta
+            p = sp.linalg.blas.daxpy(p, z, a=beta)
+

 class _LUSolver:
     """LU factorization.
@@ -42,19 +88,63 @@ class _LUSolver:

     def __init__(self, A):
         import scipy as sp
-        self._LU = sp.sparse.linalg.splu(A, permc_spec='MMD_AT_PLUS_A',
-            diag_pivot_thresh=0.0, options={'Equil': True, 'SymmetricMode':
-            True})
+
+        self._LU = sp.sparse.linalg.splu(
+            A,
+            permc_spec="MMD_AT_PLUS_A",
+            diag_pivot_thresh=0.0,
+            options={"Equil": True, "SymmetricMode": True},
+        )
+
+    def solve(self, B, tol=None):
+        import numpy as np
+
+        B = np.asarray(B)
+        X = np.ndarray(B.shape, order="F")
+        for j in range(B.shape[1]):
+            X[:, j] = self._LU.solve(B[:, j])
+        return X


 def _preprocess_graph(G, weight):
     """Compute edge weights and eliminate zero-weight edges."""
-    pass
+    if G.is_directed():
+        H = nx.MultiGraph()
+        H.add_nodes_from(G)
+        H.add_weighted_edges_from(
+            ((u, v, e.get(weight, 1.0)) for u, v, e in G.edges(data=True) if u != v),
+            weight=weight,
+        )
+        G = H
+    if not G.is_multigraph():
+        edges = (
+            (u, v, abs(e.get(weight, 1.0))) for u, v, e in G.edges(data=True) if u != v
+        )
+    else:
+        edges = (
+            (u, v, sum(abs(e.get(weight, 1.0)) for e in G[u][v].values()))
+            for u, v in G.edges()
+            if u != v
+        )
+    H = nx.Graph()
+    H.add_nodes_from(G)
+    H.add_weighted_edges_from((u, v, e) for u, v, e in edges if e != 0)
+    return H


 def _rcm_estimate(G, nodelist):
     """Estimate the Fiedler vector using the reverse Cuthill-McKee ordering."""
-    pass
+    import numpy as np
+
+    G = G.subgraph(nodelist)
+    order = reverse_cuthill_mckee_ordering(G)
+    n = len(nodelist)
+    index = dict(zip(nodelist, range(n)))
+    x = np.ndarray(n, dtype=float)
+    for i, u in enumerate(order):
+        x[index[u]] = i
+    x -= (n - 1) / 2.0
+    return x


 def _tracemin_fiedler(L, X, normalized, tol, method):
@@ -91,20 +181,142 @@ def _tracemin_fiedler(L, X, normalized, tol, method):
         As this is for Fiedler vectors, the zero eigenvalue (and
         constant eigenvector) are avoided.
     """
-    pass
+    import numpy as np
+    import scipy as sp
+
+    n = X.shape[0]
+
+    if normalized:
+        # Form the normalized Laplacian matrix and determine the eigenvector of
+        # its nullspace.
+        e = np.sqrt(L.diagonal())
+        # TODO: rm csr_array wrapper when spdiags array creation becomes available
+        D = sp.sparse.csr_array(sp.sparse.spdiags(1 / e, 0, n, n, format="csr"))
+        L = D @ L @ D
+        e *= 1.0 / np.linalg.norm(e, 2)
+
+    if normalized:
+
+        def project(X):
+            """Make X orthogonal to the nullspace of L."""
+            X = np.asarray(X)
+            for j in range(X.shape[1]):
+                X[:, j] -= (X[:, j] @ e) * e
+
+    else:
+
+        def project(X):
+            """Make X orthogonal to the nullspace of L."""
+            X = np.asarray(X)
+            for j in range(X.shape[1]):
+                X[:, j] -= X[:, j].sum() / n
+
+    if method == "tracemin_pcg":
+        D = L.diagonal().astype(float)
+        solver = _PCGSolver(lambda x: L @ x, lambda x: D * x)
+    elif method == "tracemin_lu":
+        # Convert A to CSC to suppress SparseEfficiencyWarning.
+        A = sp.sparse.csc_array(L, dtype=float, copy=True)
+        # Force A to be nonsingular. Since A is the Laplacian matrix of a
+        # connected graph, its rank deficiency is one, and thus one diagonal
+        # element needs to be modified. Changing it to infinity forces a zero in the
+        # corresponding element in the solution.
+        i = (A.indptr[1:] - A.indptr[:-1]).argmax()
+        A[i, i] = np.inf
+        solver = _LUSolver(A)
+    else:
+        raise nx.NetworkXError(f"Unknown linear system solver: {method}")
+
+    # Initialize.
+    Lnorm = abs(L).sum(axis=1).flatten().max()
+    project(X)
+    W = np.ndarray(X.shape, order="F")
+
+    while True:
+        # Orthonormalize X.
+        X = np.linalg.qr(X)[0]
+        # Compute iteration matrix H.
+        W[:, :] = L @ X
+        H = X.T @ W
+        sigma, Y = sp.linalg.eigh(H, overwrite_a=True)
+        # Compute the Ritz vectors.
+        X = X @ Y
+        # Test for convergence exploiting the fact that L * X == W * Y.
+        res = sp.linalg.blas.dasum(W @ Y[:, 0] - sigma[0] * X[:, 0]) / Lnorm
+        if res < tol:
+            break
+        # Compute X = L \ X / (X' * (L \ X)).
+        # L \ X can have an arbitrary projection on the nullspace of L,
+        # which will be eliminated.
+        W[:, :] = solver.solve(X, tol)
+        X = (sp.linalg.inv(W.T @ X) @ W.T).T  # Preserves Fortran storage order.
+        project(X)
+
+    return sigma, np.asarray(X)


 def _get_fiedler_func(method):
     """Returns a function that solves the Fiedler eigenvalue problem."""
-    pass
-
-
-@not_implemented_for('directed')
+    import numpy as np
+
+    if method == "tracemin":  # old style keyword <v2.1
+        method = "tracemin_pcg"
+    if method in ("tracemin_pcg", "tracemin_lu"):
+
+        def find_fiedler(L, x, normalized, tol, seed):
+            q = 1 if method == "tracemin_pcg" else min(4, L.shape[0] - 1)
+            X = np.asarray(seed.normal(size=(q, L.shape[0]))).T
+            sigma, X = _tracemin_fiedler(L, X, normalized, tol, method)
+            return sigma[0], X[:, 0]
+
+    elif method == "lanczos" or method == "lobpcg":
+
+        def find_fiedler(L, x, normalized, tol, seed):
+            import scipy as sp
+
+            L = sp.sparse.csc_array(L, dtype=float)
+            n = L.shape[0]
+            if normalized:
+                # TODO: rm csc_array wrapping when spdiags array becomes available
+                D = sp.sparse.csc_array(
+                    sp.sparse.spdiags(
+                        1.0 / np.sqrt(L.diagonal()), [0], n, n, format="csc"
+                    )
+                )
+                L = D @ L @ D
+            if method == "lanczos" or n < 10:
+                # Avoid LOBPCG when n < 10 due to
+                # https://github.com/scipy/scipy/issues/3592
+                # https://github.com/scipy/scipy/pull/3594
+                sigma, X = sp.sparse.linalg.eigsh(
+                    L, 2, which="SM", tol=tol, return_eigenvectors=True
+                )
+                return sigma[1], X[:, 1]
+            else:
+                X = np.asarray(np.atleast_2d(x).T)
+                # TODO: rm csr_array wrapping when spdiags array becomes available
+                M = sp.sparse.csr_array(sp.sparse.spdiags(1.0 / L.diagonal(), 0, n, n))
+                Y = np.ones(n)
+                if normalized:
+                    Y /= D.diagonal()
+                sigma, X = sp.sparse.linalg.lobpcg(
+                    L, X, M=M, Y=np.atleast_2d(Y).T, tol=tol, maxiter=n, largest=False
+                )
+                return sigma[0], X[:, 0]
+
+    else:
+        raise nx.NetworkXError(f"unknown method {method!r}.")
+
+    return find_fiedler
+
+
+@not_implemented_for("directed")
 @np_random_state(5)
-@nx._dispatchable(edge_attrs='weight')
-def algebraic_connectivity(G, weight='weight', normalized=False, tol=1e-08,
-    method='tracemin_pcg', seed=None):
-    """Returns the algebraic connectivity of an undirected graph.
+@nx._dispatchable(edge_attrs="weight")
+def algebraic_connectivity(
+    G, weight="weight", normalized=False, tol=1e-8, method="tracemin_pcg", seed=None
+):
+    r"""Returns the algebraic connectivity of an undirected graph.

     The algebraic connectivity of a connected undirected graph is the second
     smallest eigenvalue of its Laplacian matrix.
@@ -178,14 +390,28 @@ def algebraic_connectivity(G, weight='weight', normalized=False, tol=1e-08,
     False

     """
-    pass
+    if len(G) < 2:
+        raise nx.NetworkXError("graph has less than two nodes.")
+    G = _preprocess_graph(G, weight)
+    if not nx.is_connected(G):
+        return 0.0
+
+    L = nx.laplacian_matrix(G)
+    if L.shape[0] == 2:
+        return 2.0 * float(L[0, 0]) if not normalized else 2.0

+    find_fiedler = _get_fiedler_func(method)
+    x = None if method != "lobpcg" else _rcm_estimate(G, G)
+    sigma, fiedler = find_fiedler(L, x, normalized, tol, seed)
+    return float(sigma)

-@not_implemented_for('directed')
+
+@not_implemented_for("directed")
 @np_random_state(5)
-@nx._dispatchable(edge_attrs='weight')
-def fiedler_vector(G, weight='weight', normalized=False, tol=1e-08, method=
-    'tracemin_pcg', seed=None):
+@nx._dispatchable(edge_attrs="weight")
+def fiedler_vector(
+    G, weight="weight", normalized=False, tol=1e-8, method="tracemin_pcg", seed=None
+):
     """Returns the Fiedler vector of a connected undirected graph.

     The Fiedler vector of a connected undirected graph is the eigenvector
@@ -260,13 +486,29 @@ def fiedler_vector(G, weight='weight', normalized=False, tol=1e-08, method=

     The connected components are the two 5-node cliques of the barbell graph.
     """
-    pass
+    import numpy as np
+
+    if len(G) < 2:
+        raise nx.NetworkXError("graph has less than two nodes.")
+    G = _preprocess_graph(G, weight)
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("graph is not connected.")
+
+    if len(G) == 2:
+        return np.array([1.0, -1.0])
+
+    find_fiedler = _get_fiedler_func(method)
+    L = nx.laplacian_matrix(G)
+    x = None if method != "lobpcg" else _rcm_estimate(G, G)
+    sigma, fiedler = find_fiedler(L, x, normalized, tol, seed)
+    return fiedler


 @np_random_state(5)
-@nx._dispatchable(edge_attrs='weight')
-def spectral_ordering(G, weight='weight', normalized=False, tol=1e-08,
-    method='tracemin_pcg', seed=None):
+@nx._dispatchable(edge_attrs="weight")
+def spectral_ordering(
+    G, weight="weight", normalized=False, tol=1e-8, method="tracemin_pcg", seed=None
+):
     """Compute the spectral_ordering of a graph.

     The spectral ordering of a graph is an ordering of its nodes where nodes
@@ -326,12 +568,30 @@ def spectral_ordering(G, weight='weight', normalized=False, tol=1e-08,
     --------
     laplacian_matrix
     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
-def spectral_bisection(G, weight='weight', normalized=False, tol=1e-08,
-    method='tracemin_pcg', seed=None):
+    if len(G) == 0:
+        raise nx.NetworkXError("graph is empty.")
+    G = _preprocess_graph(G, weight)
+
+    find_fiedler = _get_fiedler_func(method)
+    order = []
+    for component in nx.connected_components(G):
+        size = len(component)
+        if size > 2:
+            L = nx.laplacian_matrix(G, component)
+            x = None if method != "lobpcg" else _rcm_estimate(G, component)
+            sigma, fiedler = find_fiedler(L, x, normalized, tol, seed)
+            sort_info = zip(fiedler, range(size), component)
+            order.extend(u for x, c, u in sorted(sort_info))
+        else:
+            order.extend(component)
+
+    return order
+
+
+@nx._dispatchable(edge_attrs="weight")
+def spectral_bisection(
+    G, weight="weight", normalized=False, tol=1e-8, method="tracemin_pcg", seed=None
+):
     """Bisect the graph using the Fiedler vector.

     This method uses the Fiedler vector to bisect a graph.
@@ -387,4 +647,10 @@ def spectral_bisection(G, weight='weight', normalized=False, tol=1e-08,
     .. [1] M. E. J Newman 'Networks: An Introduction', pages 364-370
        Oxford University Press 2011.
     """
-    pass
+    import numpy as np
+
+    v = nx.fiedler_vector(G, weight, normalized, tol, method, seed)
+    nodes = np.array(list(G))
+    pos_vals = v >= 0
+
+    return set(nodes[~pos_vals].tolist()), set(nodes[pos_vals].tolist())
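
The barbell example from the fiedler_vector docstring carries over directly:

import networkx as nx

G = nx.barbell_graph(5, 0)
left, right = nx.spectral_bisection(G)
print(sorted(left), sorted(right))  # the two 5-node cliques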
diff --git a/networkx/linalg/attrmatrix.py b/networkx/linalg/attrmatrix.py
index 1e2c42177..4882c35af 100644
--- a/networkx/linalg/attrmatrix.py
+++ b/networkx/linalg/attrmatrix.py
@@ -2,7 +2,8 @@
     Functions for constructing matrix-like objects from graph attributes.
 """
 import networkx as nx
-__all__ = ['attr_matrix', 'attr_sparse_matrix']
+
+__all__ = ["attr_matrix", "attr_sparse_matrix"]


 def _node_value(G, node_attr):
@@ -29,7 +30,25 @@ def _node_value(G, node_attr):
         returns a value from G.nodes[u] that depends on `edge_attr`.

     """
-    pass
+    if node_attr is None:
+
+        def value(u):
+            return u
+
+    elif not callable(node_attr):
+        # assume it is a key for the node attribute dictionary
+        def value(u):
+            return G.nodes[u][node_attr]
+
+    else:
+        # Advanced:  Allow users to specify something else.
+        #
+        # For example,
+        #     node_attr = lambda u: G.nodes[u].get('size', .5) * 3
+        #
+        value = node_attr
+
+    return value


 def _edge_value(G, edge_attr):
@@ -62,12 +81,77 @@ def _edge_value(G, edge_attr):
         return a value from G[u][v] that depends on `edge_attr`.

     """
-    pass

+    if edge_attr is None:
+        # topological count of edges
+
+        if G.is_multigraph():
+
+            def value(u, v):
+                return len(G[u][v])
+
+        else:
+
+            def value(u, v):
+                return 1
+
+    elif not callable(edge_attr):
+        # assume it is a key for the edge attribute dictionary
+
+        if edge_attr == "weight":
+            # provide a default value
+            if G.is_multigraph():
+
+                def value(u, v):
+                    return sum(d.get(edge_attr, 1) for d in G[u][v].values())

-@nx._dispatchable(edge_attrs={'edge_attr': None}, node_attrs='node_attr')
-def attr_matrix(G, edge_attr=None, node_attr=None, normalized=False,
-    rc_order=None, dtype=None, order=None):
+            else:
+
+                def value(u, v):
+                    return G[u][v].get(edge_attr, 1)
+
+        else:
+            # otherwise, the edge attribute MUST exist for each edge
+            if G.is_multigraph():
+
+                def value(u, v):
+                    return sum(d[edge_attr] for d in G[u][v].values())
+
+            else:
+
+                def value(u, v):
+                    return G[u][v][edge_attr]
+
+    else:
+        # Advanced:  Allow users to specify something else.
+        #
+        # Alternative default value:
+        #     edge_attr = lambda u,v: G[u][v].get('thickness', .5)
+        #
+        # Function on an attribute:
+        #     edge_attr = lambda u,v: abs(G[u][v]['weight'])
+        #
+        # Handle Multi(Di)Graphs differently:
+        #     edge_attr = lambda u,v: numpy.prod([d['size'] for d in G[u][v].values()])
+        #
+        # Ignore multiple edges
+        #     edge_attr = lambda u,v: 1 if len(G[u][v]) else 0
+        #
+        value = edge_attr
+
+    return value
+
+
+@nx._dispatchable(edge_attrs={"edge_attr": None}, node_attrs="node_attr")
+def attr_matrix(
+    G,
+    edge_attr=None,
+    node_attr=None,
+    normalized=False,
+    rc_order=None,
+    dtype=None,
+    order=None,
+):
     """Returns the attribute matrix using attributes from `G` as a numpy array.

     If only `G` is passed in, then the adjacency matrix is constructed.
@@ -185,12 +269,47 @@ def attr_matrix(G, edge_attr=None, node_attr=None, normalized=False,
         (blue, blue) is 0   # there are no edges with blue endpoints

     """
-    pass
-
-
-@nx._dispatchable(edge_attrs={'edge_attr': None}, node_attrs='node_attr')
-def attr_sparse_matrix(G, edge_attr=None, node_attr=None, normalized=False,
-    rc_order=None, dtype=None):
+    import numpy as np
+
+    edge_value = _edge_value(G, edge_attr)
+    node_value = _node_value(G, node_attr)
+
+    if rc_order is None:
+        ordering = list({node_value(n) for n in G})
+    else:
+        ordering = rc_order
+
+    N = len(ordering)
+    undirected = not G.is_directed()
+    index = dict(zip(ordering, range(N)))
+    M = np.zeros((N, N), dtype=dtype, order=order)
+
+    seen = set()
+    for u, nbrdict in G.adjacency():
+        for v in nbrdict:
+            # Obtain the node attribute values.
+            i, j = index[node_value(u)], index[node_value(v)]
+            if v not in seen:
+                M[i, j] += edge_value(u, v)
+                if undirected:
+                    M[j, i] = M[i, j]
+
+        if undirected:
+            seen.add(u)
+
+    if normalized:
+        M /= M.sum(axis=1).reshape((N, 1))
+
+    if rc_order is None:
+        return M, ordering
+    else:
+        return M
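
For instance, aggregating an edge attribute into a dense matrix (a small sketch; passing rc_order makes the function return only the matrix):

import networkx as nx

G = nx.Graph()
G.add_edge(0, 1, thickness=1)
G.add_edge(0, 2, thickness=2)
G.add_edge(1, 2, thickness=3)
M = nx.attr_matrix(G, edge_attr="thickness", rc_order=[0, 1, 2])
print(M)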
+
+
+@nx._dispatchable(edge_attrs={"edge_attr": None}, node_attrs="node_attr")
+def attr_sparse_matrix(
+    G, edge_attr=None, node_attr=None, normalized=False, rc_order=None, dtype=None
+):
     """Returns a SciPy sparse array using attributes from G.

     If only `G` is passed in, then the adjacency matrix is constructed.
@@ -307,4 +426,39 @@ def attr_sparse_matrix(G, edge_attr=None, node_attr=None, normalized=False,
         (blue, blue) is 0   # there are no edges with blue endpoints

     """
-    pass
+    import numpy as np
+    import scipy as sp
+
+    edge_value = _edge_value(G, edge_attr)
+    node_value = _node_value(G, node_attr)
+
+    if rc_order is None:
+        ordering = list({node_value(n) for n in G})
+    else:
+        ordering = rc_order
+
+    N = len(ordering)
+    undirected = not G.is_directed()
+    index = dict(zip(ordering, range(N)))
+    M = sp.sparse.lil_array((N, N), dtype=dtype)
+
+    seen = set()
+    for u, nbrdict in G.adjacency():
+        for v in nbrdict:
+            # Obtain the node attribute values.
+            i, j = index[node_value(u)], index[node_value(v)]
+            if v not in seen:
+                M[i, j] += edge_value(u, v)
+                if undirected:
+                    M[j, i] = M[i, j]
+
+        if undirected:
+            seen.add(u)
+
+    if normalized:
+        M *= 1 / M.sum(axis=1)[:, np.newaxis]  # in-place mult preserves sparse
+
+    if rc_order is None:
+        return M, ordering
+    else:
+        return M
diff --git a/networkx/linalg/bethehessianmatrix.py b/networkx/linalg/bethehessianmatrix.py
index ee2e467c9..382e51810 100644
--- a/networkx/linalg/bethehessianmatrix.py
+++ b/networkx/linalg/bethehessianmatrix.py
@@ -1,14 +1,15 @@
 """Bethe Hessian or deformed Laplacian matrix of graphs."""
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['bethe_hessian_matrix']

+__all__ = ["bethe_hessian_matrix"]

-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 @nx._dispatchable
 def bethe_hessian_matrix(G, r=None, nodelist=None):
-    """Returns the Bethe Hessian matrix of G.
+    r"""Returns the Bethe Hessian matrix of G.

     The Bethe Hessian is a family of matrices parametrized by r, defined as
     H(r) = (r^2 - 1) I - r A + D where A is the adjacency matrix, D is the
@@ -18,7 +19,7 @@ def bethe_hessian_matrix(G, r=None, nodelist=None):
     The default choice of regularizer should be the ratio [2]_

     .. math::
-      r_m = \\left(\\sum k_i \\right)^{-1}\\left(\\sum k_i^2 \\right) - 1
+      r_m = \left(\sum k_i \right)^{-1}\left(\sum k_i^2 \right) - 1

     Parameters
     ----------
@@ -62,4 +63,16 @@ def bethe_hessian_matrix(G, r=None, nodelist=None):
        "Estimating the number of communities in networks by spectral methods"
        arXiv:1507.00827, 2015.
     """
-    pass
+    import scipy as sp
+
+    if nodelist is None:
+        nodelist = list(G)
+    if r is None:
+        r = sum(d**2 for v, d in nx.degree(G)) / sum(d for v, d in nx.degree(G)) - 1
+    A = nx.to_scipy_sparse_array(G, nodelist=nodelist, format="csr")
+    n, m = A.shape
+    # TODO: Rm csr_array wrapper when spdiags array creation becomes available
+    D = sp.sparse.csr_array(sp.sparse.spdiags(A.sum(axis=1), 0, m, n, format="csr"))
+    # TODO: Rm csr_array wrapper when eye array creation becomes available
+    I = sp.sparse.csr_array(sp.sparse.eye(m, n, format="csr"))
+    return (r**2 - 1) * I - r * A + D
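
Basic usage with the default regularizer r_m:

import networkx as nx

G = nx.karate_club_graph()
H = nx.bethe_hessian_matrix(G)
print(H.shape)  # (34, 34), a SciPy sparse array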
diff --git a/networkx/linalg/graphmatrix.py b/networkx/linalg/graphmatrix.py
index 02c982d26..640fccc6e 100644
--- a/networkx/linalg/graphmatrix.py
+++ b/networkx/linalg/graphmatrix.py
@@ -2,12 +2,14 @@
 Adjacency matrix and incidence matrix of graphs.
 """
 import networkx as nx
-__all__ = ['incidence_matrix', 'adjacency_matrix']

+__all__ = ["incidence_matrix", "adjacency_matrix"]

-@nx._dispatchable(edge_attrs='weight')
-def incidence_matrix(G, nodelist=None, edgelist=None, oriented=False,
-    weight=None, *, dtype=None):
+
+@nx._dispatchable(edge_attrs="weight")
+def incidence_matrix(
+    G, nodelist=None, edgelist=None, oriented=False, weight=None, *, dtype=None
+):
     """Returns incidence matrix of G.

     The incidence matrix assigns each row to a node and each column to an edge.
@@ -63,11 +65,47 @@ def incidence_matrix(G, nodelist=None, edgelist=None, oriented=False,
     .. [1] Gil Strang, Network applications: A = incidence matrix,
        http://videolectures.net/mit18085f07_strang_lec03/
     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
-def adjacency_matrix(G, nodelist=None, dtype=None, weight='weight'):
+    import scipy as sp
+
+    if nodelist is None:
+        nodelist = list(G)
+    if edgelist is None:
+        if G.is_multigraph():
+            edgelist = list(G.edges(keys=True))
+        else:
+            edgelist = list(G.edges())
+    A = sp.sparse.lil_array((len(nodelist), len(edgelist)), dtype=dtype)
+    node_index = {node: i for i, node in enumerate(nodelist)}
+    for ei, e in enumerate(edgelist):
+        (u, v) = e[:2]
+        if u == v:
+            continue  # self loops give zero column
+        try:
+            ui = node_index[u]
+            vi = node_index[v]
+        except KeyError as err:
+            raise nx.NetworkXError(
+                f"node {u} or {v} in edgelist but not in nodelist"
+            ) from err
+        if weight is None:
+            wt = 1
+        else:
+            if G.is_multigraph():
+                ekey = e[2]
+                wt = G[u][v][ekey].get(weight, 1)
+            else:
+                wt = G[u][v].get(weight, 1)
+        if oriented:
+            A[ui, ei] = -wt
+            A[vi, ei] = wt
+        else:
+            A[ui, ei] = wt
+            A[vi, ei] = wt
+    return A.asformat("csc")
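
A sketch of the oriented branch above on a 3-node path graph; columns follow the order of G.edges(), and the default dtype is float:

    import networkx as nx

    G = nx.path_graph(3)                       # edges (0, 1) and (1, 2)
    M = nx.incidence_matrix(G, oriented=True)  # CSC sparse array
    print(M.toarray())
    # [[-1.  0.]
    #  [ 1. -1.]
    #  [ 0.  1.]]
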
+
+
+@nx._dispatchable(edge_attrs="weight")
+def adjacency_matrix(G, nodelist=None, dtype=None, weight="weight"):
     """Returns adjacency matrix of G.

     Parameters
@@ -125,4 +163,4 @@ def adjacency_matrix(G, nodelist=None, dtype=None, weight='weight'):
     to_dict_of_dicts
     adjacency_spectrum
     """
-    pass
+    return nx.to_scipy_sparse_array(G, nodelist=nodelist, dtype=dtype, weight=weight)
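
And the corresponding sketch for adjacency_matrix, which simply delegates to to_scipy_sparse_array:

    G = nx.Graph([(0, 1), (1, 2)])
    A = nx.adjacency_matrix(G)  # SciPy sparse array in CSR format
    print(A.toarray())
    # [[0 1 0]
    #  [1 0 1]
    #  [0 1 0]]
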
diff --git a/networkx/linalg/laplacianmatrix.py b/networkx/linalg/laplacianmatrix.py
index 4f556799c..f68c6614d 100644
--- a/networkx/linalg/laplacianmatrix.py
+++ b/networkx/linalg/laplacianmatrix.py
@@ -9,13 +9,18 @@ and `directed_combinatorial_laplacian_matrix` are all normalized.
 """
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['laplacian_matrix', 'normalized_laplacian_matrix',
-    'total_spanning_tree_weight', 'directed_laplacian_matrix',
-    'directed_combinatorial_laplacian_matrix']

+__all__ = [
+    "laplacian_matrix",
+    "normalized_laplacian_matrix",
+    "total_spanning_tree_weight",
+    "directed_laplacian_matrix",
+    "directed_combinatorial_laplacian_matrix",
+]

-@nx._dispatchable(edge_attrs='weight')
-def laplacian_matrix(G, nodelist=None, weight='weight'):
+
+@nx._dispatchable(edge_attrs="weight")
+def laplacian_matrix(G, nodelist=None, weight="weight"):
     """Returns the Laplacian matrix of G.

     The graph Laplacian is the matrix L = D - A, where
@@ -114,12 +119,20 @@ def laplacian_matrix(G, nodelist=None, weight='weight'):
        The Science of Search Engine Rankings. Princeton University Press, 2006.

     """
-    pass
+    import scipy as sp
+
+    if nodelist is None:
+        nodelist = list(G)
+    A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr")
+    n, m = A.shape
+    # TODO: rm csr_array wrapper when spdiags can produce arrays
+    D = sp.sparse.csr_array(sp.sparse.spdiags(A.sum(axis=1), 0, m, n, format="csr"))
+    return D - A
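
A sketch of L = D - A on a small path graph, where the diagonal carries the node degrees:

    G = nx.path_graph(3)
    print(nx.laplacian_matrix(G).toarray())
    # [[ 1 -1  0]
    #  [-1  2 -1]
    #  [ 0 -1  1]]
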


-@nx._dispatchable(edge_attrs='weight')
-def normalized_laplacian_matrix(G, nodelist=None, weight='weight'):
-    """Returns the normalized Laplacian matrix of G.
+@nx._dispatchable(edge_attrs="weight")
+def normalized_laplacian_matrix(G, nodelist=None, weight="weight"):
+    r"""Returns the normalized Laplacian matrix of G.

     The normalized graph Laplacian is the matrix

@@ -214,10 +227,26 @@ def normalized_laplacian_matrix(G, nodelist=None, weight='weight'):
     .. [3] Langville, Amy N., and Carl D. Meyer. Google’s PageRank and Beyond:
        The Science of Search Engine Rankings. Princeton University Press, 2006.
     """
-    pass
-
-
-@nx._dispatchable(edge_attrs='weight')
+    import numpy as np
+    import scipy as sp
+
+    if nodelist is None:
+        nodelist = list(G)
+    A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr")
+    n, _ = A.shape
+    diags = A.sum(axis=1)
+    # TODO: rm csr_array wrapper when spdiags can produce arrays
+    D = sp.sparse.csr_array(sp.sparse.spdiags(diags, 0, n, n, format="csr"))
+    L = D - A
+    with np.errstate(divide="ignore"):
+        diags_sqrt = 1.0 / np.sqrt(diags)
+    diags_sqrt[np.isinf(diags_sqrt)] = 0
+    # TODO: rm csr_array wrapper when spdiags can produce arrays
+    DH = sp.sparse.csr_array(sp.sparse.spdiags(diags_sqrt, 0, n, n, format="csr"))
+    return DH @ (L @ DH)
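
A sketch of the symmetric normalization DH @ L @ DH above; isolated nodes yield zero rows because 1/sqrt(0) is clamped to 0:

    G = nx.path_graph(3)
    print(nx.normalized_laplacian_matrix(G).toarray().round(2))
    # [[ 1.   -0.71  0.  ]
    #  [-0.71  1.   -0.71]
    #  [ 0.   -0.71  1.  ]]
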
+
+
+@nx._dispatchable(edge_attrs="weight")
 def total_spanning_tree_weight(G, weight=None, root=None):
     """
     Returns the total weight of all spanning trees of `G`.
@@ -299,24 +328,40 @@ def total_spanning_tree_weight(G, weight=None, root=None):
         "Matrix-Tree Theorem for Directed Graphs"
         https://www.math.uchicago.edu/~may/VIGRE/VIGRE2010/REUPapers/Margoliash.pdf
     """
-    pass
+    import warnings
+
+    warnings.warn(
+        (
+            "\n\ntotal_spanning_tree_weight is deprecated and will be removed in v3.5.\n"
+            "Use `nx.number_of_spanning_trees(G)` instead."
+        ),
+        category=DeprecationWarning,
+        stacklevel=3,
+    )
+
+    return nx.number_of_spanning_trees(G, weight=weight, root=root)
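
Since the function now only forwards to nx.number_of_spanning_trees, new code should call that directly; for K4, Cayley's formula gives 4**(4-2) = 16 spanning trees:

    G = nx.complete_graph(4)
    print(nx.number_of_spanning_trees(G))  # 16
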
+

+###############################################################################
+# Code based on work from https://github.com/bjedwards

-@not_implemented_for('undirected')
-@not_implemented_for('multigraph')
-@nx._dispatchable(edge_attrs='weight')
-def directed_laplacian_matrix(G, nodelist=None, weight='weight', walk_type=
-    None, alpha=0.95):
-    """Returns the directed Laplacian matrix of G.
+
+@not_implemented_for("undirected")
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
+def directed_laplacian_matrix(
+    G, nodelist=None, weight="weight", walk_type=None, alpha=0.95
+):
+    r"""Returns the directed Laplacian matrix of G.

     The graph directed Laplacian is the matrix

     .. math::

-        L = I - \\frac{1}{2} \\left (\\Phi^{1/2} P \\Phi^{-1/2} + \\Phi^{-1/2} P^T \\Phi^{1/2} \\right )
+        L = I - \frac{1}{2} \left (\Phi^{1/2} P \Phi^{-1/2} + \Phi^{-1/2} P^T \Phi^{1/2} \right )

     where `I` is the identity matrix, `P` is the transition matrix of the
-    graph, and `\\Phi` a matrix with the Perron vector of `P` in the diagonal and
+    graph, and `\Phi` a matrix with the Perron vector of `P` in the diagonal and
     zeros elsewhere [1]_.

     Depending on the value of walk_type, `P` can be the transition matrix
@@ -373,23 +418,49 @@ def directed_laplacian_matrix(G, nodelist=None, weight='weight', walk_type=
        Laplacians and the Cheeger inequality for directed graphs.
        Annals of Combinatorics, 9(1), 2005
     """
-    pass
-
-
-@not_implemented_for('undirected')
-@not_implemented_for('multigraph')
-@nx._dispatchable(edge_attrs='weight')
-def directed_combinatorial_laplacian_matrix(G, nodelist=None, weight=
-    'weight', walk_type=None, alpha=0.95):
-    """Return the directed combinatorial Laplacian matrix of G.
+    import numpy as np
+    import scipy as sp
+
+    # NOTE: P has type ndarray if walk_type=="pagerank", else csr_array
+    P = _transition_matrix(
+        G, nodelist=nodelist, weight=weight, walk_type=walk_type, alpha=alpha
+    )
+
+    n, m = P.shape
+
+    evals, evecs = sp.sparse.linalg.eigs(P.T, k=1)
+    v = evecs.flatten().real
+    p = v / v.sum()
+    # p>=0 by Perron-Frobenius Thm. Use abs() to fix roundoff across zero gh-6865
+    sqrtp = np.sqrt(np.abs(p))
+    Q = (
+        # TODO: rm csr_array wrapper when spdiags creates arrays
+        sp.sparse.csr_array(sp.sparse.spdiags(sqrtp, 0, n, n))
+        @ P
+        # TODO: rm csr_array wrapper when spdiags creates arrays
+        @ sp.sparse.csr_array(sp.sparse.spdiags(1.0 / sqrtp, 0, n, n))
+    )
+    # NOTE: This could be sparsified for the non-pagerank cases
+    I = np.identity(len(G))
+
+    return I - (Q + Q.T) / 2.0
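
A minimal sketch; the graph below is strongly connected and aperiodic, so the plain random-walk transition matrix is selected and the result is a dense symmetric ndarray:

    G = nx.DiGraph([(0, 1), (1, 2), (2, 0), (0, 2)])
    L = nx.directed_laplacian_matrix(G)
    print(L.round(3))
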
+
+
+@not_implemented_for("undirected")
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
+def directed_combinatorial_laplacian_matrix(
+    G, nodelist=None, weight="weight", walk_type=None, alpha=0.95
+):
+    r"""Return the directed combinatorial Laplacian matrix of G.

     The graph directed combinatorial Laplacian is the matrix

     .. math::

-        L = \\Phi - \\frac{1}{2} \\left (\\Phi P + P^T \\Phi \\right)
+        L = \Phi - \frac{1}{2} \left (\Phi P + P^T \Phi \right)

-    where `P` is the transition matrix of the graph and `\\Phi` a matrix
+    where `P` is the transition matrix of the graph and `\Phi` a matrix
     with the Perron vector of `P` in the diagonal and zeros elsewhere [1]_.

     Depending on the value of walk_type, `P` can be the transition matrix
@@ -446,11 +517,25 @@ def directed_combinatorial_laplacian_matrix(G, nodelist=None, weight=
        Laplacians and the Cheeger inequality for directed graphs.
        Annals of Combinatorics, 9(1), 2005
     """
-    pass
+    import scipy as sp
+
+    P = _transition_matrix(
+        G, nodelist=nodelist, weight=weight, walk_type=walk_type, alpha=alpha
+    )
+
+    n, m = P.shape
+
+    evals, evecs = sp.sparse.linalg.eigs(P.T, k=1)
+    v = evecs.flatten().real
+    p = v / v.sum()
+    # NOTE: could be improved by not densifying
+    # TODO: Rm csr_array wrapper when spdiags array creation becomes available
+    Phi = sp.sparse.csr_array(sp.sparse.spdiags(p, 0, n, n)).toarray()
+
+    return Phi - (Phi @ P + P.T @ Phi) / 2.0
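
The combinatorial variant on the same graph; Phi carries the Perron vector of P on its diagonal, so the result is again symmetric:

    G = nx.DiGraph([(0, 1), (1, 2), (2, 0), (0, 2)])
    L = nx.directed_combinatorial_laplacian_matrix(G)
    print(L.round(3))
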


-def _transition_matrix(G, nodelist=None, weight='weight', walk_type=None,
-    alpha=0.95):
+def _transition_matrix(G, nodelist=None, weight="weight", walk_type=None, alpha=0.95):
     """Returns the transition matrix of G.

     This is a row stochastic matrix giving the transition probabilities while
@@ -491,4 +576,41 @@ def _transition_matrix(G, nodelist=None, weight='weight', walk_type=None,
     NetworkXError
         If walk_type not specified or alpha not in valid range
     """
-    pass
+    import numpy as np
+    import scipy as sp
+
+    if walk_type is None:
+        if nx.is_strongly_connected(G):
+            if nx.is_aperiodic(G):
+                walk_type = "random"
+            else:
+                walk_type = "lazy"
+        else:
+            walk_type = "pagerank"
+
+    A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, dtype=float)
+    n, m = A.shape
+    if walk_type in ["random", "lazy"]:
+        # TODO: Rm csr_array wrapper when spdiags array creation becomes available
+        DI = sp.sparse.csr_array(sp.sparse.spdiags(1.0 / A.sum(axis=1), 0, n, n))
+        if walk_type == "random":
+            P = DI @ A
+        else:
+            # TODO: Rm csr_array wrapper when identity array creation becomes available
+            I = sp.sparse.csr_array(sp.sparse.identity(n))
+            P = (I + DI @ A) / 2.0
+
+    elif walk_type == "pagerank":
+        if not (0 < alpha < 1):
+            raise nx.NetworkXError("alpha must be between 0 and 1")
+        # this is using a dense representation. NOTE: This should be sparsified!
+        A = A.toarray()
+        # add constant to dangling nodes' row
+        A[A.sum(axis=1) == 0, :] = 1 / n
+        # normalize
+        A = A / A.sum(axis=1)[np.newaxis, :].T
+        P = alpha * A + (1 - alpha) / n
+    else:
+        raise nx.NetworkXError("walk_type must be random, lazy, or pagerank")
+
+    return P
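
_transition_matrix is private, but the walk_type selection above can be exercised through the public wrappers, e.g. by forcing the pagerank walk, which is also what the fallback picks when G is not strongly connected:

    G = nx.DiGraph([(0, 1), (1, 0), (1, 2)])  # node 2 is dangling; not strongly connected
    L = nx.directed_laplacian_matrix(G, walk_type="pagerank", alpha=0.9)
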
diff --git a/networkx/linalg/modularitymatrix.py b/networkx/linalg/modularitymatrix.py
index e1c54d42d..fc599b353 100644
--- a/networkx/linalg/modularitymatrix.py
+++ b/networkx/linalg/modularitymatrix.py
@@ -2,14 +2,15 @@
 """
 import networkx as nx
 from networkx.utils import not_implemented_for
-__all__ = ['modularity_matrix', 'directed_modularity_matrix']

+__all__ = ["modularity_matrix", "directed_modularity_matrix"]

-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
-@nx._dispatchable(edge_attrs='weight')
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
 def modularity_matrix(G, nodelist=None, weight=None):
-    """Returns the modularity matrix of G.
+    r"""Returns the modularity matrix of G.

     The modularity matrix is the matrix B = A - <A>, where A is the adjacency
     matrix and <A> is the average adjacency matrix, assuming that the graph
@@ -18,7 +19,7 @@ def modularity_matrix(G, nodelist=None, weight=None):
     More specifically, the element B_ij of B is defined as

     .. math::
-        A_{ij} - {k_i k_j \\over 2 m}
+        A_{ij} - {k_i k_j \over 2 m}

     where k_i is the degree of node i, and where m is the number of edges
     in the graph. When weight is set to a name of an attribute edge, Aij, k_i,
@@ -61,12 +62,22 @@ def modularity_matrix(G, nodelist=None, weight=None):
     .. [1] M. E. J. Newman, "Modularity and community structure in networks",
            Proc. Natl. Acad. Sci. USA, vol. 103, pp. 8577-8582, 2006.
     """
-    pass
+    import numpy as np
+
+    if nodelist is None:
+        nodelist = list(G)
+    A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr")
+    k = A.sum(axis=1)
+    m = k.sum() * 0.5
+    # Expected adjacency matrix
+    X = np.outer(k, k) / (2 * m)
+
+    return A - X
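
A sanity-check sketch: each row of B = A - k k^T / (2m) sums to zero by construction, since row i sums to k_i - k_i * 2m / 2m:

    G = nx.path_graph(3)
    B = nx.modularity_matrix(G)
    print(B.sum(axis=1))  # [0. 0. 0.]
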


-@not_implemented_for('undirected')
-@not_implemented_for('multigraph')
-@nx._dispatchable(edge_attrs='weight')
+@not_implemented_for("undirected")
+@not_implemented_for("multigraph")
+@nx._dispatchable(edge_attrs="weight")
 def directed_modularity_matrix(G, nodelist=None, weight=None):
     """Returns the directed modularity matrix of G.

@@ -141,4 +152,15 @@ def directed_modularity_matrix(G, nodelist=None, weight=None):
         "Community structure in directed networks",
         Phys. Rev Lett., vol. 100, no. 11, p. 118703, 2008.
     """
-    pass
+    import numpy as np
+
+    if nodelist is None:
+        nodelist = list(G)
+    A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr")
+    k_in = A.sum(axis=0)
+    k_out = A.sum(axis=1)
+    m = k_in.sum()
+    # Expected adjacency matrix
+    X = np.outer(k_out, k_in) / m
+
+    return A - X
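
The directed analogue uses separate in- and out-degree vectors; on a directed 3-cycle every k_in = k_out = 1 and m = 3, so the expected matrix X is constant 1/3:

    G = nx.DiGraph([(0, 1), (1, 2), (2, 0)])
    B = nx.directed_modularity_matrix(G)
    print(B.round(2))  # A - outer(k_out, k_in) / m
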
diff --git a/networkx/linalg/spectrum.py b/networkx/linalg/spectrum.py
index 6242840de..16dfa148c 100644
--- a/networkx/linalg/spectrum.py
+++ b/networkx/linalg/spectrum.py
@@ -2,13 +2,18 @@
 Eigenvalue spectrum of graphs.
 """
 import networkx as nx
-__all__ = ['laplacian_spectrum', 'adjacency_spectrum',
-    'modularity_spectrum', 'normalized_laplacian_spectrum',
-    'bethe_hessian_spectrum']

+__all__ = [
+    "laplacian_spectrum",
+    "adjacency_spectrum",
+    "modularity_spectrum",
+    "normalized_laplacian_spectrum",
+    "bethe_hessian_spectrum",
+]

-@nx._dispatchable(edge_attrs='weight')
-def laplacian_spectrum(G, weight='weight'):
+
+@nx._dispatchable(edge_attrs="weight")
+def laplacian_spectrum(G, weight="weight"):
     """Returns eigenvalues of the Laplacian of G

     Parameters
@@ -46,11 +51,13 @@ def laplacian_spectrum(G, weight='weight'):
     array([0., 0., 0., 2., 2.])

     """
-    pass
+    import scipy as sp
+
+    return sp.linalg.eigvalsh(nx.laplacian_matrix(G, weight=weight).todense())


-@nx._dispatchable(edge_attrs='weight')
-def normalized_laplacian_spectrum(G, weight='weight'):
+@nx._dispatchable(edge_attrs="weight")
+def normalized_laplacian_spectrum(G, weight="weight"):
     """Return eigenvalues of the normalized Laplacian of G

     Parameters
@@ -76,11 +83,15 @@ def normalized_laplacian_spectrum(G, weight='weight'):
     --------
     normalized_laplacian_matrix
     """
-    pass
+    import scipy as sp
+
+    return sp.linalg.eigvalsh(
+        nx.normalized_laplacian_matrix(G, weight=weight).todense()
+    )


-@nx._dispatchable(edge_attrs='weight')
-def adjacency_spectrum(G, weight='weight'):
+@nx._dispatchable(edge_attrs="weight")
+def adjacency_spectrum(G, weight="weight"):
     """Returns eigenvalues of the adjacency matrix of G.

     Parameters
@@ -106,7 +117,9 @@ def adjacency_spectrum(G, weight='weight'):
     --------
     adjacency_matrix
     """
-    pass
+    import scipy as sp
+
+    return sp.linalg.eigvals(nx.adjacency_matrix(G, weight=weight).todense())


 @nx._dispatchable
@@ -132,7 +145,12 @@ def modularity_spectrum(G):
     .. [1] M. E. J. Newman, "Modularity and community structure in networks",
        Proc. Natl. Acad. Sci. USA, vol. 103, pp. 8577-8582, 2006.
     """
-    pass
+    import scipy as sp
+
+    if G.is_directed():
+        return sp.linalg.eigvals(nx.directed_modularity_matrix(G))
+    else:
+        return sp.linalg.eigvals(nx.modularity_matrix(G))


 @nx._dispatchable
@@ -162,4 +180,6 @@ def bethe_hessian_spectrum(G, r=None):
        "Spectral clustering of graphs with the bethe hessian",
        Advances in Neural Information Processing Systems. 2014.
     """
-    pass
+    import scipy as sp
+
+    return sp.linalg.eigvalsh(nx.bethe_hessian_matrix(G, r).todense())
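
A sketch of the spectrum helpers on a 4-cycle; eigvalsh returns sorted real eigenvalues for the symmetric matrices, while eigvals returns unsorted (possibly complex) ones:

    G = nx.cycle_graph(4)
    print(nx.laplacian_spectrum(G))       # [0. 2. 2. 4.]
    print(nx.adjacency_spectrum(G).real)  # 2, 0, 0, -2 in some order
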
diff --git a/networkx/readwrite/adjlist.py b/networkx/readwrite/adjlist.py
index 60e2bbf85..56a1b4d2b 100644
--- a/networkx/readwrite/adjlist.py
+++ b/networkx/readwrite/adjlist.py
@@ -21,13 +21,14 @@ adjacency list (anything following the # in a line is a comment)::
      a b c # source target target
      d e
 """
-__all__ = ['generate_adjlist', 'write_adjlist', 'parse_adjlist', 'read_adjlist'
-    ]
+
+__all__ = ["generate_adjlist", "write_adjlist", "parse_adjlist", "read_adjlist"]
+
 import networkx as nx
 from networkx.utils import open_file


-def generate_adjlist(G, delimiter=' '):
+def generate_adjlist(G, delimiter=" "):
     """Generate a single line of the graph G in adjacency list format.

     Parameters
@@ -68,11 +69,25 @@ def generate_adjlist(G, delimiter=' '):
     NB: This option is not available for data that isn't user-generated.

     """
-    pass
-
-
-@open_file(1, mode='wb')
-def write_adjlist(G, path, comments='#', delimiter=' ', encoding='utf-8'):
+    directed = G.is_directed()
+    seen = set()
+    for s, nbrs in G.adjacency():
+        line = str(s) + delimiter
+        for t, data in nbrs.items():
+            if not directed and t in seen:
+                continue
+            if G.is_multigraph():
+                for d in data.values():
+                    line += str(t) + delimiter
+            else:
+                line += str(t) + delimiter
+        if not directed:
+            seen.add(s)
+        yield line[: -len(delimiter)]
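
A sketch of the generator above; for undirected graphs each edge appears only once thanks to the seen set:

    G = nx.path_graph(4)
    for line in nx.generate_adjlist(G):
        print(line)
    # 0 1
    # 1 2
    # 2 3
    # 3
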
+
+
+@open_file(1, mode="wb")
+def write_adjlist(G, path, comments="#", delimiter=" ", encoding="utf-8"):
     """Write graph G in single-line adjacency-list format to path.


@@ -117,12 +132,28 @@ def write_adjlist(G, path, comments='#', delimiter=' ', encoding='utf-8'):
     --------
     read_adjlist, generate_adjlist
     """
-    pass
+    import sys
+    import time
+
+    pargs = comments + " ".join(sys.argv) + "\n"
+    header = (
+        pargs
+        + comments
+        + f" GMT {time.asctime(time.gmtime())}\n"
+        + comments
+        + f" {G.name}\n"
+    )
+    path.write(header.encode(encoding))
+
+    for line in generate_adjlist(G, delimiter):
+        line += "\n"
+        path.write(line.encode(encoding))


 @nx._dispatchable(graphs=None, returns_graph=True)
-def parse_adjlist(lines, comments='#', delimiter=None, create_using=None,
-    nodetype=None):
+def parse_adjlist(
+    lines, comments="#", delimiter=None, create_using=None, nodetype=None
+):
     """Parse lines of a graph adjacency list representation.

     Parameters
@@ -163,13 +194,45 @@ def parse_adjlist(lines, comments='#', delimiter=None, create_using=None,
     read_adjlist

     """
-    pass
-
-
-@open_file(0, mode='rb')
+    G = nx.empty_graph(0, create_using)
+    for line in lines:
+        p = line.find(comments)
+        if p >= 0:
+            line = line[:p]
+        if not len(line):
+            continue
+        vlist = line.strip().split(delimiter)
+        u = vlist.pop(0)
+        # convert types
+        if nodetype is not None:
+            try:
+                u = nodetype(u)
+            except BaseException as err:
+                raise TypeError(
+                    f"Failed to convert node ({u}) to type {nodetype}"
+                ) from err
+        G.add_node(u)
+        if nodetype is not None:
+            try:
+                vlist = list(map(nodetype, vlist))
+            except BaseException as err:
+                raise TypeError(
+                    f"Failed to convert nodes ({','.join(vlist)}) to type {nodetype}"
+                ) from err
+        G.add_edges_from([(u, v) for v in vlist])
+    return G
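
A sketch of parsing with node-type conversion; the first field of each line is the source, the rest are its neighbors:

    lines = ["1 2 5", "2 3 4", "3 5", "4", "5"]
    G = nx.parse_adjlist(lines, nodetype=int)
    print(sorted(G.edges()))  # [(1, 2), (1, 5), (2, 3), (2, 4), (3, 5)]
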
+
+
+@open_file(0, mode="rb")
 @nx._dispatchable(graphs=None, returns_graph=True)
-def read_adjlist(path, comments='#', delimiter=None, create_using=None,
-    nodetype=None, encoding='utf-8'):
+def read_adjlist(
+    path,
+    comments="#",
+    delimiter=None,
+    create_using=None,
+    nodetype=None,
+    encoding="utf-8",
+):
     """Read graph in adjacency list format from path.

     Parameters
@@ -237,4 +300,11 @@ def read_adjlist(path, comments='#', delimiter=None, create_using=None,
     --------
     write_adjlist
     """
-    pass
+    lines = (line.decode(encoding) for line in path)
+    return parse_adjlist(
+        lines,
+        comments=comments,
+        delimiter=delimiter,
+        create_using=create_using,
+        nodetype=nodetype,
+    )
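
A write/read round trip through an in-memory buffer (the open_file decorator passes already-open file-like objects through unchanged); the comment header written by write_adjlist is stripped on parse:

    import io

    buf = io.BytesIO()
    nx.write_adjlist(nx.path_graph(3), buf)
    buf.seek(0)
    H = nx.read_adjlist(buf, nodetype=int)
    print(sorted(H.edges()))  # [(0, 1), (1, 2)]
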
diff --git a/networkx/readwrite/edgelist.py b/networkx/readwrite/edgelist.py
index e8aead3fe..ed445c80b 100644
--- a/networkx/readwrite/edgelist.py
+++ b/networkx/readwrite/edgelist.py
@@ -26,13 +26,21 @@ Arbitrary data::

  1 2 7 green
 """
-__all__ = ['generate_edgelist', 'write_edgelist', 'parse_edgelist',
-    'read_edgelist', 'read_weighted_edgelist', 'write_weighted_edgelist']
+
+__all__ = [
+    "generate_edgelist",
+    "write_edgelist",
+    "parse_edgelist",
+    "read_edgelist",
+    "read_weighted_edgelist",
+    "write_weighted_edgelist",
+]
+
 import networkx as nx
 from networkx.utils import open_file


-def generate_edgelist(G, delimiter=' ', data=True):
+def generate_edgelist(G, delimiter=" ", data=True):
     """Generate a single line of the graph G in edge list format.

     Parameters
@@ -97,12 +105,26 @@ def generate_edgelist(G, delimiter=' ', data=True):
     --------
     write_adjlist, read_adjlist
     """
-    pass
-
-
-@open_file(1, mode='wb')
-def write_edgelist(G, path, comments='#', delimiter=' ', data=True,
-    encoding='utf-8'):
+    if data is True:
+        for u, v, d in G.edges(data=True):
+            e = u, v, dict(d)
+            yield delimiter.join(map(str, e))
+    elif data is False:
+        for u, v in G.edges(data=False):
+            e = u, v
+            yield delimiter.join(map(str, e))
+    else:
+        for u, v, d in G.edges(data=True):
+            e = [u, v]
+            try:
+                e.extend(d[k] for k in data)
+            except KeyError:
+                pass  # missing data for this edge, should warn?
+            yield delimiter.join(map(str, e))
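
A sketch covering the three data branches above (False, a key list, and the default True which emits the full attribute dict):

    G = nx.Graph()
    G.add_edge(1, 2, weight=7, color="red")
    print(list(nx.generate_edgelist(G, data=False)))       # ['1 2']
    print(list(nx.generate_edgelist(G, data=["weight"])))  # ['1 2 7']
    print(list(nx.generate_edgelist(G)))                   # ["1 2 {'weight': 7, 'color': 'red'}"]
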
+
+
+@open_file(1, mode="wb")
+def write_edgelist(G, path, comments="#", delimiter=" ", data=True, encoding="utf-8"):
     """Write graph as a list of edges.

     Parameters
@@ -145,12 +167,16 @@ def write_edgelist(G, path, comments='#', delimiter=' ', data=True,
     read_edgelist
     write_weighted_edgelist
     """
-    pass
+
+    for line in generate_edgelist(G, delimiter, data):
+        line += "\n"
+        path.write(line.encode(encoding))


 @nx._dispatchable(graphs=None, returns_graph=True)
-def parse_edgelist(lines, comments='#', delimiter=None, create_using=None,
-    nodetype=None, data=True):
+def parse_edgelist(
+    lines, comments="#", delimiter=None, create_using=None, nodetype=None, data=True
+):
     """Parse lines of an edge list representation of a graph.

     Parameters
@@ -210,13 +236,79 @@ def parse_edgelist(lines, comments='#', delimiter=None, create_using=None,
     --------
     read_weighted_edgelist
     """
-    pass
-
-
-@open_file(0, mode='rb')
+    from ast import literal_eval
+
+    G = nx.empty_graph(0, create_using)
+    for line in lines:
+        if comments is not None:
+            p = line.find(comments)
+            if p >= 0:
+                line = line[:p]
+            if not line:
+                continue
+        # split line; expect at least 2 fields (source, target, optional data)
+        s = line.strip().split(delimiter)
+        if len(s) < 2:
+            continue
+        u = s.pop(0)
+        v = s.pop(0)
+        d = s
+        if nodetype is not None:
+            try:
+                u = nodetype(u)
+                v = nodetype(v)
+            except Exception as err:
+                raise TypeError(
+                    f"Failed to convert nodes {u},{v} to type {nodetype}."
+                ) from err
+
+        if len(d) == 0 or data is False:
+            # no data or data type specified
+            edgedata = {}
+        elif data is True:
+            # no edge types specified
+            try:  # try to evaluate as dictionary
+                if delimiter == ",":
+                    edgedata_str = ",".join(d)
+                else:
+                    edgedata_str = " ".join(d)
+                edgedata = dict(literal_eval(edgedata_str.strip()))
+            except Exception as err:
+                raise TypeError(
+                    f"Failed to convert edge data ({d}) to dictionary."
+                ) from err
+        else:
+            # convert edge data to dictionary with specified keys and type
+            if len(d) != len(data):
+                raise IndexError(
+                    f"Edge data {d} and data_keys {data} are not the same length"
+                )
+            edgedata = {}
+            for (edge_key, edge_type), edge_value in zip(data, d):
+                try:
+                    edge_value = edge_type(edge_value)
+                except Exception as err:
+                    raise TypeError(
+                        f"Failed to convert {edge_key} data {edge_value} "
+                        f"to type {edge_type}."
+                    ) from err
+                edgedata.update({edge_key: edge_value})
+        G.add_edge(u, v, **edgedata)
+    return G
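
A sketch of the two data conventions handled above: a literal dict per line (data=True), or typed (key, type) pairs:

    G = nx.parse_edgelist(["1 2 {'weight': 3}"], nodetype=int)
    print(G[1][2]["weight"])  # 3
    H = nx.parse_edgelist(["1 2 3.5"], nodetype=int, data=(("weight", float),))
    print(H[1][2]["weight"])  # 3.5
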
+
+
+@open_file(0, mode="rb")
 @nx._dispatchable(graphs=None, returns_graph=True)
-def read_edgelist(path, comments='#', delimiter=None, create_using=None,
-    nodetype=None, data=True, edgetype=None, encoding='utf-8'):
+def read_edgelist(
+    path,
+    comments="#",
+    delimiter=None,
+    create_using=None,
+    nodetype=None,
+    data=True,
+    edgetype=None,
+    encoding="utf-8",
+):
     """Read a graph from a list of edges.

     Parameters
@@ -282,11 +374,18 @@ def read_edgelist(path, comments='#', delimiter=None, create_using=None,
     Since nodes must be hashable, the function nodetype must return hashable
     types (e.g. int, float, str, frozenset - or tuples of those, etc.)
     """
-    pass
-
-
-def write_weighted_edgelist(G, path, comments='#', delimiter=' ', encoding=
-    'utf-8'):
+    lines = (line if isinstance(line, str) else line.decode(encoding) for line in path)
+    return parse_edgelist(
+        lines,
+        comments=comments,
+        delimiter=delimiter,
+        create_using=create_using,
+        nodetype=nodetype,
+        data=data,
+    )
+
+
+def write_weighted_edgelist(G, path, comments="#", delimiter=" ", encoding="utf-8"):
     """Write graph G as a list of edges with numeric weights.

     Parameters
@@ -316,12 +415,25 @@ def write_weighted_edgelist(G, path, comments='#', delimiter=' ', encoding=
     write_edgelist
     read_weighted_edgelist
     """
-    pass
+    write_edgelist(
+        G,
+        path,
+        comments=comments,
+        delimiter=delimiter,
+        data=("weight",),
+        encoding=encoding,
+    )


 @nx._dispatchable(graphs=None, returns_graph=True)
-def read_weighted_edgelist(path, comments='#', delimiter=None, create_using
-    =None, nodetype=None, encoding='utf-8'):
+def read_weighted_edgelist(
+    path,
+    comments="#",
+    delimiter=None,
+    create_using=None,
+    nodetype=None,
+    encoding="utf-8",
+):
     """Read a graph as list of edges with numeric weights.

     Parameters
@@ -366,4 +478,12 @@ def read_weighted_edgelist(path, comments='#', delimiter=None, create_using
     --------
     write_weighted_edgelist
     """
-    pass
+    return read_edgelist(
+        path,
+        comments=comments,
+        delimiter=delimiter,
+        create_using=create_using,
+        nodetype=nodetype,
+        data=(("weight", float),),
+        encoding=encoding,
+    )
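
And the weighted convenience pair as a round trip, again through an in-memory buffer:

    import io

    buf = io.BytesIO()
    G = nx.Graph()
    G.add_edge("a", "b", weight=2.5)
    nx.write_weighted_edgelist(G, buf)
    buf.seek(0)
    H = nx.read_weighted_edgelist(buf)
    print(H["a"]["b"]["weight"])  # 2.5
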
diff --git a/networkx/readwrite/gexf.py b/networkx/readwrite/gexf.py
index 47ccf4e05..16b864377 100644
--- a/networkx/readwrite/gexf.py
+++ b/networkx/readwrite/gexf.py
@@ -18,15 +18,22 @@ specification and http://gexf.net/basic.html for examples.
 """
 import itertools
 import time
-from xml.etree.ElementTree import Element, ElementTree, SubElement, register_namespace, tostring
+from xml.etree.ElementTree import (
+    Element,
+    ElementTree,
+    SubElement,
+    register_namespace,
+    tostring,
+)
+
 import networkx as nx
 from networkx.utils import open_file
-__all__ = ['write_gexf', 'read_gexf', 'relabel_gexf_graph', 'generate_gexf']

+__all__ = ["write_gexf", "read_gexf", "relabel_gexf_graph", "generate_gexf"]

-@open_file(1, mode='wb')
-def write_gexf(G, path, encoding='utf-8', prettyprint=True, version='1.2draft'
-    ):
+
+@open_file(1, mode="wb")
+def write_gexf(G, path, encoding="utf-8", prettyprint=True, version="1.2draft"):
     """Write G in GEXF format to path.

     "GEXF (Graph Exchange XML Format) is a language for describing
@@ -75,10 +82,12 @@ def write_gexf(G, path, encoding='utf-8', prettyprint=True, version='1.2draft'
     .. [1] GEXF File Format, http://gexf.net/
     .. [2] GEXF schema, http://gexf.net/schema.html
     """
-    pass
+    writer = GEXFWriter(encoding=encoding, prettyprint=prettyprint, version=version)
+    writer.add_graph(G)
+    writer.write(path)


-def generate_gexf(G, encoding='utf-8', prettyprint=True, version='1.2draft'):
+def generate_gexf(G, encoding="utf-8", prettyprint=True, version="1.2draft"):
     """Generate lines of GEXF format representation of G.

     "GEXF (Graph Exchange XML Format) is a language for describing
@@ -100,8 +109,7 @@ def generate_gexf(G, encoding='utf-8', prettyprint=True, version='1.2draft'):
     Examples
     --------
     >>> G = nx.path_graph(4)
-    >>> linefeed = chr(10)  # linefeed=
-
+    >>> linefeed = chr(10)  # linefeed=\n
     >>> s = linefeed.join(nx.generate_gexf(G))
     >>> for line in nx.generate_gexf(G):  # doctest: +SKIP
     ...     print(line)
@@ -119,12 +127,14 @@ def generate_gexf(G, encoding='utf-8', prettyprint=True, version='1.2draft'):
     ----------
     .. [1] GEXF File Format, https://gephi.org/gexf/format/
     """
-    pass
+    writer = GEXFWriter(encoding=encoding, prettyprint=prettyprint, version=version)
+    writer.add_graph(G)
+    yield from str(writer).splitlines()


-@open_file(0, mode='rb')
+@open_file(0, mode="rb")
 @nx._dispatchable(graphs=None, returns_graph=True)
-def read_gexf(path, node_type=None, relabel=False, version='1.2draft'):
+def read_gexf(path, node_type=None, relabel=False, version="1.2draft"):
     """Read graph in GEXF format from path.

     "GEXF (Graph Exchange XML Format) is a language for describing
@@ -159,51 +169,151 @@ def read_gexf(path, node_type=None, relabel=False, version='1.2draft'):
     ----------
     .. [1] GEXF File Format, http://gexf.net/
     """
-    pass
+    reader = GEXFReader(node_type=node_type, version=version)
+    if relabel:
+        G = relabel_gexf_graph(reader(path))
+    else:
+        G = reader(path)
+    return G
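
A GEXF write/read round-trip sketch; node_type=int undoes the string conversion applied on write, and the reader downgrades to a plain Graph when no parallel edges are found:

    import io

    buf = io.BytesIO()
    nx.write_gexf(nx.path_graph(3), buf)
    buf.seek(0)
    H = nx.read_gexf(buf, node_type=int)
    print(sorted(H.edges()))  # [(0, 1), (1, 2)]
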


 class GEXF:
-    versions = {'1.1draft': {'NS_GEXF': 'http://www.gexf.net/1.1draft',
-        'NS_VIZ': 'http://www.gexf.net/1.1draft/viz', 'NS_XSI':
-        'http://www.w3.org/2001/XMLSchema-instance', 'SCHEMALOCATION': ' '.
-        join(['http://www.gexf.net/1.1draft',
-        'http://www.gexf.net/1.1draft/gexf.xsd']), 'VERSION': '1.1'},
-        '1.2draft': {'NS_GEXF': 'http://www.gexf.net/1.2draft', 'NS_VIZ':
-        'http://www.gexf.net/1.2draft/viz', 'NS_XSI':
-        'http://www.w3.org/2001/XMLSchema-instance', 'SCHEMALOCATION': ' '.
-        join(['http://www.gexf.net/1.2draft',
-        'http://www.gexf.net/1.2draft/gexf.xsd']), 'VERSION': '1.2'}}
-    convert_bool = {'true': True, 'false': False, 'True': True, 'False': 
-        False, '0': False, (0): False, '1': True, (1): True}
+    versions = {
+        "1.1draft": {
+            "NS_GEXF": "http://www.gexf.net/1.1draft",
+            "NS_VIZ": "http://www.gexf.net/1.1draft/viz",
+            "NS_XSI": "http://www.w3.org/2001/XMLSchema-instance",
+            "SCHEMALOCATION": " ".join(
+                [
+                    "http://www.gexf.net/1.1draft",
+                    "http://www.gexf.net/1.1draft/gexf.xsd",
+                ]
+            ),
+            "VERSION": "1.1",
+        },
+        "1.2draft": {
+            "NS_GEXF": "http://www.gexf.net/1.2draft",
+            "NS_VIZ": "http://www.gexf.net/1.2draft/viz",
+            "NS_XSI": "http://www.w3.org/2001/XMLSchema-instance",
+            "SCHEMALOCATION": " ".join(
+                [
+                    "http://www.gexf.net/1.2draft",
+                    "http://www.gexf.net/1.2draft/gexf.xsd",
+                ]
+            ),
+            "VERSION": "1.2",
+        },
+    }

+    def construct_types(self):
+        types = [
+            (int, "integer"),
+            (float, "float"),
+            (float, "double"),
+            (bool, "boolean"),
+            (list, "string"),
+            (dict, "string"),
+            (int, "long"),
+            (str, "liststring"),
+            (str, "anyURI"),
+            (str, "string"),
+        ]
+
+        # These additions to types allow writing numpy types
+        try:
+            import numpy as np
+        except ImportError:
+            pass
+        else:
+            # prepend so that python types are created upon read (last entry wins)
+            types = [
+                (np.float64, "float"),
+                (np.float32, "float"),
+                (np.float16, "float"),
+                (np.int_, "int"),
+                (np.int8, "int"),
+                (np.int16, "int"),
+                (np.int32, "int"),
+                (np.int64, "int"),
+                (np.uint8, "int"),
+                (np.uint16, "int"),
+                (np.uint32, "int"),
+                (np.uint64, "int"),
+                (np.int_, "int"),
+                (np.intc, "int"),
+                (np.intp, "int"),
+            ] + types
+
+        self.xml_type = dict(types)
+        self.python_type = dict(reversed(a) for a in types)
+
+    # http://www.w3.org/TR/xmlschema-2/#boolean
+    convert_bool = {
+        "true": True,
+        "false": False,
+        "True": True,
+        "False": False,
+        "0": False,
+        0: False,
+        "1": True,
+        1: True,
+    }
+
+    def set_version(self, version):
+        d = self.versions.get(version)
+        if d is None:
+            raise nx.NetworkXError(f"Unknown GEXF version {version}.")
+        self.NS_GEXF = d["NS_GEXF"]
+        self.NS_VIZ = d["NS_VIZ"]
+        self.NS_XSI = d["NS_XSI"]
+        self.SCHEMALOCATION = d["SCHEMALOCATION"]
+        self.VERSION = d["VERSION"]
+        self.version = version

-class GEXFWriter(GEXF):

-    def __init__(self, graph=None, encoding='utf-8', prettyprint=True,
-        version='1.2draft'):
+class GEXFWriter(GEXF):
+    # class for writing GEXF format files
+    # use write_gexf() function
+    def __init__(
+        self, graph=None, encoding="utf-8", prettyprint=True, version="1.2draft"
+    ):
         self.construct_types()
         self.prettyprint = prettyprint
         self.encoding = encoding
         self.set_version(version)
-        self.xml = Element('gexf', {'xmlns': self.NS_GEXF, 'xmlns:xsi':
-            self.NS_XSI, 'xsi:schemaLocation': self.SCHEMALOCATION,
-            'version': self.VERSION})
-        meta_element = Element('meta')
-        subelement_text = f'NetworkX {nx.__version__}'
-        SubElement(meta_element, 'creator').text = subelement_text
-        meta_element.set('lastmodifieddate', time.strftime('%Y-%m-%d'))
+        self.xml = Element(
+            "gexf",
+            {
+                "xmlns": self.NS_GEXF,
+                "xmlns:xsi": self.NS_XSI,
+                "xsi:schemaLocation": self.SCHEMALOCATION,
+                "version": self.VERSION,
+            },
+        )
+
+        # Make meta element a non-graph element
+        # Also add lastmodifieddate as attribute, not tag
+        meta_element = Element("meta")
+        subelement_text = f"NetworkX {nx.__version__}"
+        SubElement(meta_element, "creator").text = subelement_text
+        meta_element.set("lastmodifieddate", time.strftime("%Y-%m-%d"))
         self.xml.append(meta_element)
-        register_namespace('viz', self.NS_VIZ)
+
+        register_namespace("viz", self.NS_VIZ)
+
+        # counters for edge and attribute identifiers
         self.edge_id = itertools.count()
         self.attr_id = itertools.count()
         self.all_edge_ids = set()
+        # default attributes are stored in dictionaries
         self.attr = {}
-        self.attr['node'] = {}
-        self.attr['edge'] = {}
-        self.attr['node']['dynamic'] = {}
-        self.attr['node']['static'] = {}
-        self.attr['edge']['dynamic'] = {}
-        self.attr['edge']['static'] = {}
+        self.attr["node"] = {}
+        self.attr["edge"] = {}
+        self.attr["node"]["dynamic"] = {}
+        self.attr["node"]["static"] = {}
+        self.attr["edge"]["dynamic"] = {}
+        self.attr["edge"]["static"] = {}
+
         if graph is not None:
             self.add_graph(graph)

@@ -213,26 +323,694 @@ class GEXFWriter(GEXF):
         s = tostring(self.xml).decode(self.encoding)
         return s

+    def add_graph(self, G):
+        # first pass through G collecting edge ids
+        for u, v, dd in G.edges(data=True):
+            eid = dd.get("id")
+            if eid is not None:
+                self.all_edge_ids.add(str(eid))
+        # set graph attributes
+        if G.graph.get("mode") == "dynamic":
+            mode = "dynamic"
+        else:
+            mode = "static"
+        # Add a graph element to the XML
+        if G.is_directed():
+            default = "directed"
+        else:
+            default = "undirected"
+        name = G.graph.get("name", "")
+        graph_element = Element("graph", defaultedgetype=default, mode=mode, name=name)
+        self.graph_element = graph_element
+        self.add_nodes(G, graph_element)
+        self.add_edges(G, graph_element)
+        self.xml.append(graph_element)

-class GEXFReader(GEXF):
+    def add_nodes(self, G, graph_element):
+        nodes_element = Element("nodes")
+        for node, data in G.nodes(data=True):
+            node_data = data.copy()
+            node_id = str(node_data.pop("id", node))
+            kw = {"id": node_id}
+            label = str(node_data.pop("label", node))
+            kw["label"] = label
+            try:
+                pid = node_data.pop("pid")
+                kw["pid"] = str(pid)
+            except KeyError:
+                pass
+            try:
+                start = node_data.pop("start")
+                kw["start"] = str(start)
+                self.alter_graph_mode_timeformat(start)
+            except KeyError:
+                pass
+            try:
+                end = node_data.pop("end")
+                kw["end"] = str(end)
+                self.alter_graph_mode_timeformat(end)
+            except KeyError:
+                pass
+            # add node element with attributes
+            node_element = Element("node", **kw)
+            # add node element and attr subelements
+            default = G.graph.get("node_default", {})
+            node_data = self.add_parents(node_element, node_data)
+            if self.VERSION == "1.1":
+                node_data = self.add_slices(node_element, node_data)
+            else:
+                node_data = self.add_spells(node_element, node_data)
+            node_data = self.add_viz(node_element, node_data)
+            node_data = self.add_attributes("node", node_element, node_data, default)
+            nodes_element.append(node_element)
+        graph_element.append(nodes_element)
+
+    def add_edges(self, G, graph_element):
+        def edge_key_data(G):
+            # helper function to unify multigraph and graph edge iterator
+            if G.is_multigraph():
+                for u, v, key, data in G.edges(data=True, keys=True):
+                    edge_data = data.copy()
+                    edge_data.update(key=key)
+                    edge_id = edge_data.pop("id", None)
+                    if edge_id is None:
+                        edge_id = next(self.edge_id)
+                        while str(edge_id) in self.all_edge_ids:
+                            edge_id = next(self.edge_id)
+                        self.all_edge_ids.add(str(edge_id))
+                    yield u, v, edge_id, edge_data
+            else:
+                for u, v, data in G.edges(data=True):
+                    edge_data = data.copy()
+                    edge_id = edge_data.pop("id", None)
+                    if edge_id is None:
+                        edge_id = next(self.edge_id)
+                        while str(edge_id) in self.all_edge_ids:
+                            edge_id = next(self.edge_id)
+                        self.all_edge_ids.add(str(edge_id))
+                    yield u, v, edge_id, edge_data
+
+        edges_element = Element("edges")
+        for u, v, key, edge_data in edge_key_data(G):
+            kw = {"id": str(key)}
+            try:
+                edge_label = edge_data.pop("label")
+                kw["label"] = str(edge_label)
+            except KeyError:
+                pass
+            try:
+                edge_weight = edge_data.pop("weight")
+                kw["weight"] = str(edge_weight)
+            except KeyError:
+                pass
+            try:
+                edge_type = edge_data.pop("type")
+                kw["type"] = str(edge_type)
+            except KeyError:
+                pass
+            try:
+                start = edge_data.pop("start")
+                kw["start"] = str(start)
+                self.alter_graph_mode_timeformat(start)
+            except KeyError:
+                pass
+            try:
+                end = edge_data.pop("end")
+                kw["end"] = str(end)
+                self.alter_graph_mode_timeformat(end)
+            except KeyError:
+                pass
+            source_id = str(G.nodes[u].get("id", u))
+            target_id = str(G.nodes[v].get("id", v))
+            edge_element = Element("edge", source=source_id, target=target_id, **kw)
+            default = G.graph.get("edge_default", {})
+            if self.VERSION == "1.1":
+                edge_data = self.add_slices(edge_element, edge_data)
+            else:
+                edge_data = self.add_spells(edge_element, edge_data)
+            edge_data = self.add_viz(edge_element, edge_data)
+            edge_data = self.add_attributes("edge", edge_element, edge_data, default)
+            edges_element.append(edge_element)
+        graph_element.append(edges_element)

-    def __init__(self, node_type=None, version='1.2draft'):
+    def add_attributes(self, node_or_edge, xml_obj, data, default):
+        # Add attrvalues to node or edge
+        attvalues = Element("attvalues")
+        if len(data) == 0:
+            return data
+        mode = "static"
+        for k, v in data.items():
+            # rename generic multigraph key to avoid any name conflict
+            if k == "key":
+                k = "networkx_key"
+            val_type = type(v)
+            if val_type not in self.xml_type:
+                raise TypeError(f"attribute value type is not allowed: {val_type}")
+            if isinstance(v, list):
+                # dynamic data
+                for val, start, end in v:
+                    val_type = type(val)
+                    if start is not None or end is not None:
+                        mode = "dynamic"
+                        self.alter_graph_mode_timeformat(start)
+                        self.alter_graph_mode_timeformat(end)
+                        break
+                attr_id = self.get_attr_id(
+                    str(k), self.xml_type[val_type], node_or_edge, default, mode
+                )
+                for val, start, end in v:
+                    e = Element("attvalue")
+                    e.attrib["for"] = attr_id
+                    e.attrib["value"] = str(val)
+                    # Handle nan, inf, -inf differently
+                    if val_type == float:
+                        if e.attrib["value"] == "inf":
+                            e.attrib["value"] = "INF"
+                        elif e.attrib["value"] == "nan":
+                            e.attrib["value"] = "NaN"
+                        elif e.attrib["value"] == "-inf":
+                            e.attrib["value"] = "-INF"
+                    if start is not None:
+                        e.attrib["start"] = str(start)
+                    if end is not None:
+                        e.attrib["end"] = str(end)
+                    attvalues.append(e)
+            else:
+                # static data
+                mode = "static"
+                attr_id = self.get_attr_id(
+                    str(k), self.xml_type[val_type], node_or_edge, default, mode
+                )
+                e = Element("attvalue")
+                e.attrib["for"] = attr_id
+                if isinstance(v, bool):
+                    e.attrib["value"] = str(v).lower()
+                else:
+                    e.attrib["value"] = str(v)
+                    # Handle float nan, inf, -inf differently
+                    if val_type == float:
+                        if e.attrib["value"] == "inf":
+                            e.attrib["value"] = "INF"
+                        elif e.attrib["value"] == "nan":
+                            e.attrib["value"] = "NaN"
+                        elif e.attrib["value"] == "-inf":
+                            e.attrib["value"] = "-INF"
+                attvalues.append(e)
+        xml_obj.append(attvalues)
+        return data
+
+    def get_attr_id(self, title, attr_type, edge_or_node, default, mode):
+        # find the id of the attribute or generate a new id
+        try:
+            return self.attr[edge_or_node][mode][title]
+        except KeyError:
+            # generate new id
+            new_id = str(next(self.attr_id))
+            self.attr[edge_or_node][mode][title] = new_id
+            attr_kwargs = {"id": new_id, "title": title, "type": attr_type}
+            attribute = Element("attribute", **attr_kwargs)
+            # add subelement for data default value if present
+            default_title = default.get(title)
+            if default_title is not None:
+                default_element = Element("default")
+                default_element.text = str(default_title)
+                attribute.append(default_element)
+            # now insert it into the XML
+            attributes_element = None
+            for a in self.graph_element.findall("attributes"):
+                # find existing attributes element by class and mode
+                a_class = a.get("class")
+                a_mode = a.get("mode", "static")
+                if a_class == edge_or_node and a_mode == mode:
+                    attributes_element = a
+            if attributes_element is None:
+                # create new attributes element
+                attr_kwargs = {"mode": mode, "class": edge_or_node}
+                attributes_element = Element("attributes", **attr_kwargs)
+                self.graph_element.insert(0, attributes_element)
+            attributes_element.append(attribute)
+        return new_id
+
+    def add_viz(self, element, node_data):
+        viz = node_data.pop("viz", False)
+        if viz:
+            color = viz.get("color")
+            if color is not None:
+                if self.VERSION == "1.1":
+                    e = Element(
+                        f"{{{self.NS_VIZ}}}color",
+                        r=str(color.get("r")),
+                        g=str(color.get("g")),
+                        b=str(color.get("b")),
+                    )
+                else:
+                    e = Element(
+                        f"{{{self.NS_VIZ}}}color",
+                        r=str(color.get("r")),
+                        g=str(color.get("g")),
+                        b=str(color.get("b")),
+                        a=str(color.get("a", 1.0)),
+                    )
+                element.append(e)
+
+            size = viz.get("size")
+            if size is not None:
+                e = Element(f"{{{self.NS_VIZ}}}size", value=str(size))
+                element.append(e)
+
+            thickness = viz.get("thickness")
+            if thickness is not None:
+                e = Element(f"{{{self.NS_VIZ}}}thickness", value=str(thickness))
+                element.append(e)
+
+            shape = viz.get("shape")
+            if shape is not None:
+                if shape.startswith("http"):
+                    e = Element(
+                        f"{{{self.NS_VIZ}}}shape", value="image", uri=str(shape)
+                    )
+                else:
+                    e = Element(f"{{{self.NS_VIZ}}}shape", value=str(shape))
+                element.append(e)
+
+            position = viz.get("position")
+            if position is not None:
+                e = Element(
+                    f"{{{self.NS_VIZ}}}position",
+                    x=str(position.get("x")),
+                    y=str(position.get("y")),
+                    z=str(position.get("z")),
+                )
+                element.append(e)
+        return node_data
+
+    def add_parents(self, node_element, node_data):
+        parents = node_data.pop("parents", False)
+        if parents:
+            parents_element = Element("parents")
+            for p in parents:
+                e = Element("parent")
+                e.attrib["for"] = str(p)
+                parents_element.append(e)
+            node_element.append(parents_element)
+        return node_data
+
+    def add_slices(self, node_or_edge_element, node_or_edge_data):
+        slices = node_or_edge_data.pop("slices", False)
+        if slices:
+            slices_element = Element("slices")
+            for start, end in slices:
+                e = Element("slice", start=str(start), end=str(end))
+                slices_element.append(e)
+            node_or_edge_element.append(slices_element)
+        return node_or_edge_data
+
+    def add_spells(self, node_or_edge_element, node_or_edge_data):
+        spells = node_or_edge_data.pop("spells", False)
+        if spells:
+            spells_element = Element("spells")
+            for start, end in spells:
+                e = Element("spell")
+                if start is not None:
+                    e.attrib["start"] = str(start)
+                    self.alter_graph_mode_timeformat(start)
+                if end is not None:
+                    e.attrib["end"] = str(end)
+                    self.alter_graph_mode_timeformat(end)
+                spells_element.append(e)
+            node_or_edge_element.append(spells_element)
+        return node_or_edge_data
+
+    def alter_graph_mode_timeformat(self, start_or_end):
+        # If 'start' or 'end' appears, alter Graph mode to dynamic and
+        # set timeformat
+        if self.graph_element.get("mode") == "static":
+            if start_or_end is not None:
+                if isinstance(start_or_end, str):
+                    timeformat = "date"
+                elif isinstance(start_or_end, float):
+                    timeformat = "double"
+                elif isinstance(start_or_end, int):
+                    timeformat = "long"
+                else:
+                    raise nx.NetworkXError(
+                        "timeformat should be of the type int, float or str"
+                    )
+                self.graph_element.set("timeformat", timeformat)
+                self.graph_element.set("mode", "dynamic")
+
+    def write(self, fh):
+        # Serialize graph G in GEXF to the open fh
+        if self.prettyprint:
+            self.indent(self.xml)
+        document = ElementTree(self.xml)
+        document.write(fh, encoding=self.encoding, xml_declaration=True)
+
+    def indent(self, elem, level=0):
+        # in-place prettyprint formatter
+        i = "\n" + "  " * level
+        if len(elem):
+            if not elem.text or not elem.text.strip():
+                elem.text = i + "  "
+            if not elem.tail or not elem.tail.strip():
+                elem.tail = i
+            for elem in elem:
+                self.indent(elem, level + 1)
+            if not elem.tail or not elem.tail.strip():
+                elem.tail = i
+        else:
+            if level and (not elem.tail or not elem.tail.strip()):
+                elem.tail = i
+
+
+class GEXFReader(GEXF):
+    # Class to read GEXF format files
+    # use read_gexf() function
+    def __init__(self, node_type=None, version="1.2draft"):
         self.construct_types()
         self.node_type = node_type
+        # assume simple graph and test for multigraph on read
         self.simple_graph = True
         self.set_version(version)

     def __call__(self, stream):
         self.xml = ElementTree(file=stream)
-        g = self.xml.find(f'{{{self.NS_GEXF}}}graph')
+        g = self.xml.find(f"{{{self.NS_GEXF}}}graph")
         if g is not None:
             return self.make_graph(g)
+        # try all the versions
         for version in self.versions:
             self.set_version(version)
-            g = self.xml.find(f'{{{self.NS_GEXF}}}graph')
+            g = self.xml.find(f"{{{self.NS_GEXF}}}graph")
             if g is not None:
                 return self.make_graph(g)
-        raise nx.NetworkXError('No <graph> element in GEXF file.')
+        raise nx.NetworkXError("No <graph> element in GEXF file.")
+
+    def make_graph(self, graph_xml):
+        # start with empty MultiDiGraph or MultiGraph
+        edgedefault = graph_xml.get("defaultedgetype", None)
+        if edgedefault == "directed":
+            G = nx.MultiDiGraph()
+        else:
+            G = nx.MultiGraph()
+
+        # graph attributes
+        graph_name = graph_xml.get("name", "")
+        if graph_name != "":
+            G.graph["name"] = graph_name
+        graph_start = graph_xml.get("start")
+        if graph_start is not None:
+            G.graph["start"] = graph_start
+        graph_end = graph_xml.get("end")
+        if graph_end is not None:
+            G.graph["end"] = graph_end
+        graph_mode = graph_xml.get("mode", "")
+        if graph_mode == "dynamic":
+            G.graph["mode"] = "dynamic"
+        else:
+            G.graph["mode"] = "static"
+
+        # timeformat
+        self.timeformat = graph_xml.get("timeformat")
+        if self.timeformat == "date":
+            self.timeformat = "string"
+
+        # node and edge attributes
+        attributes_elements = graph_xml.findall(f"{{{self.NS_GEXF}}}attributes")
+        # dictionaries to hold attributes and attribute defaults
+        node_attr = {}
+        node_default = {}
+        edge_attr = {}
+        edge_default = {}
+        for a in attributes_elements:
+            attr_class = a.get("class")
+            if attr_class == "node":
+                na, nd = self.find_gexf_attributes(a)
+                node_attr.update(na)
+                node_default.update(nd)
+                G.graph["node_default"] = node_default
+            elif attr_class == "edge":
+                ea, ed = self.find_gexf_attributes(a)
+                edge_attr.update(ea)
+                edge_default.update(ed)
+                G.graph["edge_default"] = edge_default
+            else:
+                raise  # unknown attribute class
+
+        # Hack to handle Gephi0.7beta bug
+        # add weight attribute
+        ea = {"weight": {"type": "double", "mode": "static", "title": "weight"}}
+        ed = {}
+        edge_attr.update(ea)
+        edge_default.update(ed)
+        G.graph["edge_default"] = edge_default
+
+        # add nodes
+        nodes_element = graph_xml.find(f"{{{self.NS_GEXF}}}nodes")
+        if nodes_element is not None:
+            for node_xml in nodes_element.findall(f"{{{self.NS_GEXF}}}node"):
+                self.add_node(G, node_xml, node_attr)
+
+        # add edges
+        edges_element = graph_xml.find(f"{{{self.NS_GEXF}}}edges")
+        if edges_element is not None:
+            for edge_xml in edges_element.findall(f"{{{self.NS_GEXF}}}edge"):
+                self.add_edge(G, edge_xml, edge_attr)
+
+        # switch to Graph or DiGraph if no parallel edges were found.
+        if self.simple_graph:
+            if G.is_directed():
+                G = nx.DiGraph(G)
+            else:
+                G = nx.Graph(G)
+        return G
+
+    def add_node(self, G, node_xml, node_attr, node_pid=None):
+        # add a single node with attributes to the graph
+
+        # get attributes and subattributes for node
+        data = self.decode_attr_elements(node_attr, node_xml)
+        data = self.add_parents(data, node_xml)  # add any parents
+        if self.VERSION == "1.1":
+            data = self.add_slices(data, node_xml)  # add slices
+        else:
+            data = self.add_spells(data, node_xml)  # add spells
+        data = self.add_viz(data, node_xml)  # add viz
+        data = self.add_start_end(data, node_xml)  # add start/end
+
+        # find the node id and cast it to the appropriate type
+        node_id = node_xml.get("id")
+        if self.node_type is not None:
+            node_id = self.node_type(node_id)
+
+        # every node should have a label
+        node_label = node_xml.get("label")
+        data["label"] = node_label
+
+        # parent node id
+        node_pid = node_xml.get("pid", node_pid)
+        if node_pid is not None:
+            data["pid"] = node_pid
+
+        # check for subnodes, recursive
+        subnodes = node_xml.find(f"{{{self.NS_GEXF}}}nodes")
+        if subnodes is not None:
+            for node_xml in subnodes.findall(f"{{{self.NS_GEXF}}}node"):
+                self.add_node(G, node_xml, node_attr, node_pid=node_id)
+
+        G.add_node(node_id, **data)
+
+    def add_start_end(self, data, xml):
+        # start and end times
+        ttype = self.timeformat
+        node_start = xml.get("start")
+        if node_start is not None:
+            data["start"] = self.python_type[ttype](node_start)
+        node_end = xml.get("end")
+        if node_end is not None:
+            data["end"] = self.python_type[ttype](node_end)
+        return data
+
+    def add_viz(self, data, node_xml):
+        # add viz element for node
+        viz = {}
+        color = node_xml.find(f"{{{self.NS_VIZ}}}color")
+        if color is not None:
+            if self.VERSION == "1.1":
+                viz["color"] = {
+                    "r": int(color.get("r")),
+                    "g": int(color.get("g")),
+                    "b": int(color.get("b")),
+                }
+            else:
+                viz["color"] = {
+                    "r": int(color.get("r")),
+                    "g": int(color.get("g")),
+                    "b": int(color.get("b")),
+                    "a": float(color.get("a", 1)),
+                }
+
+        size = node_xml.find(f"{{{self.NS_VIZ}}}size")
+        if size is not None:
+            viz["size"] = float(size.get("value"))
+
+        thickness = node_xml.find(f"{{{self.NS_VIZ}}}thickness")
+        if thickness is not None:
+            viz["thickness"] = float(thickness.get("value"))
+
+        shape = node_xml.find(f"{{{self.NS_VIZ}}}shape")
+        if shape is not None:
+            viz["shape"] = shape.get("shape")
+            if viz["shape"] == "image":
+                viz["shape"] = shape.get("uri")
+
+        position = node_xml.find(f"{{{self.NS_VIZ}}}position")
+        if position is not None:
+            viz["position"] = {
+                "x": float(position.get("x", 0)),
+                "y": float(position.get("y", 0)),
+                "z": float(position.get("z", 0)),
+            }
+
+        if len(viz) > 0:
+            data["viz"] = viz
+        return data
+
+    def add_parents(self, data, node_xml):
+        parents_element = node_xml.find(f"{{{self.NS_GEXF}}}parents")
+        if parents_element is not None:
+            data["parents"] = []
+            for p in parents_element.findall(f"{{{self.NS_GEXF}}}parent"):
+                parent = p.get("for")
+                data["parents"].append(parent)
+        return data
+
+    def add_slices(self, data, node_or_edge_xml):
+        slices_element = node_or_edge_xml.find(f"{{{self.NS_GEXF}}}slices")
+        if slices_element is not None:
+            data["slices"] = []
+            for s in slices_element.findall(f"{{{self.NS_GEXF}}}slice"):
+                start = s.get("start")
+                end = s.get("end")
+                data["slices"].append((start, end))
+        return data
+
+    def add_spells(self, data, node_or_edge_xml):
+        spells_element = node_or_edge_xml.find(f"{{{self.NS_GEXF}}}spells")
+        if spells_element is not None:
+            data["spells"] = []
+            ttype = self.timeformat
+            for s in spells_element.findall(f"{{{self.NS_GEXF}}}spell"):
+                start = self.python_type[ttype](s.get("start"))
+                end = self.python_type[ttype](s.get("end"))
+                data["spells"].append((start, end))
+        return data
+
+    def add_edge(self, G, edge_element, edge_attr):
+        # add an edge to the graph
+
+        # raise error if we find mixed directed and undirected edges
+        edge_direction = edge_element.get("type")
+        if G.is_directed() and edge_direction == "undirected":
+            raise nx.NetworkXError("Undirected edge found in directed graph.")
+        if (not G.is_directed()) and edge_direction == "directed":
+            raise nx.NetworkXError("Directed edge found in undirected graph.")
+
+        # Get source and target and recast type if required
+        source = edge_element.get("source")
+        target = edge_element.get("target")
+        if self.node_type is not None:
+            source = self.node_type(source)
+            target = self.node_type(target)
+
+        data = self.decode_attr_elements(edge_attr, edge_element)
+        data = self.add_start_end(data, edge_element)
+
+        if self.VERSION == "1.1":
+            data = self.add_slices(data, edge_element)  # add slices
+        else:
+            data = self.add_spells(data, edge_element)  # add spells
+
+        # GEXF stores edge ids as an attribute
+        # NetworkX uses them as keys in multigraphs
+        # if networkx_key is not specified as an attribute
+        edge_id = edge_element.get("id")
+        if edge_id is not None:
+            data["id"] = edge_id
+
+        # check if there is a 'multigraph_key' and use that as edge_id
+        multigraph_key = data.pop("networkx_key", None)
+        if multigraph_key is not None:
+            edge_id = multigraph_key
+
+        weight = edge_element.get("weight")
+        if weight is not None:
+            data["weight"] = float(weight)
+
+        edge_label = edge_element.get("label")
+        if edge_label is not None:
+            data["label"] = edge_label
+
+        if G.has_edge(source, target):
+            # seen this edge before - this is a multigraph
+            self.simple_graph = False
+        G.add_edge(source, target, key=edge_id, **data)
+        if edge_direction == "mutual":
+            G.add_edge(target, source, key=edge_id, **data)
+
+    def decode_attr_elements(self, gexf_keys, obj_xml):
+        # Use the key information to decode the attr XML
+        attr = {}
+        # look for outer '<attvalues>' element
+        attr_element = obj_xml.find(f"{{{self.NS_GEXF}}}attvalues")
+        if attr_element is not None:
+            # loop over <attvalue> elements
+            for a in attr_element.findall(f"{{{self.NS_GEXF}}}attvalue"):
+                key = a.get("for")  # for is required
+                try:  # should be in our gexf_keys dictionary
+                    title = gexf_keys[key]["title"]
+                except KeyError as err:
+                    raise nx.NetworkXError(f"No attribute defined for={key}.") from err
+                atype = gexf_keys[key]["type"]
+                value = a.get("value")
+                if atype == "boolean":
+                    value = self.convert_bool[value]
+                else:
+                    value = self.python_type[atype](value)
+                if gexf_keys[key]["mode"] == "dynamic":
+                    # for dynamic graphs use list of three-tuples
+                    # [(value1,start1,end1), (value2,start2,end2), etc]
+                    ttype = self.timeformat
+                    start = self.python_type[ttype](a.get("start"))
+                    end = self.python_type[ttype](a.get("end"))
+                    if title in attr:
+                        attr[title].append((value, start, end))
+                    else:
+                        attr[title] = [(value, start, end)]
+                else:
+                    # for static graphs just assign the value
+                    attr[title] = value
+        return attr
+
+    def find_gexf_attributes(self, attributes_element):
+        # Extract all the attributes and defaults
+        attrs = {}
+        defaults = {}
+        mode = attributes_element.get("mode")
+        for k in attributes_element.findall(f"{{{self.NS_GEXF}}}attribute"):
+            attr_id = k.get("id")
+            title = k.get("title")
+            atype = k.get("type")
+            attrs[attr_id] = {"title": title, "type": atype, "mode": mode}
+            # check for the 'default' subelement of the key element and add it
+            default = k.find(f"{{{self.NS_GEXF}}}default")
+            if default is not None:
+                if atype == "boolean":
+                    value = self.convert_bool[default.text]
+                else:
+                    value = self.python_type[atype](default.text)
+                defaults[title] = value
+        return attrs, defaults
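
A quick round-trip sketch for the reader above, using the public
nx.write_gexf/nx.read_gexf pair (the in-memory buffer is illustrative;
with node_type left at its default, node ids come back as strings):

    import io
    import networkx as nx

    G = nx.path_graph(3)
    buf = io.BytesIO()
    nx.write_gexf(G, buf)      # serialize to GEXF XML
    buf.seek(0)
    H = nx.read_gexf(buf)      # GEXFReader.make_graph rebuilds the graph
    assert sorted(H.edges()) == [("0", "1"), ("1", "2")]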


 def relabel_gexf_graph(G):
@@ -259,4 +1037,29 @@ def relabel_gexf_graph(G):
     "label" attribute.  It also handles relabeling the specific GEXF
     node attributes "parents", and "pid".
     """
-    pass
+    # build mapping of node labels, do some error checking
+    try:
+        mapping = [(u, G.nodes[u]["label"]) for u in G]
+    except KeyError as err:
+        raise nx.NetworkXError(
+            "Failed to relabel nodes: missing node labels found. Use relabel=False."
+        ) from err
+    x, y = zip(*mapping)
+    if len(set(y)) != len(G):
+        raise nx.NetworkXError(
+            "Failed to relabel nodes: "
+            "duplicate node labels found. "
+            "Use relabel=False."
+        )
+    mapping = dict(mapping)
+    H = nx.relabel_nodes(G, mapping)
+    # relabel attributes
+    for n in G:
+        m = mapping[n]
+        H.nodes[m]["id"] = n
+        H.nodes[m].pop("label")
+        if "pid" in H.nodes[m]:
+            H.nodes[m]["pid"] = mapping[G.nodes[n]["pid"]]
+        if "parents" in H.nodes[m]:
+            H.nodes[m]["parents"] = [mapping[p] for p in G.nodes[n]["parents"]]
+    return H
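
relabel_gexf_graph (exposed as nx.relabel_gexf_graph) is what
read_gexf(..., relabel=True) calls under the hood; a small illustrative
case, assuming every node carries a unique "label" attribute:

    import networkx as nx

    G = nx.Graph()
    G.add_node(0, label="alice")
    G.add_node(1, label="bob")
    G.add_edge(0, 1)
    H = nx.relabel_gexf_graph(G)
    assert set(H) == {"alice", "bob"}
    assert H.nodes["alice"]["id"] == 0   # original id is preserved
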
diff --git a/networkx/readwrite/gml.py b/networkx/readwrite/gml.py
index f9204066b..bec991549 100644
--- a/networkx/readwrite/gml.py
+++ b/networkx/readwrite/gml.py
@@ -35,10 +35,12 @@ from collections import defaultdict
 from enum import Enum
 from io import StringIO
 from typing import Any, NamedTuple
+
 import networkx as nx
 from networkx.exception import NetworkXError
 from networkx.utils import open_file
-__all__ = ['read_gml', 'parse_gml', 'generate_gml', 'write_gml']
+
+__all__ = ["read_gml", "parse_gml", "generate_gml", "write_gml"]


 def escape(text):
@@ -47,12 +49,38 @@ def escape(text):
     Use XML character references for unprintable or non-ASCII
     characters, double quotes and ampersands in a string
     """
-    pass
+
+    def fixup(m):
+        ch = m.group(0)
+        return "&#" + str(ord(ch)) + ";"
+
+    return re.sub('[^ -~]|[&"]', fixup, text)


 def unescape(text):
     """Replace XML character references with the referenced characters"""
-    pass
+
+    def fixup(m):
+        text = m.group(0)
+        if text[1] == "#":
+            # Character reference
+            if text[2] == "x":
+                code = int(text[3:-1], 16)
+            else:
+                code = int(text[2:-1])
+        else:
+            # Named entity
+            try:
+                code = htmlentitydefs.name2codepoint[text[1:-1]]
+            except KeyError:
+                return text  # leave unchanged
+        try:
+            return chr(code)
+        except (ValueError, OverflowError):
+            return text  # leave unchanged
+
+    return re.sub("&(?:[0-9A-Za-z]+|#(?:[0-9]+|x[0-9A-Fa-f]+));", fixup, text)
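
escape() and unescape() are module-internal helpers (not exported); they
round-trip non-ASCII text, quotes and ampersands through XML character
references:

    from networkx.readwrite.gml import escape, unescape

    s = 'caf\u00e9 "quoted" & more'
    enc = escape(s)          # 'caf&#233; &#34;quoted&#34; &#38; more'
    assert unescape(enc) == s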


 def literal_destringizer(rep):
@@ -73,12 +101,19 @@ def literal_destringizer(rep):
     ValueError
         If `rep` is not a Python literal.
     """
-    pass
+    if isinstance(rep, str):
+        orig_rep = rep
+        try:
+            return literal_eval(rep)
+        except SyntaxError as err:
+            raise ValueError(f"{orig_rep!r} is not a valid Python literal") from err
+    else:
+        raise ValueError(f"{rep!r} is not a string")
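
For example, literal_destringizer turns the textual form of a Python
literal back into the value (anything literal_eval accepts):

    from networkx.readwrite.gml import literal_destringizer

    assert literal_destringizer("[1, (2, 3), {'a': 4}]") == [1, (2, 3), {"a": 4}]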


-@open_file(0, mode='rb')
+@open_file(0, mode="rb")
 @nx._dispatchable(graphs=None, returns_graph=True)
-def read_gml(path, label='label', destringizer=None):
+def read_gml(path, label="label", destringizer=None):
     """Read graph in GML format from `path`.

     Parameters
@@ -143,11 +178,25 @@ def read_gml(path, label='label', destringizer=None):
     NodeView((0, 1, 2, 3))

     """
-    pass
+
+    def filter_lines(lines):
+        for line in lines:
+            try:
+                line = line.decode("ascii")
+            except UnicodeDecodeError as err:
+                raise NetworkXError("input is not ASCII-encoded") from err
+            if not isinstance(line, str):
+                line = str(line)
+            if line and line[-1] == "\n":
+                line = line[:-1]
+            yield line
+
+    G = parse_gml_lines(filter_lines(path), label, destringizer)
+    return G
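
A minimal read_gml round trip through write_gml (the file name
"tiny.gml" is illustrative; by default nodes are relabeled by their
"label" attribute, so integer nodes come back as strings):

    import networkx as nx

    nx.write_gml(nx.path_graph(3), "tiny.gml")
    H = nx.read_gml("tiny.gml")
    assert list(H) == ["0", "1", "2"]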


 @nx._dispatchable(graphs=None, returns_graph=True)
-def parse_gml(lines, label='label', destringizer=None):
+def parse_gml(lines, label="label", destringizer=None):
     """Parse GML graph from a string or iterable.

     Parameters
@@ -195,11 +244,38 @@ def parse_gml(lines, label='label', destringizer=None):

     See the module docstring :mod:`networkx.readwrite.gml` for more details.
     """
-    pass
+
+    def decode_line(line):
+        if isinstance(line, bytes):
+            try:
+                line.decode("ascii")
+            except UnicodeDecodeError as err:
+                raise NetworkXError("input is not ASCII-encoded") from err
+        if not isinstance(line, str):
+            line = str(line)
+        return line
+
+    def filter_lines(lines):
+        if isinstance(lines, str):
+            lines = decode_line(lines)
+            lines = lines.splitlines()
+            yield from lines
+        else:
+            for line in lines:
+                line = decode_line(line)
+                if line and line[-1] == "\n":
+                    line = line[:-1]
+                if line.find("\n") != -1:
+                    raise NetworkXError("input line contains newline")
+                yield line
+
+    G = parse_gml_lines(filter_lines(lines), label, destringizer)
+    return G
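
parse_gml accepts the same GML as a single string, which filter_lines
splits and feeds to parse_gml_lines:

    import networkx as nx

    data = """graph [
      node [ id 0 label "a" ]
      node [ id 1 label "b" ]
      edge [ source 0 target 1 ]
    ]"""
    G = nx.parse_gml(data)
    assert G.has_edge("a", "b")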


 class Pattern(Enum):
     """encodes the index of each token-matching pattern in `tokenize`."""
+
     KEYS = 0
     REALS = 1
     INTS = 2
@@ -216,12 +292,233 @@ class Token(NamedTuple):
     position: int


-LIST_START_VALUE = '_networkx_list_start'
+LIST_START_VALUE = "_networkx_list_start"


 def parse_gml_lines(lines, label, destringizer):
     """Parse GML `lines` into a graph."""
-    pass
+
+    def tokenize():
+        patterns = [
+            r"[A-Za-z][0-9A-Za-z_]*\b",  # keys
+            # reals
+            r"[+-]?(?:[0-9]*\.[0-9]+|[0-9]+\.[0-9]*|INF)(?:[Ee][+-]?[0-9]+)?",
+            r"[+-]?[0-9]+",  # ints
+            r'".*?"',  # strings
+            r"\[",  # dict start
+            r"\]",  # dict end
+            r"#.*$|\s+",  # comments and whitespaces
+        ]
+        tokens = re.compile("|".join(f"({pattern})" for pattern in patterns))
+        lineno = 0
+        multilines = []  # entries spread across multiple lines
+        for line in lines:
+            pos = 0
+
+            # deal with entries spread across multiple lines
+            #
+            # if we ever need to handle escaped quotes ("), this is the place
+            if multilines:
+                multilines.append(line.strip())
+                if line[-1] == '"':  # closing multiline entry
+                    # multiline entries will be joined by space. cannot
+                    # reintroduce newlines as this will break the tokenizer
+                    line = " ".join(multilines)
+                    multilines = []
+                else:  # continued multiline entry
+                    lineno += 1
+                    continue
+            else:
+                if line.count('"') == 1:  # opening multiline entry
+                    if line.strip()[0] != '"' and line.strip()[-1] != '"':
+                        # since we expect something like key "value", the " should not appear
+                        # at either end; otherwise the tokenizer will pick up the formatting mistake.
+                        multilines = [line.rstrip()]
+                        lineno += 1
+                        continue
+
+            length = len(line)
+
+            while pos < length:
+                match = tokens.match(line, pos)
+                if match is None:
+                    m = f"cannot tokenize {line[pos:]} at ({lineno + 1}, {pos + 1})"
+                    raise NetworkXError(m)
+                for i in range(len(patterns)):
+                    group = match.group(i + 1)
+                    if group is not None:
+                        if i == 0:  # keys
+                            value = group.rstrip()
+                        elif i == 1:  # reals
+                            value = float(group)
+                        elif i == 2:  # ints
+                            value = int(group)
+                        else:
+                            value = group
+                        if i != 6:  # comments and whitespaces
+                            yield Token(Pattern(i), value, lineno + 1, pos + 1)
+                        pos += len(group)
+                        break
+            lineno += 1
+        yield Token(None, None, lineno + 1, 1)  # EOF
+
+    def unexpected(curr_token, expected):
+        category, value, lineno, pos = curr_token
+        value = repr(value) if value is not None else "EOF"
+        raise NetworkXError(f"expected {expected}, found {value} at ({lineno}, {pos})")
+
+    def consume(curr_token, category, expected):
+        if curr_token.category == category:
+            return next(tokens)
+        unexpected(curr_token, expected)
+
+    def parse_kv(curr_token):
+        dct = defaultdict(list)
+        while curr_token.category == Pattern.KEYS:
+            key = curr_token.value
+            curr_token = next(tokens)
+            category = curr_token.category
+            if category == Pattern.REALS or category == Pattern.INTS:
+                value = curr_token.value
+                curr_token = next(tokens)
+            elif category == Pattern.STRINGS:
+                value = unescape(curr_token.value[1:-1])
+                if destringizer:
+                    try:
+                        value = destringizer(value)
+                    except ValueError:
+                        pass
+                # Special handling for empty lists and tuples
+                if value == "()":
+                    value = ()
+                if value == "[]":
+                    value = []
+                curr_token = next(tokens)
+            elif category == Pattern.DICT_START:
+                curr_token, value = parse_dict(curr_token)
+            else:
+                # Allow for string convertible id and label values
+                if key in ("id", "label", "source", "target"):
+                    try:
+                        # String convert the token value
+                        value = unescape(str(curr_token.value))
+                        if destringizer:
+                            try:
+                                value = destringizer(value)
+                            except ValueError:
+                                pass
+                        curr_token = next(tokens)
+                    except Exception:
+                        msg = (
+                            "an int, float, string, '[' or string"
+                            + " convertible ASCII value for node id or label"
+                        )
+                        unexpected(curr_token, msg)
+                # Special handling for nan and infinity.  Since the gml language
+                # defines unquoted strings as keys, the numeric and string branches
+                # are skipped and we end up in this special branch, so we need to
+                # convert the current token value to a float for NAN and plain INF.
+                # +/-INF are handled in the pattern for 'reals' in tokenize().  This
+                # allows labels and values to be nan or infinity, but not keys.
+                elif curr_token.value in {"NAN", "INF"}:
+                    value = float(curr_token.value)
+                    curr_token = next(tokens)
+                else:  # Otherwise error out
+                    unexpected(curr_token, "an int, float, string or '['")
+            dct[key].append(value)
+
+        def clean_dict_value(value):
+            if not isinstance(value, list):
+                return value
+            if len(value) == 1:
+                return value[0]
+            if value[0] == LIST_START_VALUE:
+                return value[1:]
+            return value
+
+        dct = {key: clean_dict_value(value) for key, value in dct.items()}
+        return curr_token, dct
+
+    def parse_dict(curr_token):
+        # dict start
+        curr_token = consume(curr_token, Pattern.DICT_START, "'['")
+        # dict contents
+        curr_token, dct = parse_kv(curr_token)
+        # dict end
+        curr_token = consume(curr_token, Pattern.DICT_END, "']'")
+        return curr_token, dct
+
+    def parse_graph():
+        curr_token, dct = parse_kv(next(tokens))
+        if curr_token.category is not None:  # EOF
+            unexpected(curr_token, "EOF")
+        if "graph" not in dct:
+            raise NetworkXError("input contains no graph")
+        graph = dct["graph"]
+        if isinstance(graph, list):
+            raise NetworkXError("input contains more than one graph")
+        return graph
+
+    tokens = tokenize()
+    graph = parse_graph()
+
+    directed = graph.pop("directed", False)
+    multigraph = graph.pop("multigraph", False)
+    if not multigraph:
+        G = nx.DiGraph() if directed else nx.Graph()
+    else:
+        G = nx.MultiDiGraph() if directed else nx.MultiGraph()
+    graph_attr = {k: v for k, v in graph.items() if k not in ("node", "edge")}
+    G.graph.update(graph_attr)
+
+    def pop_attr(dct, category, attr, i):
+        try:
+            return dct.pop(attr)
+        except KeyError as err:
+            raise NetworkXError(f"{category} #{i} has no {attr!r} attribute") from err
+
+    nodes = graph.get("node", [])
+    mapping = {}
+    node_labels = set()
+    for i, node in enumerate(nodes if isinstance(nodes, list) else [nodes]):
+        id = pop_attr(node, "node", "id", i)
+        if id in G:
+            raise NetworkXError(f"node id {id!r} is duplicated")
+        if label is not None and label != "id":
+            node_label = pop_attr(node, "node", label, i)
+            if node_label in node_labels:
+                raise NetworkXError(f"node label {node_label!r} is duplicated")
+            node_labels.add(node_label)
+            mapping[id] = node_label
+        G.add_node(id, **node)
+
+    edges = graph.get("edge", [])
+    for i, edge in enumerate(edges if isinstance(edges, list) else [edges]):
+        source = pop_attr(edge, "edge", "source", i)
+        target = pop_attr(edge, "edge", "target", i)
+        if source not in G:
+            raise NetworkXError(f"edge #{i} has undefined source {source!r}")
+        if target not in G:
+            raise NetworkXError(f"edge #{i} has undefined target {target!r}")
+        if not multigraph:
+            if not G.has_edge(source, target):
+                G.add_edge(source, target, **edge)
+            else:
+                arrow = "->" if directed else "--"
+                msg = f"edge #{i} ({source!r}{arrow}{target!r}) is duplicated"
+                raise nx.NetworkXError(msg)
+        else:
+            key = edge.pop("key", None)
+            if key is not None and G.has_edge(source, target, key):
+                arrow = "->" if directed else "--"
+                msg = f"edge #{i} ({source!r}{arrow}{target!r}, {key!r})"
+                msg2 = 'Hint: If multigraph add "multigraph 1" to file header.'
+                raise nx.NetworkXError(msg + " is duplicated\n" + msg2)
+            G.add_edge(source, target, key, **edge)
+
+    if label is not None and label != "id":
+        G = nx.relabel_nodes(G, mapping)
+    return G
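
One consequence of parse_kv collecting values into a defaultdict(list):
a key repeated inside a GML dict becomes a Python list (a sketch using
the public parse_gml wrapper with a hypothetical "deg" attribute):

    import networkx as nx

    G = nx.parse_gml('graph [ node [ id 0 label "n" deg 1 deg 2 ] ]')
    assert G.nodes["n"]["deg"] == [1, 2]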


 def literal_stringizer(value):
@@ -248,11 +545,85 @@ def literal_stringizer(value):
     The original value can be recovered using the
     :func:`networkx.readwrite.gml.literal_destringizer` function.
     """
-    pass
+
+    def stringize(value):
+        if isinstance(value, int | bool) or value is None:
+            if value is True:  # GML uses 1/0 for boolean values.
+                buf.write(str(1))
+            elif value is False:
+                buf.write(str(0))
+            else:
+                buf.write(str(value))
+        elif isinstance(value, str):
+            text = repr(value)
+            if text[0] != "u":
+                try:
+                    value.encode("latin1")
+                except UnicodeEncodeError:
+                    text = "u" + text
+            buf.write(text)
+        elif isinstance(value, float | complex | str | bytes):
+            buf.write(repr(value))
+        elif isinstance(value, list):
+            buf.write("[")
+            first = True
+            for item in value:
+                if not first:
+                    buf.write(",")
+                else:
+                    first = False
+                stringize(item)
+            buf.write("]")
+        elif isinstance(value, tuple):
+            if len(value) > 1:
+                buf.write("(")
+                first = True
+                for item in value:
+                    if not first:
+                        buf.write(",")
+                    else:
+                        first = False
+                    stringize(item)
+                buf.write(")")
+            elif value:
+                buf.write("(")
+                stringize(value[0])
+                buf.write(",)")
+            else:
+                buf.write("()")
+        elif isinstance(value, dict):
+            buf.write("{")
+            first = True
+            for key, value in value.items():
+                if not first:
+                    buf.write(",")
+                else:
+                    first = False
+                stringize(key)
+                buf.write(":")
+                stringize(value)
+            buf.write("}")
+        elif isinstance(value, set):
+            buf.write("{")
+            first = True
+            for item in value:
+                if not first:
+                    buf.write(",")
+                else:
+                    first = False
+                stringize(item)
+            buf.write("}")
+        else:
+            msg = f"{value!r} cannot be converted into a Python literal"
+            raise ValueError(msg)
+
+    buf = StringIO()
+    stringize(value)
+    return buf.getvalue()
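
literal_stringizer and literal_destringizer are designed as inverses, so
arbitrary Python-literal data survives a GML round trip:

    from networkx.readwrite.gml import literal_destringizer, literal_stringizer

    value = {"xy": (0.5, 1.5), "tags": ["a", "b"]}
    assert literal_destringizer(literal_stringizer(value)) == value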


 def generate_gml(G, stringizer=None):
-    """Generate a single entry of the graph `G` in GML format.
+    r"""Generate a single entry of the graph `G` in GML format.

     Parameters
     ----------
@@ -303,7 +674,7 @@ def generate_gml(G, stringizer=None):
     --------
     >>> G = nx.Graph()
     >>> G.add_node("1")
-    >>> print("\\n".join(nx.generate_gml(G)))
+    >>> print("\n".join(nx.generate_gml(G)))
     graph [
       node [
         id 0
@@ -311,7 +682,7 @@ def generate_gml(G, stringizer=None):
       ]
     ]
     >>> G = nx.MultiGraph([("a", "b"), ("a", "b")])
-    >>> print("\\n".join(nx.generate_gml(G)))
+    >>> print("\n".join(nx.generate_gml(G)))
     graph [
       multigraph 1
       node [
@@ -334,10 +705,115 @@ def generate_gml(G, stringizer=None):
       ]
     ]
     """
-    pass
-
-
-@open_file(1, mode='wb')
+    valid_keys = re.compile("^[A-Za-z][0-9A-Za-z_]*$")
+
+    def stringize(key, value, ignored_keys, indent, in_list=False):
+        if not isinstance(key, str):
+            raise NetworkXError(f"{key!r} is not a string")
+        if not valid_keys.match(key):
+            raise NetworkXError(f"{key!r} is not a valid key")
+        if key not in ignored_keys:
+            if isinstance(value, int | bool):
+                if key == "label":
+                    yield indent + key + ' "' + str(value) + '"'
+                elif value is True:
+                    # python bool is an instance of int
+                    yield indent + key + " 1"
+                elif value is False:
+                    yield indent + key + " 0"
+                # GML only supports signed 32-bit integers
+                elif value < -(2**31) or value >= 2**31:
+                    yield indent + key + ' "' + str(value) + '"'
+                else:
+                    yield indent + key + " " + str(value)
+            elif isinstance(value, float):
+                text = repr(value).upper()
+                # GML matches INF to keys, so prepend + to INF. Use repr(float(*))
+                # instead of string literal to future proof against changes to repr.
+                if text == repr(float("inf")).upper():
+                    text = "+" + text
+                else:
+                    # GML requires that a real literal contain a decimal point, but
+                    # repr may not output a decimal point when the mantissa is
+                    # integral and hence needs fixing.
+                    epos = text.rfind("E")
+                    if epos != -1 and text.find(".", 0, epos) == -1:
+                        text = text[:epos] + "." + text[epos:]
+                if key == "label":
+                    yield indent + key + ' "' + text + '"'
+                else:
+                    yield indent + key + " " + text
+            elif isinstance(value, dict):
+                yield indent + key + " ["
+                next_indent = indent + "  "
+                for key, value in value.items():
+                    yield from stringize(key, value, (), next_indent)
+                yield indent + "]"
+            elif isinstance(value, tuple) and key == "label":
+                yield indent + key + f" \"({','.join(repr(v) for v in value)})\""
+            elif isinstance(value, list | tuple) and key != "label" and not in_list:
+                if len(value) == 0:
+                    yield indent + key + " " + f'"{value!r}"'
+                if len(value) == 1:
+                    yield indent + key + " " + f'"{LIST_START_VALUE}"'
+                for val in value:
+                    yield from stringize(key, val, (), indent, True)
+            else:
+                if stringizer:
+                    try:
+                        value = stringizer(value)
+                    except ValueError as err:
+                        raise NetworkXError(
+                            f"{value!r} cannot be converted into a string"
+                        ) from err
+                if not isinstance(value, str):
+                    raise NetworkXError(f"{value!r} is not a string")
+                yield indent + key + ' "' + escape(value) + '"'
+
+    multigraph = G.is_multigraph()
+    yield "graph ["
+
+    # Output graph attributes
+    if G.is_directed():
+        yield "  directed 1"
+    if multigraph:
+        yield "  multigraph 1"
+    ignored_keys = {"directed", "multigraph", "node", "edge"}
+    for attr, value in G.graph.items():
+        yield from stringize(attr, value, ignored_keys, "  ")
+
+    # Output node data
+    node_id = dict(zip(G, range(len(G))))
+    ignored_keys = {"id", "label"}
+    for node, attrs in G.nodes.items():
+        yield "  node ["
+        yield "    id " + str(node_id[node])
+        yield from stringize("label", node, (), "    ")
+        for attr, value in attrs.items():
+            yield from stringize(attr, value, ignored_keys, "    ")
+        yield "  ]"
+
+    # Output edge data
+    ignored_keys = {"source", "target"}
+    kwargs = {"data": True}
+    if multigraph:
+        ignored_keys.add("key")
+        kwargs["keys"] = True
+    for e in G.edges(**kwargs):
+        yield "  edge ["
+        yield "    source " + str(node_id[e[0]])
+        yield "    target " + str(node_id[e[1]])
+        if multigraph:
+            yield from stringize("key", e[2], (), "    ")
+        for attr, value in e[-1].items():
+            yield from stringize(attr, value, ignored_keys, "    ")
+        yield "  ]"
+    yield "]"
+
+
+@open_file(1, mode="wb")
 def write_gml(G, path, stringizer=None):
     """Write a graph `G` in GML format to the file or file handle `path`.

@@ -398,4 +874,5 @@ def write_gml(G, path, stringizer=None):

     >>> nx.write_gml(G, "test.gml.gz")
     """
-    pass
+    for line in generate_gml(G, stringizer):
+        path.write((line + "\n").encode("ascii"))
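
Because write_gml encodes every generated line as ASCII, non-ASCII
attribute values rely on escape()/unescape() to survive the trip (the
file name "cafe.gml" is illustrative):

    import networkx as nx

    G = nx.Graph(name="caf\u00e9")
    nx.write_gml(G, "cafe.gml")
    assert nx.read_gml("cafe.gml").graph["name"] == "caf\u00e9"
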
diff --git a/networkx/readwrite/graph6.py b/networkx/readwrite/graph6.py
index bde8b1b56..5e2a30aa7 100644
--- a/networkx/readwrite/graph6.py
+++ b/networkx/readwrite/graph6.py
@@ -1,3 +1,5 @@
+# Original author: D. Eppstein, UC Irvine, August 12, 2003.
+# The original code at http://www.ics.uci.edu/~eppstein/PADS/ is public domain.
 """Functions for reading and writing graphs in the *graph6* format.

 The *graph6* file format is suitable for small graphs or large dense
@@ -9,11 +11,12 @@ For more information, see the `graph6`_ homepage.

 """
 from itertools import islice
+
 import networkx as nx
 from networkx.exception import NetworkXError
 from networkx.utils import not_implemented_for, open_file
-__all__ = ['from_graph6_bytes', 'read_graph6', 'to_graph6_bytes',
-    'write_graph6']
+
+__all__ = ["from_graph6_bytes", "read_graph6", "to_graph6_bytes", "write_graph6"]


 def _generate_graph6_bytes(G, nodes, header):
@@ -37,7 +40,24 @@ def _generate_graph6_bytes(G, nodes, header):
     the graph6 format (that is, greater than ``2 ** 36`` nodes).

     """
-    pass
+    n = len(G)
+    if n >= 2**36:
+        raise ValueError(
+            "graph6 is only defined if number of nodes is less than 2 ** 36"
+        )
+    if header:
+        yield b">>graph6<<"
+    for d in n_to_data(n):
+        yield str.encode(chr(d + 63))
+    # This generates the same as `(v in G[u] for u, v in combinations(G, 2))`,
+    # but in "column-major" order instead of "row-major" order.
+    bits = (nodes[j] in G[nodes[i]] for j in range(1, n) for i in range(j))
+    chunk = list(islice(bits, 6))
+    while chunk:
+        d = sum(b << 5 - i for i, b in enumerate(chunk))
+        yield str.encode(chr(d + 63))
+        chunk = list(islice(bits, 6))
+    yield b"\n"


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -78,11 +98,39 @@ def from_graph6_bytes(bytes_in):
            <http://users.cecs.anu.edu.au/~bdm/data/formats.html>

     """
-    pass

+    def bits():
+        """Returns sequence of individual bits from 6-bit-per-value
+        list of data values."""
+        for d in data:
+            for i in [5, 4, 3, 2, 1, 0]:
+                yield (d >> i) & 1
+
+    if bytes_in.startswith(b">>graph6<<"):
+        bytes_in = bytes_in[10:]
+
+    data = [c - 63 for c in bytes_in]
+    if any(c > 63 for c in data):
+        raise ValueError("each input character must be in range(63, 127)")
+
+    n, data = data_to_n(data)
+    nd = (n * (n - 1) // 2 + 5) // 6
+    if len(data) != nd:
+        raise NetworkXError(
+            f"Expected {n * (n - 1) // 2} bits but got {len(data) * 6} in graph6"
+        )

-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+    G = nx.Graph()
+    G.add_nodes_from(range(n))
+    for (i, j), b in zip(((i, j) for j in range(1, n) for i in range(j)), bits()):
+        if b:
+            G.add_edge(i, j)
+
+    return G
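
Decoding reverses the packing, with the optional ">>graph6<<" header
stripped first:

    import networkx as nx

    G = nx.from_graph6_bytes(b">>graph6<<A_")
    assert sorted(G.edges()) == [(0, 1)]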
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 def to_graph6_bytes(G, nodes=None, header=True):
     """Convert a simple undirected graph to bytes in graph6 format.

@@ -128,10 +176,14 @@ def to_graph6_bytes(G, nodes=None, header=True):
            <http://users.cecs.anu.edu.au/~bdm/data/formats.html>

     """
-    pass
+    if nodes is not None:
+        G = G.subgraph(nodes)
+    H = nx.convert_node_labels_to_integers(G)
+    nodes = sorted(H.nodes())
+    return b"".join(_generate_graph6_bytes(H, nodes, header))


-@open_file(0, mode='rb')
+@open_file(0, mode="rb")
 @nx._dispatchable(graphs=None, returns_graph=True)
 def read_graph6(path):
     """Read simple undirected graphs in graph6 format from path.
@@ -183,12 +235,21 @@ def read_graph6(path):
            <http://users.cecs.anu.edu.au/~bdm/data/formats.html>

     """
-    pass
-
-
-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
-@open_file(1, mode='wb')
+    glist = []
+    for line in path:
+        line = line.strip()
+        if not len(line):
+            continue
+        glist.append(from_graph6_bytes(line))
+    if len(glist) == 1:
+        return glist[0]
+    else:
+        return glist
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@open_file(1, mode="wb")
 def write_graph6(G, path, nodes=None, header=True):
     """Write a simple undirected graph to a path in graph6 format.

@@ -244,11 +305,11 @@ def write_graph6(G, path, nodes=None, header=True):
            <http://users.cecs.anu.edu.au/~bdm/data/formats.html>

     """
-    pass
+    return write_graph6_file(G, path, nodes=nodes, header=header)


-@not_implemented_for('directed')
-@not_implemented_for('multigraph')
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
 def write_graph6_file(G, f, nodes=None, header=True):
     """Write a simple undirected graph to a file-like object in graph6 format.

@@ -304,7 +365,12 @@ def write_graph6_file(G, f, nodes=None, header=True):
            <http://users.cecs.anu.edu.au/~bdm/data/formats.html>

     """
-    pass
+    if nodes is not None:
+        G = G.subgraph(nodes)
+    H = nx.convert_node_labels_to_integers(G)
+    nodes = sorted(H.nodes())
+    for b in _generate_graph6_bytes(H, nodes, header):
+        f.write(b)


 def data_to_n(data):
@@ -312,7 +378,19 @@ def data_to_n(data):
     integer sequence.

     Return (value, rest of seq.)"""
-    pass
+    if data[0] <= 62:
+        return data[0], data[1:]
+    if data[1] <= 62:
+        return (data[1] << 12) + (data[2] << 6) + data[3], data[4:]
+    return (
+        (data[2] << 30)
+        + (data[3] << 24)
+        + (data[4] << 18)
+        + (data[5] << 12)
+        + (data[6] << 6)
+        + data[7],
+        data[8:],
+    )


 def n_to_data(n):
@@ -321,4 +399,18 @@ def n_to_data(n):
     This function is undefined if `n` is not in ``range(2 ** 36)``.

     """
-    pass
+    if n <= 62:
+        return [n]
+    elif n <= 258047:
+        return [63, (n >> 12) & 0x3F, (n >> 6) & 0x3F, n & 0x3F]
+    else:  # if n <= 68719476735:
+        return [
+            63,
+            63,
+            (n >> 30) & 0x3F,
+            (n >> 24) & 0x3F,
+            (n >> 18) & 0x3F,
+            (n >> 12) & 0x3F,
+            (n >> 6) & 0x3F,
+            n & 0x3F,
+        ]
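
data_to_n and n_to_data implement graph6's variable-length size prefix
(1 byte for n <= 62, 4 bytes up to 258047, 8 bytes below 2**36) and are
exact inverses:

    from networkx.readwrite.graph6 import data_to_n, n_to_data

    for n in (5, 63, 258047, 258048):
        assert data_to_n(n_to_data(n)) == (n, [])
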
diff --git a/networkx/readwrite/graphml.py b/networkx/readwrite/graphml.py
index 4b7a04ad1..0b05e03a3 100644
--- a/networkx/readwrite/graphml.py
+++ b/networkx/readwrite/graphml.py
@@ -42,17 +42,32 @@ for examples.
 """
 import warnings
 from collections import defaultdict
+
 import networkx as nx
 from networkx.utils import open_file
-__all__ = ['write_graphml', 'read_graphml', 'generate_graphml',
-    'write_graphml_xml', 'write_graphml_lxml', 'parse_graphml',
-    'GraphMLWriter', 'GraphMLReader']
-

-@open_file(1, mode='wb')
-def write_graphml_xml(G, path, encoding='utf-8', prettyprint=True,
-    infer_numeric_types=False, named_key_ids=False, edge_id_from_attribute=None
-    ):
+__all__ = [
+    "write_graphml",
+    "read_graphml",
+    "generate_graphml",
+    "write_graphml_xml",
+    "write_graphml_lxml",
+    "parse_graphml",
+    "GraphMLWriter",
+    "GraphMLReader",
+]
+
+
+@open_file(1, mode="wb")
+def write_graphml_xml(
+    G,
+    path,
+    encoding="utf-8",
+    prettyprint=True,
+    infer_numeric_types=False,
+    named_key_ids=False,
+    edge_id_from_attribute=None,
+):
     """Write G in GraphML XML format to path

     Parameters
@@ -87,13 +102,27 @@ def write_graphml_xml(G, path, encoding='utf-8', prettyprint=True,
     This implementation does not support mixed graphs (directed
     and unidirected edges together) hyperedges, nested graphs, or ports.
     """
-    pass
-
-
-@open_file(1, mode='wb')
-def write_graphml_lxml(G, path, encoding='utf-8', prettyprint=True,
-    infer_numeric_types=False, named_key_ids=False, edge_id_from_attribute=None
-    ):
+    writer = GraphMLWriter(
+        encoding=encoding,
+        prettyprint=prettyprint,
+        infer_numeric_types=infer_numeric_types,
+        named_key_ids=named_key_ids,
+        edge_id_from_attribute=edge_id_from_attribute,
+    )
+    writer.add_graph_element(G)
+    writer.dump(path)
+
+
+@open_file(1, mode="wb")
+def write_graphml_lxml(
+    G,
+    path,
+    encoding="utf-8",
+    prettyprint=True,
+    infer_numeric_types=False,
+    named_key_ids=False,
+    edge_id_from_attribute=None,
+):
     """Write G in GraphML XML format to path

     This function uses the LXML framework and should be faster than
@@ -131,11 +160,38 @@ def write_graphml_lxml(G, path, encoding='utf-8', prettyprint=True,
     This implementation does not support mixed graphs (directed
     and unidirected edges together) hyperedges, nested graphs, or ports.
     """
-    pass
-
-
-def generate_graphml(G, encoding='utf-8', prettyprint=True, named_key_ids=
-    False, edge_id_from_attribute=None):
+    try:
+        import lxml.etree as lxmletree
+    except ImportError:
+        return write_graphml_xml(
+            G,
+            path,
+            encoding,
+            prettyprint,
+            infer_numeric_types,
+            named_key_ids,
+            edge_id_from_attribute,
+        )
+
+    writer = GraphMLWriterLxml(
+        path,
+        graph=G,
+        encoding=encoding,
+        prettyprint=prettyprint,
+        infer_numeric_types=infer_numeric_types,
+        named_key_ids=named_key_ids,
+        edge_id_from_attribute=edge_id_from_attribute,
+    )
+    writer.dump()
+
+
+def generate_graphml(
+    G,
+    encoding="utf-8",
+    prettyprint=True,
+    named_key_ids=False,
+    edge_id_from_attribute=None,
+):
     """Generate GraphML lines for G

     Parameters
@@ -156,8 +212,7 @@ def generate_graphml(G, encoding='utf-8', prettyprint=True, named_key_ids=
     Examples
     --------
     >>> G = nx.path_graph(4)
-    >>> linefeed = chr(10)  # linefeed = 
-
+    >>> linefeed = chr(10)  # linefeed = \n
     >>> s = linefeed.join(nx.generate_graphml(G))
     >>> for line in nx.generate_graphml(G):  # doctest: +SKIP
     ...     print(line)
@@ -167,13 +222,19 @@ def generate_graphml(G, encoding='utf-8', prettyprint=True, named_key_ids=
     This implementation does not support mixed graphs (directed and unidirected
     edges together) hyperedges, nested graphs, or ports.
     """
-    pass
+    writer = GraphMLWriter(
+        encoding=encoding,
+        prettyprint=prettyprint,
+        named_key_ids=named_key_ids,
+        edge_id_from_attribute=edge_id_from_attribute,
+    )
+    writer.add_graph_element(G)
+    yield from str(writer).splitlines()


-@open_file(0, mode='rb')
+@open_file(0, mode="rb")
 @nx._dispatchable(graphs=None, returns_graph=True)
-def read_graphml(path, node_type=str, edge_key_type=int, force_multigraph=False
-    ):
+def read_graphml(path, node_type=str, edge_key_type=int, force_multigraph=False):
     """Read graph in GraphML format from path.

     Parameters
@@ -230,12 +291,25 @@ def read_graphml(path, node_type=str, edge_key_type=int, force_multigraph=False
     the file to "file.graphml.gz".

     """
-    pass
+    reader = GraphMLReader(node_type, edge_key_type, force_multigraph)
+    # need to check for multiple graphs
+    glist = list(reader(path=path))
+    if len(glist) == 0:
+        # If no graph comes back, try looking for an incomplete header
+        header = b'<graphml xmlns="http://graphml.graphdrawing.org/xmlns">'
+        path.seek(0)
+        old_bytes = path.read()
+        new_bytes = old_bytes.replace(b"<graphml>", header)
+        glist = list(reader(string=new_bytes))
+        if len(glist) == 0:
+            raise nx.NetworkXError("file not successfully read as graphml")
+    return glist[0]


 @nx._dispatchable(graphs=None, returns_graph=True)
-def parse_graphml(graphml_string, node_type=str, edge_key_type=int,
-    force_multigraph=False):
+def parse_graphml(
+    graphml_string, node_type=str, edge_key_type=int, force_multigraph=False
+):
     """Read graph in GraphML format from string.

     Parameters
@@ -265,8 +339,7 @@ def parse_graphml(graphml_string, node_type=str, edge_key_type=int,
     Examples
     --------
     >>> G = nx.path_graph(4)
-    >>> linefeed = chr(10)  # linefeed = 
-
+    >>> linefeed = chr(10)  # linefeed = \n
     >>> s = linefeed.join(nx.generate_graphml(G))
     >>> H = nx.parse_graphml(s)

@@ -294,49 +367,136 @@ def parse_graphml(graphml_string, node_type=str, edge_key_type=int,
     will be provided.

     """
-    pass
+    reader = GraphMLReader(node_type, edge_key_type, force_multigraph)
+    # need to check for multiple graphs
+    glist = list(reader(string=graphml_string))
+    if len(glist) == 0:
+        # If no graph comes back, try looking for an incomplete header
+        header = '<graphml xmlns="http://graphml.graphdrawing.org/xmlns">'
+        new_string = graphml_string.replace("<graphml>", header)
+        glist = list(reader(string=new_string))
+        if len(glist) == 0:
+            raise nx.NetworkXError("file not successfully read as graphml")
+    return glist[0]
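
generate_graphml and parse_graphml round-trip a graph through a string;
with the default node_type=str, integer node ids come back as strings:

    import networkx as nx

    s = "\n".join(nx.generate_graphml(nx.path_graph(2)))
    H = nx.parse_graphml(s)
    assert sorted(H.edges()) == [("0", "1")]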


 class GraphML:
-    NS_GRAPHML = 'http://graphml.graphdrawing.org/xmlns'
-    NS_XSI = 'http://www.w3.org/2001/XMLSchema-instance'
-    NS_Y = 'http://www.yworks.com/xml/graphml'
-    SCHEMALOCATION = ' '.join(['http://graphml.graphdrawing.org/xmlns',
-        'http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd'])
-    convert_bool = {'true': True, 'false': False, '0': False, (0): False,
-        '1': True, (1): True}
+    NS_GRAPHML = "http://graphml.graphdrawing.org/xmlns"
+    NS_XSI = "http://www.w3.org/2001/XMLSchema-instance"
+    # xmlns:y="http://www.yworks.com/xml/graphml"
+    NS_Y = "http://www.yworks.com/xml/graphml"
+    SCHEMALOCATION = " ".join(
+        [
+            "http://graphml.graphdrawing.org/xmlns",
+            "http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd",
+        ]
+    )
+
+    def construct_types(self):
+        types = [
+            (int, "integer"),  # for Gephi GraphML bug
+            (str, "yfiles"),
+            (str, "string"),
+            (int, "int"),
+            (int, "long"),
+            (float, "float"),
+            (float, "double"),
+            (bool, "boolean"),
+        ]
+
+        # These additions to types allow writing numpy types
+        try:
+            import numpy as np
+        except ImportError:
+            pass
+        else:
+            # prepend so that python types are created upon read (last entry wins)
+            types = [
+                (np.float64, "float"),
+                (np.float32, "float"),
+                (np.float16, "float"),
+                (np.int_, "int"),
+                (np.int8, "int"),
+                (np.int16, "int"),
+                (np.int32, "int"),
+                (np.int64, "int"),
+                (np.uint8, "int"),
+                (np.uint16, "int"),
+                (np.uint32, "int"),
+                (np.uint64, "int"),
+                (np.int_, "int"),
+                (np.intc, "int"),
+                (np.intp, "int"),
+            ] + types
+
+        self.xml_type = dict(types)
+        self.python_type = dict(reversed(a) for a in types)
+
+    # This page says that data types in GraphML follow Java(TM).
+    #  http://graphml.graphdrawing.org/primer/graphml-primer.html#AttributesDefinition
+    # true and false are the only boolean literals:
+    #  http://en.wikibooks.org/wiki/Java_Programming/Literals#Boolean_Literals
+    convert_bool = {
+        # We use data.lower() in actual use.
+        "true": True,
+        "false": False,
+        # Include integer strings for convenience.
+        "0": False,
+        0: False,
+        "1": True,
+        1: True,
+    }

     def get_xml_type(self, key):
         """Wrapper around the xml_type dict that raises a more informative
         exception message when a user attempts to use data of a type not
         supported by GraphML."""
-        pass
+        try:
+            return self.xml_type[key]
+        except KeyError as err:
+            raise TypeError(
+                f"GraphML does not support type {key} as data values."
+            ) from err


 class GraphMLWriter(GraphML):
-
-    def __init__(self, graph=None, encoding='utf-8', prettyprint=True,
-        infer_numeric_types=False, named_key_ids=False,
-        edge_id_from_attribute=None):
+    def __init__(
+        self,
+        graph=None,
+        encoding="utf-8",
+        prettyprint=True,
+        infer_numeric_types=False,
+        named_key_ids=False,
+        edge_id_from_attribute=None,
+    ):
         self.construct_types()
         from xml.etree.ElementTree import Element
+
         self.myElement = Element
+
         self.infer_numeric_types = infer_numeric_types
         self.prettyprint = prettyprint
         self.named_key_ids = named_key_ids
         self.edge_id_from_attribute = edge_id_from_attribute
         self.encoding = encoding
-        self.xml = self.myElement('graphml', {'xmlns': self.NS_GRAPHML,
-            'xmlns:xsi': self.NS_XSI, 'xsi:schemaLocation': self.
-            SCHEMALOCATION})
+        self.xml = self.myElement(
+            "graphml",
+            {
+                "xmlns": self.NS_GRAPHML,
+                "xmlns:xsi": self.NS_XSI,
+                "xsi:schemaLocation": self.SCHEMALOCATION,
+            },
+        )
         self.keys = {}
         self.attributes = defaultdict(list)
         self.attribute_types = defaultdict(set)
+
         if graph is not None:
             self.add_graph_element(graph)

     def __str__(self):
         from xml.etree.ElementTree import tostring
+
         if self.prettyprint:
             self.indent(self.xml)
         s = tostring(self.xml).decode(self.encoding)
@@ -351,30 +511,176 @@ class GraphMLWriter(GraphML):
         means edges with data named 'weight' are treated separately from nodes
         with data named 'weight'.
         """
-        pass
-
-    def add_data(self, name, element_type, value, scope='all', default=None):
+        if self.infer_numeric_types:
+            types = self.attribute_types[(name, scope)]
+
+            if len(types) > 1:
+                types = {self.get_xml_type(t) for t in types}
+                if "string" in types:
+                    return str
+                elif "float" in types or "double" in types:
+                    return float
+                else:
+                    return int
+            else:
+                return list(types)[0]
+        else:
+            return type(value)
+
+    def get_key(self, name, attr_type, scope, default):
+        keys_key = (name, attr_type, scope)
+        try:
+            return self.keys[keys_key]
+        except KeyError:
+            if self.named_key_ids:
+                new_id = name
+            else:
+                new_id = f"d{len(list(self.keys))}"
+
+            self.keys[keys_key] = new_id
+            key_kwargs = {
+                "id": new_id,
+                "for": scope,
+                "attr.name": name,
+                "attr.type": attr_type,
+            }
+            key_element = self.myElement("key", **key_kwargs)
+            # add subelement for data default value if present
+            if default is not None:
+                default_element = self.myElement("default")
+                default_element.text = str(default)
+                key_element.append(default_element)
+            self.xml.insert(0, key_element)
+        return new_id
+
+    def add_data(self, name, element_type, value, scope="all", default=None):
         """
         Make a data element for an edge or a node. Keep a log of the
         type in the keys table.
         """
-        pass
+        if element_type not in self.xml_type:
+            raise nx.NetworkXError(
+                f"GraphML writer does not support {element_type} as data values."
+            )
+        keyid = self.get_key(name, self.get_xml_type(element_type), scope, default)
+        data_element = self.myElement("data", key=keyid)
+        data_element.text = str(value)
+        return data_element

     def add_attributes(self, scope, xml_obj, data, default):
         """Appends attribute data to edges or nodes, and stores type information
         to be added later. See add_graph_element.
         """
-        pass
+        for k, v in data.items():
+            self.attribute_types[(str(k), scope)].add(type(v))
+            self.attributes[xml_obj].append([k, v, scope, default.get(k)])
+
+    def add_nodes(self, G, graph_element):
+        default = G.graph.get("node_default", {})
+        for node, data in G.nodes(data=True):
+            node_element = self.myElement("node", id=str(node))
+            self.add_attributes("node", node_element, data, default)
+            graph_element.append(node_element)
+
+    def add_edges(self, G, graph_element):
+        if G.is_multigraph():
+            for u, v, key, data in G.edges(data=True, keys=True):
+                edge_element = self.myElement(
+                    "edge",
+                    source=str(u),
+                    target=str(v),
+                    id=str(data.get(self.edge_id_from_attribute))
+                    if self.edge_id_from_attribute
+                    and self.edge_id_from_attribute in data
+                    else str(key),
+                )
+                default = G.graph.get("edge_default", {})
+                self.add_attributes("edge", edge_element, data, default)
+                graph_element.append(edge_element)
+        else:
+            for u, v, data in G.edges(data=True):
+                if self.edge_id_from_attribute and self.edge_id_from_attribute in data:
+                    # select attribute to be edge id
+                    edge_element = self.myElement(
+                        "edge",
+                        source=str(u),
+                        target=str(v),
+                        id=str(data.get(self.edge_id_from_attribute)),
+                    )
+                else:
+                    # default: no edge id
+                    edge_element = self.myElement("edge", source=str(u), target=str(v))
+                default = G.graph.get("edge_default", {})
+                self.add_attributes("edge", edge_element, data, default)
+                graph_element.append(edge_element)

     def add_graph_element(self, G):
         """
         Serialize graph G in GraphML to the stream.
         """
-        pass
+        if G.is_directed():
+            default_edge_type = "directed"
+        else:
+            default_edge_type = "undirected"
+
+        graphid = G.graph.pop("id", None)
+        if graphid is None:
+            graph_element = self.myElement("graph", edgedefault=default_edge_type)
+        else:
+            graph_element = self.myElement(
+                "graph", edgedefault=default_edge_type, id=graphid
+            )
+        default = {}
+        data = {
+            k: v
+            for (k, v) in G.graph.items()
+            if k not in ["node_default", "edge_default"]
+        }
+        self.add_attributes("graph", graph_element, data, default)
+        self.add_nodes(G, graph_element)
+        self.add_edges(G, graph_element)
+
+        # self.attributes contains a mapping from XML Objects to a list of
+        # data that needs to be added to them.
+        # We postpone processing in order to do type inference/generalization.
+        # See self.attr_type
+        for xml_obj, data in self.attributes.items():
+            for k, v, scope, default in data:
+                xml_obj.append(
+                    self.add_data(
+                        str(k), self.attr_type(k, scope, v), str(v), scope, default
+                    )
+                )
+        self.xml.append(graph_element)

     def add_graphs(self, graph_list):
         """Add many graphs to this GraphML document."""
-        pass
+        for G in graph_list:
+            self.add_graph_element(G)
+
+    def dump(self, stream):
+        from xml.etree.ElementTree import ElementTree
+
+        if self.prettyprint:
+            self.indent(self.xml)
+        document = ElementTree(self.xml)
+        document.write(stream, encoding=self.encoding, xml_declaration=True)
+
+    def indent(self, elem, level=0):
+        # in-place prettyprint formatter
+        i = "\n" + level * "  "
+        if len(elem):
+            if not elem.text or not elem.text.strip():
+                elem.text = i + "  "
+            if not elem.tail or not elem.tail.strip():
+                elem.tail = i
+            for elem in elem:
+                self.indent(elem, level + 1)
+            if not elem.tail or not elem.tail.strip():
+                elem.tail = i
+        else:
+            if level and (not elem.tail or not elem.tail.strip()):
+                elem.tail = i
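
A quick sanity check of the restored etree-based writer via the public generator (a minimal sketch; the graph contents are arbitrary):

    import networkx as nx

    G = nx.Graph(day="Friday")
    G.add_edge("a", "b", weight=1.5)
    # generate_graphml drives GraphMLWriter and yields the document line by line
    for line in nx.generate_graphml(G):
        print(line)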


 class IncrementalElement:
@@ -388,31 +694,54 @@ class IncrementalElement:
         self.xml = xml
         self.prettyprint = prettyprint

+    def append(self, element):
+        self.xml.write(element, pretty_print=self.prettyprint)

-class GraphMLWriterLxml(GraphMLWriter):

-    def __init__(self, path, graph=None, encoding='utf-8', prettyprint=True,
-        infer_numeric_types=False, named_key_ids=False,
-        edge_id_from_attribute=None):
+class GraphMLWriterLxml(GraphMLWriter):
+    def __init__(
+        self,
+        path,
+        graph=None,
+        encoding="utf-8",
+        prettyprint=True,
+        infer_numeric_types=False,
+        named_key_ids=False,
+        edge_id_from_attribute=None,
+    ):
         self.construct_types()
         import lxml.etree as lxmletree
+
         self.myElement = lxmletree.Element
+
         self._encoding = encoding
         self._prettyprint = prettyprint
         self.named_key_ids = named_key_ids
         self.edge_id_from_attribute = edge_id_from_attribute
         self.infer_numeric_types = infer_numeric_types
+
         self._xml_base = lxmletree.xmlfile(path, encoding=encoding)
         self._xml = self._xml_base.__enter__()
         self._xml.write_declaration()
+
+        # We need an xml variable that supports insertion; it is used for
+        # adding the keys to the document.
+        # We will store those keys in a plain list, and then after the graph
+        # element is closed we will add them to the main graphml element.
         self.xml = []
         self._keys = self.xml
-        self._graphml = self._xml.element('graphml', {'xmlns': self.
-            NS_GRAPHML, 'xmlns:xsi': self.NS_XSI, 'xsi:schemaLocation':
-            self.SCHEMALOCATION})
+        self._graphml = self._xml.element(
+            "graphml",
+            {
+                "xmlns": self.NS_GRAPHML,
+                "xmlns:xsi": self.NS_XSI,
+                "xsi:schemaLocation": self.SCHEMALOCATION,
+            },
+        )
         self._graphml.__enter__()
         self.keys = {}
         self.attribute_types = defaultdict(set)
+
         if graph is not None:
             self.add_graph_element(graph)

@@ -420,54 +749,304 @@ class GraphMLWriterLxml(GraphMLWriter):
         """
         Serialize graph G in GraphML to the stream.
         """
-        pass
+        if G.is_directed():
+            default_edge_type = "directed"
+        else:
+            default_edge_type = "undirected"
+
+        graphid = G.graph.pop("id", None)
+        if graphid is None:
+            graph_element = self._xml.element("graph", edgedefault=default_edge_type)
+        else:
+            graph_element = self._xml.element(
+                "graph", edgedefault=default_edge_type, id=graphid
+            )
+
+        # gather attributes types for the whole graph
+        # to find the most general numeric format needed.
+        # Then pass through attributes to create key_id for each.
+        graphdata = {
+            k: v
+            for k, v in G.graph.items()
+            if k not in ("node_default", "edge_default")
+        }
+        node_default = G.graph.get("node_default", {})
+        edge_default = G.graph.get("edge_default", {})
+        # Graph attributes
+        for k, v in graphdata.items():
+            self.attribute_types[(str(k), "graph")].add(type(v))
+        for k, v in graphdata.items():
+            element_type = self.get_xml_type(self.attr_type(k, "graph", v))
+            self.get_key(str(k), element_type, "graph", None)
+        # Nodes and data
+        for node, d in G.nodes(data=True):
+            for k, v in d.items():
+                self.attribute_types[(str(k), "node")].add(type(v))
+        for node, d in G.nodes(data=True):
+            for k, v in d.items():
+                T = self.get_xml_type(self.attr_type(k, "node", v))
+                self.get_key(str(k), T, "node", node_default.get(k))
+        # Edges and data
+        if G.is_multigraph():
+            for u, v, ekey, d in G.edges(keys=True, data=True):
+                for k, v in d.items():
+                    self.attribute_types[(str(k), "edge")].add(type(v))
+            for u, v, ekey, d in G.edges(keys=True, data=True):
+                for k, v in d.items():
+                    T = self.get_xml_type(self.attr_type(k, "edge", v))
+                    self.get_key(str(k), T, "edge", edge_default.get(k))
+        else:
+            for u, v, d in G.edges(data=True):
+                for k, v in d.items():
+                    self.attribute_types[(str(k), "edge")].add(type(v))
+            for u, v, d in G.edges(data=True):
+                for k, v in d.items():
+                    T = self.get_xml_type(self.attr_type(k, "edge", v))
+                    self.get_key(str(k), T, "edge", edge_default.get(k))
+
+        # Now add attribute keys to the xml file
+        for key in self.xml:
+            self._xml.write(key, pretty_print=self._prettyprint)
+
+        # The incremental_writer writes each node/edge as it is created
+        incremental_writer = IncrementalElement(self._xml, self._prettyprint)
+        with graph_element:
+            self.add_attributes("graph", incremental_writer, graphdata, {})
+            self.add_nodes(G, incremental_writer)  # adds attributes too
+            self.add_edges(G, incremental_writer)  # adds attributes too

     def add_attributes(self, scope, xml_obj, data, default):
         """Appends attribute data."""
-        pass
+        for k, v in data.items():
+            data_element = self.add_data(
+                str(k), self.attr_type(str(k), scope, v), str(v), scope, default.get(k)
+            )
+            xml_obj.append(data_element)

     def __str__(self):
         return object.__str__(self)

+    def dump(self, stream=None):
+        self._graphml.__exit__(None, None, None)
+        self._xml_base.__exit__(None, None, None)

+
+# Use the lxml writer as the default when lxml is present.
 write_graphml = write_graphml_lxml
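
For the streaming lxml writer, a minimal round trip into an in-memory buffer (a sketch assuming lxml is installed; io.BytesIO stands in for a real path):

    import io

    import networkx as nx

    G = nx.path_graph(3)
    buf = io.BytesIO()
    nx.write_graphml_lxml(G, buf)  # keys are written first, then nodes/edges stream out
    print(buf.getvalue().decode("utf-8").splitlines()[0])  # XML declaration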


 class GraphMLReader(GraphML):
     """Read a GraphML document.  Produces NetworkX graph objects."""

-    def __init__(self, node_type=str, edge_key_type=int, force_multigraph=False
-        ):
+    def __init__(self, node_type=str, edge_key_type=int, force_multigraph=False):
         self.construct_types()
         self.node_type = node_type
         self.edge_key_type = edge_key_type
-        self.multigraph = force_multigraph
-        self.edge_ids = {}
+        self.multigraph = force_multigraph  # If False, test for multiedges
+        self.edge_ids = {}  # dict mapping (u,v) tuples to edge id attributes

     def __call__(self, path=None, string=None):
         from xml.etree.ElementTree import ElementTree, fromstring
+
         if path is not None:
             self.xml = ElementTree(file=path)
         elif string is not None:
             self.xml = fromstring(string)
         else:
             raise ValueError("Must specify either 'path' or 'string' as kwarg")
-        keys, defaults = self.find_graphml_keys(self.xml)
-        for g in self.xml.findall(f'{{{self.NS_GRAPHML}}}graph'):
+        (keys, defaults) = self.find_graphml_keys(self.xml)
+        for g in self.xml.findall(f"{{{self.NS_GRAPHML}}}graph"):
             yield self.make_graph(g, keys, defaults)

+    def make_graph(self, graph_xml, graphml_keys, defaults, G=None):
+        # set default graph type
+        edgedefault = graph_xml.get("edgedefault", None)
+        if G is None:
+            if edgedefault == "directed":
+                G = nx.MultiDiGraph()
+            else:
+                G = nx.MultiGraph()
+        # set defaults for graph attributes
+        G.graph["node_default"] = {}
+        G.graph["edge_default"] = {}
+        for key_id, value in defaults.items():
+            key_for = graphml_keys[key_id]["for"]
+            name = graphml_keys[key_id]["name"]
+            python_type = graphml_keys[key_id]["type"]
+            if key_for == "node":
+                G.graph["node_default"].update({name: python_type(value)})
+            if key_for == "edge":
+                G.graph["edge_default"].update({name: python_type(value)})
+        # hyperedges are not supported
+        hyperedge = graph_xml.find(f"{{{self.NS_GRAPHML}}}hyperedge")
+        if hyperedge is not None:
+            raise nx.NetworkXError("GraphML reader doesn't support hyperedges")
+        # add nodes
+        for node_xml in graph_xml.findall(f"{{{self.NS_GRAPHML}}}node"):
+            self.add_node(G, node_xml, graphml_keys, defaults)
+        # add edges
+        for edge_xml in graph_xml.findall(f"{{{self.NS_GRAPHML}}}edge"):
+            self.add_edge(G, edge_xml, graphml_keys)
+        # add graph data
+        data = self.decode_data_elements(graphml_keys, graph_xml)
+        G.graph.update(data)
+
+        # switch to Graph or DiGraph if no parallel edges were found
+        if self.multigraph:
+            return G
+
+        G = nx.DiGraph(G) if G.is_directed() else nx.Graph(G)
+        # add explicit edge "id" from file as attribute in NX graph.
+        nx.set_edge_attributes(G, values=self.edge_ids, name="id")
+        return G
+
     def add_node(self, G, node_xml, graphml_keys, defaults):
         """Add a node to the graph."""
-        pass
+        # warn on finding unsupported ports tag
+        ports = node_xml.find(f"{{{self.NS_GRAPHML}}}port")
+        if ports is not None:
+            warnings.warn("GraphML port tag not supported.")
+        # find the node by id and cast it to the appropriate type
+        node_id = self.node_type(node_xml.get("id"))
+        # get data/attributes for node
+        data = self.decode_data_elements(graphml_keys, node_xml)
+        G.add_node(node_id, **data)
+        # get child nodes
+        if node_xml.attrib.get("yfiles.foldertype") == "group":
+            graph_xml = node_xml.find(f"{{{self.NS_GRAPHML}}}graph")
+            self.make_graph(graph_xml, graphml_keys, defaults, G)

     def add_edge(self, G, edge_element, graphml_keys):
         """Add an edge to the graph."""
-        pass
+        # warn on finding unsupported ports tag
+        ports = edge_element.find(f"{{{self.NS_GRAPHML}}}port")
+        if ports is not None:
+            warnings.warn("GraphML port tag not supported.")
+
+        # raise error if we find mixed directed and undirected edges
+        directed = edge_element.get("directed")
+        if G.is_directed() and directed == "false":
+            msg = "directed=false edge found in directed graph."
+            raise nx.NetworkXError(msg)
+        if (not G.is_directed()) and directed == "true":
+            msg = "directed=true edge found in undirected graph."
+            raise nx.NetworkXError(msg)
+
+        source = self.node_type(edge_element.get("source"))
+        target = self.node_type(edge_element.get("target"))
+        data = self.decode_data_elements(graphml_keys, edge_element)
+        # GraphML stores edge ids as an attribute
+        # NetworkX uses them as keys in multigraphs too if no key
+        # attribute is specified
+        edge_id = edge_element.get("id")
+        if edge_id:
+            # self.edge_ids is used by `make_graph` method for non-multigraphs
+            self.edge_ids[source, target] = edge_id
+            try:
+                edge_id = self.edge_key_type(edge_id)
+            except ValueError:  # Could not convert.
+                pass
+        else:
+            edge_id = data.get("key")
+
+        if G.has_edge(source, target):
+            # mark this as a multigraph
+            self.multigraph = True
+
+        # Use add_edges_from to avoid error with add_edge when `'key' in data`
+        # Note there is only one edge here...
+        G.add_edges_from([(source, target, edge_id, data)])

     def decode_data_elements(self, graphml_keys, obj_xml):
         """Use the key information to decode the data XML if present."""
-        pass
+        data = {}
+        for data_element in obj_xml.findall(f"{{{self.NS_GRAPHML}}}data"):
+            key = data_element.get("key")
+            try:
+                data_name = graphml_keys[key]["name"]
+                data_type = graphml_keys[key]["type"]
+            except KeyError as err:
+                raise nx.NetworkXError(f"Bad GraphML data: no key {key}") from err
+            text = data_element.text
+            # assume anything with subelements is a yfiles extension
+            if text is not None and len(list(data_element)) == 0:
+                if data_type == bool:
+                    # Ignore cases.
+                    # http://docs.oracle.com/javase/6/docs/api/java/lang/
+                    # Boolean.html#parseBoolean%28java.lang.String%29
+                    data[data_name] = self.convert_bool[text.lower()]
+                else:
+                    data[data_name] = data_type(text)
+            elif len(list(data_element)) > 0:
+                # Assume yfiles as subelements, try to extract node_label
+                node_label = None
+                # set GenericNode's configuration as shape type
+                gn = data_element.find(f"{{{self.NS_Y}}}GenericNode")
+                if gn is not None:
+                    data["shape_type"] = gn.get("configuration")
+                for node_type in ["GenericNode", "ShapeNode", "SVGNode", "ImageNode"]:
+                    pref = f"{{{self.NS_Y}}}{node_type}/{{{self.NS_Y}}}"
+                    geometry = data_element.find(f"{pref}Geometry")
+                    if geometry is not None:
+                        data["x"] = geometry.get("x")
+                        data["y"] = geometry.get("y")
+                    if node_label is None:
+                        node_label = data_element.find(f"{pref}NodeLabel")
+                    shape = data_element.find(f"{pref}Shape")
+                    if shape is not None:
+                        data["shape_type"] = shape.get("type")
+                if node_label is not None:
+                    data["label"] = node_label.text
+
+                # check all the different types of edges available in yEd.
+                for edge_type in [
+                    "PolyLineEdge",
+                    "SplineEdge",
+                    "QuadCurveEdge",
+                    "BezierEdge",
+                    "ArcEdge",
+                ]:
+                    pref = f"{{{self.NS_Y}}}{edge_type}/{{{self.NS_Y}}}"
+                    edge_label = data_element.find(f"{pref}EdgeLabel")
+                    if edge_label is not None:
+                        break
+                if edge_label is not None:
+                    data["label"] = edge_label.text
+            elif text is None:
+                data[data_name] = ""
+        return data

     def find_graphml_keys(self, graph_element):
         """Extracts all the keys and key defaults from the xml."""
-        pass
+        graphml_keys = {}
+        graphml_key_defaults = {}
+        for k in graph_element.findall(f"{{{self.NS_GRAPHML}}}key"):
+            attr_id = k.get("id")
+            attr_type = k.get("attr.type")
+            attr_name = k.get("attr.name")
+            yfiles_type = k.get("yfiles.type")
+            if yfiles_type is not None:
+                attr_name = yfiles_type
+                attr_type = "yfiles"
+            if attr_type is None:
+                attr_type = "string"
+                warnings.warn(f"No key type for id {attr_id}. Using string")
+            if attr_name is None:
+                raise nx.NetworkXError(f"Unknown key for id {attr_id}.")
+            graphml_keys[attr_id] = {
+                "name": attr_name,
+                "type": self.python_type[attr_type],
+                "for": k.get("for"),
+            }
+            # check for "default" sub-element of key element
+            default = k.find(f"{{{self.NS_GRAPHML}}}default")
+            if default is not None:
+                # Handle default values identically to data element values
+                python_type = graphml_keys[attr_id]["type"]
+                if python_type == bool:
+                    graphml_key_defaults[attr_id] = self.convert_bool[
+                        default.text.lower()
+                    ]
+                else:
+                    graphml_key_defaults[attr_id] = python_type(default.text)
+        return graphml_keys, graphml_key_defaults
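
To exercise the reader end to end, a small GraphML document with one typed edge key (a sketch; ids and values are arbitrary):

    import networkx as nx

    xml = """<graphml xmlns="http://graphml.graphdrawing.org/xmlns">
      <key id="d0" for="edge" attr.name="weight" attr.type="double"/>
      <graph edgedefault="undirected">
        <node id="a"/><node id="b"/>
        <edge source="a" target="b"><data key="d0">1.5</data></edge>
      </graph>
    </graphml>"""
    G = nx.parse_graphml(xml)
    print(list(G.edges(data=True)))  # [('a', 'b', {'weight': 1.5})]
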
diff --git a/networkx/readwrite/json_graph/adjacency.py b/networkx/readwrite/json_graph/adjacency.py
index fa9e1461a..3b0574756 100644
--- a/networkx/readwrite/json_graph/adjacency.py
+++ b/networkx/readwrite/json_graph/adjacency.py
@@ -1,6 +1,8 @@
 import networkx as nx
-__all__ = ['adjacency_data', 'adjacency_graph']
-_attrs = {'id': 'id', 'key': 'key'}
+
+__all__ = ["adjacency_data", "adjacency_graph"]
+
+_attrs = {"id": "id", "key": "key"}


 def adjacency_data(G, attrs=_attrs):
@@ -53,7 +55,30 @@ def adjacency_data(G, attrs=_attrs):
     --------
     adjacency_graph, node_link_data, tree_data
     """
-    pass
+    multigraph = G.is_multigraph()
+    id_ = attrs["id"]
+    # Allow 'key' to be omitted from attrs if the graph is not a multigraph.
+    key = None if not multigraph else attrs["key"]
+    if id_ == key:
+        raise nx.NetworkXError("Attribute names are not unique.")
+    data = {}
+    data["directed"] = G.is_directed()
+    data["multigraph"] = multigraph
+    data["graph"] = list(G.graph.items())
+    data["nodes"] = []
+    data["adjacency"] = []
+    for n, nbrdict in G.adjacency():
+        data["nodes"].append({**G.nodes[n], id_: n})
+        adj = []
+        if multigraph:
+            for nbr, keys in nbrdict.items():
+                for k, d in keys.items():
+                    adj.append({**d, id_: nbr, key: k})
+        else:
+            for nbr, d in nbrdict.items():
+                adj.append({**d, id_: nbr})
+        data["adjacency"].append(adj)
+    return data


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -97,4 +122,35 @@ def adjacency_graph(data, directed=False, multigraph=True, attrs=_attrs):
     --------
     adjacency_data, node_link_data, tree_data
     """
-    pass
+    multigraph = data.get("multigraph", multigraph)
+    directed = data.get("directed", directed)
+    if multigraph:
+        graph = nx.MultiGraph()
+    else:
+        graph = nx.Graph()
+    if directed:
+        graph = graph.to_directed()
+    id_ = attrs["id"]
+    # Allow 'key' to be omitted from attrs if the graph is not a multigraph.
+    key = None if not multigraph else attrs["key"]
+    graph.graph = dict(data.get("graph", []))
+    mapping = []
+    for d in data["nodes"]:
+        node_data = d.copy()
+        node = node_data.pop(id_)
+        mapping.append(node)
+        graph.add_node(node)
+        graph.nodes[node].update(node_data)
+    for i, d in enumerate(data["adjacency"]):
+        source = mapping[i]
+        for tdata in d:
+            target_data = tdata.copy()
+            target = target_data.pop(id_)
+            if not multigraph:
+                graph.add_edge(source, target)
+                graph[source][target].update(target_data)
+            else:
+                ky = target_data.pop(key, None)
+                graph.add_edge(source, target, key=ky)
+                graph[source][target][ky].update(target_data)
+    return graph
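
A round trip through the adjacency format, relying only on the two functions restored above (sketch):

    import networkx as nx
    from networkx.readwrite import json_graph

    G = nx.path_graph(3)
    data = json_graph.adjacency_data(G)
    H = json_graph.adjacency_graph(data)
    print(sorted(H.edges()))  # [(0, 1), (1, 2)]
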
diff --git a/networkx/readwrite/json_graph/cytoscape.py b/networkx/readwrite/json_graph/cytoscape.py
index fbb5b7437..2f3b2176a 100644
--- a/networkx/readwrite/json_graph/cytoscape.py
+++ b/networkx/readwrite/json_graph/cytoscape.py
@@ -1,8 +1,9 @@
 import networkx as nx
-__all__ = ['cytoscape_data', 'cytoscape_graph']

+__all__ = ["cytoscape_data", "cytoscape_graph"]

-def cytoscape_data(G, name='name', ident='id'):
+
+def cytoscape_data(G, name="name", ident="id"):
     """Returns data in Cytoscape JSON format (cyjs).

     Parameters
@@ -46,11 +47,41 @@ def cytoscape_data(G, name='name', ident='id'):
        {'data': {'id': '1', 'value': 1, 'name': '1'}}],
       'edges': [{'data': {'source': 0, 'target': 1}}]}}
     """
-    pass
+    if name == ident:
+        raise nx.NetworkXError("name and ident must be different.")
+
+    jsondata = {"data": list(G.graph.items())}
+    jsondata["directed"] = G.is_directed()
+    jsondata["multigraph"] = G.is_multigraph()
+    jsondata["elements"] = {"nodes": [], "edges": []}
+    nodes = jsondata["elements"]["nodes"]
+    edges = jsondata["elements"]["edges"]
+
+    for i, j in G.nodes.items():
+        n = {"data": j.copy()}
+        n["data"]["id"] = j.get(ident) or str(i)
+        n["data"]["value"] = i
+        n["data"]["name"] = j.get(name) or str(i)
+        nodes.append(n)
+
+    if G.is_multigraph():
+        for e in G.edges(keys=True):
+            n = {"data": G.adj[e[0]][e[1]][e[2]].copy()}
+            n["data"]["source"] = e[0]
+            n["data"]["target"] = e[1]
+            n["data"]["key"] = e[2]
+            edges.append(n)
+    else:
+        for e in G.edges():
+            n = {"data": G.adj[e[0]][e[1]].copy()}
+            n["data"]["source"] = e[0]
+            n["data"]["target"] = e[1]
+            edges.append(n)
+    return jsondata


 @nx._dispatchable(graphs=None, returns_graph=True)
-def cytoscape_graph(data, name='name', ident='id'):
+def cytoscape_graph(data, name="name", ident="id"):
     """
     Create a NetworkX graph from a dictionary in cytoscape JSON format.

@@ -109,4 +140,39 @@ def cytoscape_graph(data, name='name', ident='id'):
     >>> G.edges(data=True)
     EdgeDataView([(0, 1, {'source': 0, 'target': 1})])
     """
-    pass
+    if name == ident:
+        raise nx.NetworkXError("name and ident must be different.")
+
+    multigraph = data.get("multigraph")
+    directed = data.get("directed")
+    if multigraph:
+        graph = nx.MultiGraph()
+    else:
+        graph = nx.Graph()
+    if directed:
+        graph = graph.to_directed()
+    graph.graph = dict(data.get("data"))
+    for d in data["elements"]["nodes"]:
+        node_data = d["data"].copy()
+        node = d["data"]["value"]
+
+        if d["data"].get(name):
+            node_data[name] = d["data"].get(name)
+        if d["data"].get(ident):
+            node_data[ident] = d["data"].get(ident)
+
+        graph.add_node(node)
+        graph.nodes[node].update(node_data)
+
+    for d in data["elements"]["edges"]:
+        edge_data = d["data"].copy()
+        sour = d["data"]["source"]
+        targ = d["data"]["target"]
+        if multigraph:
+            key = d["data"].get("key", 0)
+            graph.add_edge(sour, targ, key=key)
+            graph.edges[sour, targ, key].update(edge_data)
+        else:
+            graph.add_edge(sour, targ)
+            graph.edges[sour, targ].update(edge_data)
+    return graph
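
The same pattern works for the Cytoscape helpers; note that cytoscape_graph keeps source/target in the edge data, as the docstring above shows (sketch):

    import networkx as nx

    G = nx.Graph()
    G.add_edge(0, 1, weight=2)
    H = nx.cytoscape_graph(nx.cytoscape_data(G))
    print(list(H.edges(data=True)))  # [(0, 1, {'weight': 2, 'source': 0, 'target': 1})]
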
diff --git a/networkx/readwrite/json_graph/node_link.py b/networkx/readwrite/json_graph/node_link.py
index a03c444ff..e29100f44 100644
--- a/networkx/readwrite/json_graph/node_link.py
+++ b/networkx/readwrite/json_graph/node_link.py
@@ -1,8 +1,17 @@
 from itertools import chain, count
+
 import networkx as nx
-__all__ = ['node_link_data', 'node_link_graph']
-_attrs = {'source': 'source', 'target': 'target', 'name': 'id', 'key':
-    'key', 'link': 'links'}
+
+__all__ = ["node_link_data", "node_link_graph"]
+
+
+_attrs = {
+    "source": "source",
+    "target": "target",
+    "name": "id",
+    "key": "key",
+    "link": "links",
+}


 def _to_tuple(x):
@@ -17,11 +26,20 @@ def _to_tuple(x):
     >>> _to_tuple([1, 2, [3, 4]])
     (1, 2, (3, 4))
     """
-    pass
-
-
-def node_link_data(G, *, source='source', target='target', name='id', key=
-    'key', link='links'):
+    if not isinstance(x, tuple | list):
+        return x
+    return tuple(map(_to_tuple, x))
+
+
+def node_link_data(
+    G,
+    *,
+    source="source",
+    target="target",
+    name="id",
+    key="key",
+    link="links",
+):
     """Returns data in node-link format that is suitable for JSON serialization
     and use in JavaScript documents.

@@ -92,12 +110,40 @@ def node_link_data(G, *, source='source', target='target', name='id', key=
     --------
     node_link_graph, adjacency_data, tree_data
     """
-    pass
+    multigraph = G.is_multigraph()
+
+    # Allow 'key' to be omitted from attrs if the graph is not a multigraph.
+    key = None if not multigraph else key
+    if len({source, target, key}) < 3:
+        raise nx.NetworkXError("Attribute names are not unique.")
+    data = {
+        "directed": G.is_directed(),
+        "multigraph": multigraph,
+        "graph": G.graph,
+        "nodes": [{**G.nodes[n], name: n} for n in G],
+    }
+    if multigraph:
+        data[link] = [
+            {**d, source: u, target: v, key: k}
+            for u, v, k, d in G.edges(keys=True, data=True)
+        ]
+    else:
+        data[link] = [{**d, source: u, target: v} for u, v, d in G.edges(data=True)]
+    return data


 @nx._dispatchable(graphs=None, returns_graph=True)
-def node_link_graph(data, directed=False, multigraph=True, *, source=
-    'source', target='target', name='id', key='key', link='links'):
+def node_link_graph(
+    data,
+    directed=False,
+    multigraph=True,
+    *,
+    source="source",
+    target="target",
+    name="id",
+    key="key",
+    link="links",
+):
     """Returns graph from node-link data format.
     Useful for de-serialization from JSON.

@@ -164,4 +210,35 @@ def node_link_graph(data, directed=False, multigraph=True, *, source=
     --------
     node_link_data, adjacency_data, tree_data
     """
-    pass
+    multigraph = data.get("multigraph", multigraph)
+    directed = data.get("directed", directed)
+    if multigraph:
+        graph = nx.MultiGraph()
+    else:
+        graph = nx.Graph()
+    if directed:
+        graph = graph.to_directed()
+
+    # Allow 'key' to be omitted from attrs if the graph is not a multigraph.
+    key = None if not multigraph else key
+    graph.graph = data.get("graph", {})
+    c = count()
+    for d in data["nodes"]:
+        node = _to_tuple(d.get(name, next(c)))
+        nodedata = {str(k): v for k, v in d.items() if k != name}
+        graph.add_node(node, **nodedata)
+    for d in data[link]:
+        src = tuple(d[source]) if isinstance(d[source], list) else d[source]
+        tgt = tuple(d[target]) if isinstance(d[target], list) else d[target]
+        if not multigraph:
+            edgedata = {str(k): v for k, v in d.items() if k != source and k != target}
+            graph.add_edge(src, tgt, **edgedata)
+        else:
+            ky = d.get(key, None)
+            edgedata = {
+                str(k): v
+                for k, v in d.items()
+                if k != source and k != target and k != key
+            }
+            graph.add_edge(src, tgt, ky, **edgedata)
+    return graph
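
Node-link round trip, including an explicit multigraph edge key (sketch):

    import networkx as nx

    G = nx.MultiGraph()
    G.add_edge("a", "b", key="x", weight=7)
    data = nx.node_link_data(G)
    H = nx.node_link_graph(data)
    print(list(H.edges(keys=True, data=True)))  # [('a', 'b', 'x', {'weight': 7})]
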
diff --git a/networkx/readwrite/json_graph/tree.py b/networkx/readwrite/json_graph/tree.py
index 97edd3f5b..22b07b09d 100644
--- a/networkx/readwrite/json_graph/tree.py
+++ b/networkx/readwrite/json_graph/tree.py
@@ -1,9 +1,11 @@
 from itertools import chain
+
 import networkx as nx
-__all__ = ['tree_data', 'tree_graph']
+
+__all__ = ["tree_data", "tree_graph"]


-def tree_data(G, root, ident='id', children='children'):
+def tree_data(G, root, ident="id", children="children"):
     """Returns data in tree format that is suitable for JSON serialization
     and use in JavaScript documents.

@@ -55,11 +57,34 @@ def tree_data(G, root, ident='id', children='children'):
     --------
     tree_graph, node_link_data, adjacency_data
     """
-    pass
+    if G.number_of_nodes() != G.number_of_edges() + 1:
+        raise TypeError("G is not a tree.")
+    if not G.is_directed():
+        raise TypeError("G is not directed.")
+    if not nx.is_weakly_connected(G):
+        raise TypeError("G is not weakly connected.")
+
+    if ident == children:
+        raise nx.NetworkXError("The values for `id` and `children` must be different.")
+
+    def add_children(n, G):
+        nbrs = G[n]
+        if len(nbrs) == 0:
+            return []
+        children_ = []
+        for child in nbrs:
+            d = {**G.nodes[child], ident: child}
+            c = add_children(child, G)
+            if c:
+                d[children] = c
+            children_.append(d)
+        return children_
+
+    return {**G.nodes[root], ident: root, children: add_children(root, G)}


 @nx._dispatchable(graphs=None, returns_graph=True)
-def tree_graph(data, ident='id', children='children'):
+def tree_graph(data, ident="id", children="children"):
     """Returns graph from tree data format.

     Parameters
@@ -90,4 +115,23 @@ def tree_graph(data, ident='id', children='children'):
     --------
     tree_data, node_link_data, adjacency_data
     """
-    pass
+    graph = nx.DiGraph()
+
+    def add_children(parent, children_):
+        for data in children_:
+            child = data[ident]
+            graph.add_edge(parent, child)
+            grandchildren = data.get(children, [])
+            if grandchildren:
+                add_children(child, grandchildren)
+            nodedata = {
+                str(k): v for k, v in data.items() if k != ident and k != children
+            }
+            graph.add_node(child, **nodedata)
+
+    root = data[ident]
+    children_ = data.get(children, [])
+    nodedata = {str(k): v for k, v in data.items() if k != ident and k != children}
+    graph.add_node(root, **nodedata)
+    add_children(root, children_)
+    return graph
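
Tree round trip; tree_data requires a directed, weakly connected graph with exactly n - 1 edges (sketch):

    import networkx as nx

    G = nx.DiGraph([(1, 2), (1, 3), (3, 4)])
    data = nx.tree_data(G, root=1)
    H = nx.tree_graph(data)
    print(sorted(H.edges()))  # [(1, 2), (1, 3), (3, 4)]
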
diff --git a/networkx/readwrite/leda.py b/networkx/readwrite/leda.py
index 260434ad9..9fb57db14 100644
--- a/networkx/readwrite/leda.py
+++ b/networkx/readwrite/leda.py
@@ -8,15 +8,19 @@ Format
 See http://www.algorithmic-solutions.info/leda_guide/graphs/leda_native_graph_fileformat.html

 """
-__all__ = ['read_leda', 'parse_leda']
+# Original author: D. Eppstein, UC Irvine, August 12, 2003.
+# The original code at http://www.ics.uci.edu/~eppstein/PADS/ is public domain.
+
+__all__ = ["read_leda", "parse_leda"]
+
 import networkx as nx
 from networkx.exception import NetworkXError
 from networkx.utils import open_file


-@open_file(0, mode='rb')
+@open_file(0, mode="rb")
 @nx._dispatchable(graphs=None, returns_graph=True)
-def read_leda(path, encoding='UTF-8'):
+def read_leda(path, encoding="UTF-8"):
     """Read graph in LEDA format from path.

     Parameters
@@ -37,7 +41,9 @@ def read_leda(path, encoding='UTF-8'):
     ----------
     .. [1] http://www.algorithmic-solutions.info/leda_guide/graphs/leda_native_graph_fileformat.html
     """
-    pass
+    lines = (line.decode(encoding) for line in path)
+    G = parse_leda(lines)
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -61,4 +67,42 @@ def parse_leda(lines):
     ----------
     .. [1] http://www.algorithmic-solutions.info/leda_guide/graphs/leda_native_graph_fileformat.html
     """
-    pass
+    if isinstance(lines, str):
+        lines = iter(lines.split("\n"))
+    lines = iter(
+        [
+            line.rstrip("\n")
+            for line in lines
+            if not (line.startswith(("#", "\n")) or line == "")
+        ]
+    )
+    for i in range(3):
+        next(lines)
+    # Graph
+    du = int(next(lines))  # -1=directed, -2=undirected
+    if du == -1:
+        G = nx.DiGraph()
+    else:
+        G = nx.Graph()
+
+    # Nodes
+    n = int(next(lines))  # number of nodes
+    node = {}
+    for i in range(1, n + 1):  # LEDA counts from 1 to n
+        symbol = next(lines).rstrip().strip("|{}|  ")
+        if symbol == "":
+            symbol = str(i)  # fall back to the 1-based index if no label - could be trouble
+        node[i] = symbol
+
+    G.add_nodes_from([s for i, s in node.items()])
+
+    # Edges
+    m = int(next(lines))  # number of edges
+    for i in range(m):
+        try:
+            s, t, reversal, label = next(lines).split()
+        except BaseException as err:
+            raise NetworkXError(f"Too few fields in LEDA.GRAPH edge {i+1}") from err
+        # BEWARE: no handling of reversal edges
+        G.add_edge(node[int(s)], node[int(t)], label=label[2:-2])
+    return G
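
A minimal LEDA.GRAPH snippet for the parser (sketch; the three header lines are skipped, and -1 marks a directed graph):

    from networkx.readwrite.leda import parse_leda

    s = """LEDA.GRAPH
    string
    int
    -1
    3
    |{a}|
    |{b}|
    |{c}|
    2
    1 2 0 |{first}|
    2 3 0 |{second}|
    """
    G = parse_leda(s)
    print(list(G.edges(data=True)))
    # [('a', 'b', {'label': 'first'}), ('b', 'c', {'label': 'second'})]
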
diff --git a/networkx/readwrite/multiline_adjlist.py b/networkx/readwrite/multiline_adjlist.py
index 97096a711..526b30a9b 100644
--- a/networkx/readwrite/multiline_adjlist.py
+++ b/networkx/readwrite/multiline_adjlist.py
@@ -24,13 +24,19 @@ adjacency list (anything following the # in a line is a comment)::
      d 1
      e
 """
-__all__ = ['generate_multiline_adjlist', 'write_multiline_adjlist',
-    'parse_multiline_adjlist', 'read_multiline_adjlist']
+
+__all__ = [
+    "generate_multiline_adjlist",
+    "write_multiline_adjlist",
+    "parse_multiline_adjlist",
+    "read_multiline_adjlist",
+]
+
 import networkx as nx
 from networkx.utils import open_file


-def generate_multiline_adjlist(G, delimiter=' '):
+def generate_multiline_adjlist(G, delimiter=" "):
     """Generate a single line of the graph G in multiline adjacency list format.

     Parameters
@@ -71,12 +77,64 @@ def generate_multiline_adjlist(G, delimiter=' '):
     --------
     write_multiline_adjlist, read_multiline_adjlist
     """
-    pass
-
-
-@open_file(1, mode='wb')
-def write_multiline_adjlist(G, path, delimiter=' ', comments='#', encoding=
-    'utf-8'):
+    if G.is_directed():
+        if G.is_multigraph():
+            for s, nbrs in G.adjacency():
+                nbr_edges = [
+                    (u, data)
+                    for u, datadict in nbrs.items()
+                    for key, data in datadict.items()
+                ]
+                deg = len(nbr_edges)
+                yield str(s) + delimiter + str(deg)
+                for u, d in nbr_edges:
+                    if d is None:
+                        yield str(u)
+                    else:
+                        yield str(u) + delimiter + str(d)
+        else:  # directed single edges
+            for s, nbrs in G.adjacency():
+                deg = len(nbrs)
+                yield str(s) + delimiter + str(deg)
+                for u, d in nbrs.items():
+                    if d is None:
+                        yield str(u)
+                    else:
+                        yield str(u) + delimiter + str(d)
+    else:  # undirected
+        if G.is_multigraph():
+            seen = set()  # helper set used to avoid duplicate edges
+            for s, nbrs in G.adjacency():
+                nbr_edges = [
+                    (u, data)
+                    for u, datadict in nbrs.items()
+                    if u not in seen
+                    for key, data in datadict.items()
+                ]
+                deg = len(nbr_edges)
+                yield str(s) + delimiter + str(deg)
+                for u, d in nbr_edges:
+                    if d is None:
+                        yield str(u)
+                    else:
+                        yield str(u) + delimiter + str(d)
+                seen.add(s)
+        else:  # undirected single edges
+            seen = set()  # helper set used to avoid duplicate edges
+            for s, nbrs in G.adjacency():
+                nbr_edges = [(u, d) for u, d in nbrs.items() if u not in seen]
+                deg = len(nbr_edges)
+                yield str(s) + delimiter + str(deg)
+                for u, d in nbr_edges:
+                    if d is None:
+                        yield str(u)
+                    else:
+                        yield str(u) + delimiter + str(d)
+                seen.add(s)
+
+
+@open_file(1, mode="wb")
+def write_multiline_adjlist(G, path, delimiter=" ", comments="#", encoding="utf-8"):
     """Write the graph G in multiline adjacency list format to path

     Parameters
@@ -115,12 +173,28 @@ def write_multiline_adjlist(G, path, delimiter=' ', comments='#', encoding=
     --------
     read_multiline_adjlist
     """
-    pass
+    import sys
+    import time
+
+    pargs = comments + " ".join(sys.argv)
+    header = (
+        f"{pargs}\n"
+        + comments
+        + f" GMT {time.asctime(time.gmtime())}\n"
+        + comments
+        + f" {G.name}\n"
+    )
+    path.write(header.encode(encoding))
+
+    for multiline in generate_multiline_adjlist(G, delimiter):
+        multiline += "\n"
+        path.write(multiline.encode(encoding))


 @nx._dispatchable(graphs=None, returns_graph=True)
-def parse_multiline_adjlist(lines, comments='#', delimiter=None,
-    create_using=None, nodetype=None, edgetype=None):
+def parse_multiline_adjlist(
+    lines, comments="#", delimiter=None, create_using=None, nodetype=None, edgetype=None
+):
     """Parse lines of a multiline adjacency list representation of a graph.

     Parameters
@@ -162,13 +236,81 @@ def parse_multiline_adjlist(lines, comments='#', delimiter=None,
     [1, 2, 3, 5]

     """
-    pass
-
-
-@open_file(0, mode='rb')
+    from ast import literal_eval
+
+    G = nx.empty_graph(0, create_using)
+    for line in lines:
+        p = line.find(comments)
+        if p >= 0:
+            line = line[:p]
+        if not line:
+            continue
+        try:
+            (u, deg) = line.strip().split(delimiter)
+            deg = int(deg)
+        except BaseException as err:
+            raise TypeError(f"Failed to read node and degree on line ({line})") from err
+        if nodetype is not None:
+            try:
+                u = nodetype(u)
+            except BaseException as err:
+                raise TypeError(
+                    f"Failed to convert node ({u}) to type {nodetype}"
+                ) from err
+        G.add_node(u)
+        for i in range(deg):
+            while True:
+                try:
+                    line = next(lines)
+                except StopIteration as err:
+                    msg = f"Failed to find neighbor for node ({u})"
+                    raise TypeError(msg) from err
+                p = line.find(comments)
+                if p >= 0:
+                    line = line[:p]
+                if line:
+                    break
+            vlist = line.strip().split(delimiter)
+            numb = len(vlist)
+            if numb < 1:
+                continue  # isolated node
+            v = vlist.pop(0)
+            data = "".join(vlist)
+            if nodetype is not None:
+                try:
+                    v = nodetype(v)
+                except BaseException as err:
+                    raise TypeError(
+                        f"Failed to convert node ({v}) to type {nodetype}"
+                    ) from err
+            if edgetype is not None:
+                try:
+                    edgedata = {"weight": edgetype(data)}
+                except BaseException as err:
+                    raise TypeError(
+                        f"Failed to convert edge data ({data}) to type {edgetype}"
+                    ) from err
+            else:
+                try:  # try to evaluate
+                    edgedata = literal_eval(data)
+                except:
+                    edgedata = {}
+            G.add_edge(u, v, **edgedata)
+
+    return G
+
+
+@open_file(0, mode="rb")
 @nx._dispatchable(graphs=None, returns_graph=True)
-def read_multiline_adjlist(path, comments='#', delimiter=None, create_using
-    =None, nodetype=None, edgetype=None, encoding='utf-8'):
+def read_multiline_adjlist(
+    path,
+    comments="#",
+    delimiter=None,
+    create_using=None,
+    nodetype=None,
+    edgetype=None,
+    encoding="utf-8",
+):
     """Read graph in multi-line adjacency list format from path.

     Parameters
@@ -240,4 +382,12 @@ def read_multiline_adjlist(path, comments='#', delimiter=None, create_using
     --------
     write_multiline_adjlist
     """
-    pass
+    lines = (line.decode(encoding) for line in path)
+    return parse_multiline_adjlist(
+        lines,
+        comments=comments,
+        delimiter=delimiter,
+        create_using=create_using,
+        nodetype=nodetype,
+        edgetype=edgetype,
+    )
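
Generate-then-parse round trip for the multiline format (sketch; parse_multiline_adjlist consumes an iterator, hence iter(...)):

    import networkx as nx

    G = nx.Graph()
    G.add_edge(1, 2, weight=3)
    lines = list(nx.generate_multiline_adjlist(G))
    print(lines)  # ['1 1', "2 {'weight': 3}", '2 0']
    H = nx.parse_multiline_adjlist(iter(lines), nodetype=int)
    print(list(H.edges(data=True)))  # [(1, 2, {'weight': 3})]
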
diff --git a/networkx/readwrite/p2g.py b/networkx/readwrite/p2g.py
index 6a11f184f..85f07ec84 100644
--- a/networkx/readwrite/p2g.py
+++ b/networkx/readwrite/p2g.py
@@ -35,8 +35,8 @@ import networkx as nx
 from networkx.utils import open_file


-@open_file(1, mode='w')
-def write_p2g(G, path, encoding='utf-8'):
+@open_file(1, mode="w")
+def write_p2g(G, path, encoding="utf-8"):
     """Write NetworkX graph in p2g format.

     Notes
@@ -44,12 +44,21 @@ def write_p2g(G, path, encoding='utf-8'):
     This format is meant to be used with directed graphs with
     possible self loops.
     """
-    pass
-
-
-@open_file(0, mode='r')
+    path.write((f"{G.name}\n").encode(encoding))
+    path.write((f"{G.order()} {G.size()}\n").encode(encoding))
+    nodes = list(G)
+    # make dictionary mapping nodes to integers
+    nodenumber = dict(zip(nodes, range(len(nodes))))
+    for n in nodes:
+        path.write((f"{n}\n").encode(encoding))
+        for nbr in G.neighbors(n):
+            path.write((f"{nodenumber[nbr]} ").encode(encoding))
+        path.write("\n".encode(encoding))
+
+
+@open_file(0, mode="r")
 @nx._dispatchable(graphs=None, returns_graph=True)
-def read_p2g(path, encoding='utf-8'):
+def read_p2g(path, encoding="utf-8"):
     """Read graph in p2g format from path.

     Returns
@@ -61,7 +70,9 @@ def read_p2g(path, encoding='utf-8'):
     If you want a DiGraph (with no self loops allowed and no edge data)
     use D=nx.DiGraph(read_p2g(path))
     """
-    pass
+    lines = (line.decode(encoding) for line in path)
+    G = parse_p2g(lines)
+    return G


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -72,4 +83,22 @@ def parse_p2g(lines):
     -------
     MultiDiGraph
     """
-    pass
+    description = next(lines).strip()
+    # are multiedges (parallel edges) allowed?
+    G = nx.MultiDiGraph(name=description, selfloops=True)
+    nnodes, nedges = map(int, next(lines).split())
+    nodelabel = {}
+    nbrs = {}
+    # loop over the nodes keeping track of node labels and out neighbors
+    # defer adding edges until all node labels are known
+    for i in range(nnodes):
+        n = next(lines).strip()
+        nodelabel[i] = n
+        G.add_node(n)
+        nbrs[n] = map(int, next(lines).split())
+    # now we know all of the node labels so we can add the edges
+    # with the correct labels
+    for n in G:
+        for nbr in nbrs[n]:
+            G.add_edge(n, nodelabel[nbr])
+    return G
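
A tiny p2g document for the parser (sketch; each node contributes a label line followed by a line of zero-based out-neighbor indices):

    from networkx.readwrite.p2g import parse_p2g

    doc = """example
    3 2
    a
    1
    b
    2
    c

    """
    G = parse_p2g(iter(doc.split("\n")))
    print(list(G.edges()))  # [('a', 'b'), ('b', 'c')]
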
diff --git a/networkx/readwrite/pajek.py b/networkx/readwrite/pajek.py
index 59e1de9ac..f148f1620 100644
--- a/networkx/readwrite/pajek.py
+++ b/networkx/readwrite/pajek.py
@@ -13,10 +13,13 @@ See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
 for format information.

 """
+
 import warnings
+
 import networkx as nx
 from networkx.utils import open_file
-__all__ = ['read_pajek', 'parse_pajek', 'generate_pajek', 'write_pajek']
+
+__all__ = ["read_pajek", "parse_pajek", "generate_pajek", "write_pajek"]


 def generate_pajek(G):
@@ -32,11 +35,69 @@ def generate_pajek(G):
     See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
     for format information.
     """
-    pass
+    if G.name == "":
+        name = "NetworkX"
+    else:
+        name = G.name
+    # Apparently many Pajek format readers can't process this line
+    # So we'll leave it out for now.
+    # yield '*network %s'%name
+
+    # write nodes with attributes
+    yield f"*vertices {G.order()}"
+    nodes = list(G)
+    # make dictionary mapping nodes to integers
+    nodenumber = dict(zip(nodes, range(1, len(nodes) + 1)))
+    for n in nodes:
+        # copy node attributes and pop mandatory attributes
+        # to avoid duplication.
+        na = G.nodes.get(n, {}).copy()
+        x = na.pop("x", 0.0)
+        y = na.pop("y", 0.0)
+        try:
+            id = int(na.pop("id", nodenumber[n]))
+        except ValueError as err:
+            err.args += (
+                (
+                    "Pajek format requires 'id' to be an int()."
+                    " Refer to the 'Relabeling nodes' section."
+                ),
+            )
+            raise
+        nodenumber[n] = id
+        shape = na.pop("shape", "ellipse")
+        s = " ".join(map(make_qstr, (id, n, x, y, shape)))
+        # only optional attributes are left in na.
+        for k, v in na.items():
+            if isinstance(v, str) and v.strip() != "":
+                s += f" {make_qstr(k)} {make_qstr(v)}"
+            else:
+                warnings.warn(
+                    f"Node attribute {k} is not processed. {('Empty attribute' if isinstance(v, str) else 'Non-string attribute')}."
+                )
+        yield s

+    # write edges with attributes
+    if G.is_directed():
+        yield "*arcs"
+    else:
+        yield "*edges"
+    for u, v, edgedata in G.edges(data=True):
+        d = edgedata.copy()
+        value = d.pop("weight", 1.0)  # use 1 as default edge value
+        s = " ".join(map(make_qstr, (nodenumber[u], nodenumber[v], value)))
+        for k, v in d.items():
+            if isinstance(v, str) and v.strip() != "":
+                s += f" {make_qstr(k)} {make_qstr(v)}"
+            else:
+                warnings.warn(
+                    f"Edge attribute {k} is not processed. {('Empty attribute' if isinstance(v, str) else 'Non-string attribute')}."
+                )
+        yield s

-@open_file(1, mode='wb')
-def write_pajek(G, path, encoding='UTF-8'):
+
+@open_file(1, mode="wb")
+def write_pajek(G, path, encoding="UTF-8"):
     """Write graph in Pajek format to path.

     Parameters
@@ -63,12 +124,14 @@ def write_pajek(G, path, encoding='UTF-8'):
     See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
     for format information.
     """
-    pass
+    for line in generate_pajek(G):
+        line += "\n"
+        path.write(line.encode(encoding))


-@open_file(0, mode='rb')
+@open_file(0, mode="rb")
 @nx._dispatchable(graphs=None, returns_graph=True)
-def read_pajek(path, encoding='UTF-8'):
+def read_pajek(path, encoding="UTF-8"):
     """Read graph in Pajek format from path.

     Parameters
@@ -96,7 +159,8 @@ def read_pajek(path, encoding='UTF-8'):
     See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
     for format information.
     """
-    pass
+    lines = (line.decode(encoding) for line in path)
+    return parse_pajek(lines)


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -117,11 +181,106 @@ def parse_pajek(lines):
     read_pajek

     """
-    pass
+    import shlex
+
+    # multigraph=False
+    if isinstance(lines, str):
+        lines = iter(lines.split("\n"))
+    lines = iter([line.rstrip("\n") for line in lines])
+    G = nx.MultiDiGraph()  # are multiedges allowed in Pajek? assume yes
+    labels = []  # in the order of the file, needed for matrix
+    while lines:
+        try:
+            l = next(lines)
+        except:  # EOF
+            break
+        if l.lower().startswith("*network"):
+            try:
+                label, name = l.split(None, 1)
+            except ValueError:
+                # Line was not of the form:  *network NAME
+                pass
+            else:
+                G.graph["name"] = name
+        elif l.lower().startswith("*vertices"):
+            nodelabels = {}
+            l, nnodes = l.split()
+            for i in range(int(nnodes)):
+                l = next(lines)
+                try:
+                    splitline = [
+                        x.decode("utf-8") for x in shlex.split(str(l).encode("utf-8"))
+                    ]
+                except AttributeError:
+                    splitline = shlex.split(str(l))
+                id, label = splitline[0:2]
+                labels.append(label)
+                G.add_node(label)
+                nodelabels[id] = label
+                G.nodes[label]["id"] = id
+                try:
+                    x, y, shape = splitline[2:5]
+                    G.nodes[label].update(
+                        {"x": float(x), "y": float(y), "shape": shape}
+                    )
+                except:
+                    pass
+                extra_attr = zip(splitline[5::2], splitline[6::2])
+                G.nodes[label].update(extra_attr)
+        elif l.lower().startswith("*edges") or l.lower().startswith("*arcs"):
+            if l.lower().startswith("*edge"):
+                # switch from multidigraph to multigraph
+                G = nx.MultiGraph(G)
+            if l.lower().startswith("*arcs"):
+                # switch to directed with multiple arcs for each existing edge
+                G = G.to_directed()
+            for l in lines:
+                try:
+                    splitline = [
+                        x.decode("utf-8") for x in shlex.split(str(l).encode("utf-8"))
+                    ]
+                except AttributeError:
+                    splitline = shlex.split(str(l))
+
+                if len(splitline) < 2:
+                    continue
+                ui, vi = splitline[0:2]
+                u = nodelabels.get(ui, ui)
+                v = nodelabels.get(vi, vi)
+                # parse the data attached to this edge and put in a dictionary
+                edge_data = {}
+                try:
+                    # there should always be a single value on the edge?
+                    w = splitline[2:3]
+                    edge_data.update({"weight": float(w[0])})
+                except:
+                    pass
+                    # if there isn't, just assign a 1
+                #                    edge_data.update({'value':1})
+                extra_attr = zip(splitline[3::2], splitline[4::2])
+                edge_data.update(extra_attr)
+                # if G.has_edge(u,v):
+                #     multigraph=True
+                G.add_edge(u, v, **edge_data)
+        elif l.lower().startswith("*matrix"):
+            G = nx.DiGraph(G)
+            adj_list = (
+                (labels[row], labels[col], {"weight": int(data)})
+                for (row, line) in enumerate(lines)
+                for (col, data) in enumerate(line.split())
+                if int(data) != 0
+            )
+            G.add_edges_from(adj_list)
+
+    return G


 def make_qstr(t):
     """Returns the string representation of t.
     Add outer double-quotes if the string has a space.
     """
-    pass
+    if not isinstance(t, str):
+        t = str(t)
+    if " " in t:
+        t = f'"{t}"'
+    return t
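
Pajek round trip through the generator and parser above (sketch; node ids in the emitted *vertices section are 1-based):

    import networkx as nx

    G = nx.DiGraph()
    G.add_edge("a", "b", weight=2.0)
    lines = list(nx.generate_pajek(G))
    print(lines)
    # ['*vertices 2', '1 a 0.0 0.0 ellipse', '2 b 0.0 0.0 ellipse', '*arcs', '1 2 2.0']
    H = nx.parse_pajek("\n".join(lines))
    print(list(H.edges(data=True)))  # [('a', 'b', {'weight': 2.0})]
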
diff --git a/networkx/readwrite/sparse6.py b/networkx/readwrite/sparse6.py
index 2030070f6..a70599438 100644
--- a/networkx/readwrite/sparse6.py
+++ b/networkx/readwrite/sparse6.py
@@ -1,3 +1,5 @@
+# Original author: D. Eppstein, UC Irvine, August 12, 2003.
+# The original code at https://www.ics.uci.edu/~eppstein/PADS/ is public domain.
 """Functions for reading and writing graphs in the *sparse6* format.

 The *sparse6* file format is a space-efficient format for large sparse
@@ -13,8 +15,8 @@ import networkx as nx
 from networkx.exception import NetworkXError
 from networkx.readwrite.graph6 import data_to_n, n_to_data
 from networkx.utils import not_implemented_for, open_file
-__all__ = ['from_sparse6_bytes', 'read_sparse6', 'to_sparse6_bytes',
-    'write_sparse6']
+
+__all__ = ["from_sparse6_bytes", "read_sparse6", "to_sparse6_bytes", "write_sparse6"]


 def _generate_sparse6_bytes(G, nodes, header):
@@ -38,7 +40,65 @@ def _generate_sparse6_bytes(G, nodes, header):
     the graph6 format (that is, greater than ``2 ** 36`` nodes).

     """
-    pass
+    n = len(G)
+    if n >= 2**36:
+        raise ValueError(
+            "sparse6 is only defined if number of nodes is less than 2 ** 36"
+        )
+    if header:
+        yield b">>sparse6<<"
+    yield b":"
+    for d in n_to_data(n):
+        yield str.encode(chr(d + 63))
+
+    k = 1
+    while 1 << k < n:
+        k += 1
+
+    def enc(x):
+        """Big endian k-bit encoding of x"""
+        return [1 if (x & 1 << (k - 1 - i)) else 0 for i in range(k)]
+
+    edges = sorted((max(u, v), min(u, v)) for u, v in G.edges())
+    bits = []
+    curv = 0
+    for v, u in edges:
+        if v == curv:  # current vertex edge
+            bits.append(0)
+            bits.extend(enc(u))
+        elif v == curv + 1:  # next vertex edge
+            curv += 1
+            bits.append(1)
+            bits.extend(enc(u))
+        else:  # skip to vertex v and then add edge to u
+            curv = v
+            bits.append(1)
+            bits.extend(enc(v))
+            bits.append(0)
+            bits.extend(enc(u))
+    if k < 6 and n == (1 << k) and ((-len(bits)) % 6) >= k and curv < (n - 1):
+        # Padding special case: small k, n=2^k,
+        # more than k bits of padding needed,
+        # current vertex is not (n-1) --
+        # appending 1111... would add a loop on (n-1)
+        bits.append(0)
+        bits.extend([1] * ((-len(bits)) % 6))
+    else:
+        bits.extend([1] * ((-len(bits)) % 6))
+
+    data = [
+        (bits[i + 0] << 5)
+        + (bits[i + 1] << 4)
+        + (bits[i + 2] << 3)
+        + (bits[i + 3] << 2)
+        + (bits[i + 4] << 1)
+        + (bits[i + 5] << 0)
+        for i in range(0, len(bits), 6)
+    ]
+
+    for d in data:
+        yield str.encode(chr(d + 63))
+    yield b"\n"


 @nx._dispatchable(graphs=None, returns_graph=True)
@@ -75,7 +135,68 @@ def from_sparse6_bytes(string):
            <https://users.cecs.anu.edu.au/~bdm/data/formats.html>

     """
-    pass
+    if string.startswith(b">>sparse6<<"):
+        string = string[11:]
+    if not string.startswith(b":"):
+        raise NetworkXError("Expected leading colon in sparse6")
+
+    chars = [c - 63 for c in string[1:]]
+    n, data = data_to_n(chars)
+    k = 1
+    while 1 << k < n:
+        k += 1
+
+    def parseData():
+        """Returns stream of pairs b[i], x[i] for sparse6 format."""
+        chunks = iter(data)
+        d = None  # partial data word
+        dLen = 0  # how many unparsed bits are left in d
+
+        while 1:
+            if dLen < 1:
+                try:
+                    d = next(chunks)
+                except StopIteration:
+                    return
+                dLen = 6
+            dLen -= 1
+            b = (d >> dLen) & 1  # grab top remaining bit
+
+            x = d & ((1 << dLen) - 1)  # partially built up value of x
+            xLen = dLen  # how many bits included so far in x
+            while xLen < k:  # now grab full chunks until we have enough
+                try:
+                    d = next(chunks)
+                except StopIteration:
+                    return
+                dLen = 6
+                x = (x << 6) + d
+                xLen += 6
+            x = x >> (xLen - k)  # shift back the extra bits
+            dLen = xLen - k
+            yield b, x
+
+    v = 0
+
+    G = nx.MultiGraph()
+    G.add_nodes_from(range(n))
+
+    multigraph = False
+    for b, x in parseData():
+        if b == 1:
+            v += 1
+        # padding with ones can cause overlarge number here
+        if x >= n or v >= n:
+            break
+        elif x > v:
+            v = x
+        else:
+            if G.has_edge(x, v):
+                multigraph = True
+            G.add_edge(x, v)
+    if not multigraph:
+        G = nx.Graph(G)
+    return G


 def to_sparse6_bytes(G, nodes=None, header=True):
@@ -122,10 +243,13 @@ def to_sparse6_bytes(G, nodes=None, header=True):
            <https://users.cecs.anu.edu.au/~bdm/data/formats.html>

     """
-    pass
+    if nodes is not None:
+        G = G.subgraph(nodes)
+    G = nx.convert_node_labels_to_integers(G, ordering="sorted")
+    return b"".join(_generate_sparse6_bytes(G, nodes, header))


-@open_file(0, mode='rb')
+@open_file(0, mode="rb")
 @nx._dispatchable(graphs=None, returns_graph=True)
 def read_sparse6(path):
     """Read an undirected graph in sparse6 format from path.
@@ -177,11 +301,20 @@ def read_sparse6(path):
            <https://users.cecs.anu.edu.au/~bdm/data/formats.html>

     """
-    pass
-
-
-@not_implemented_for('directed')
-@open_file(1, mode='wb')
+    glist = []
+    for line in path:
+        line = line.strip()
+        if not line:
+            continue
+        glist.append(from_sparse6_bytes(line))
+    if len(glist) == 1:
+        return glist[0]
+    else:
+        return glist
+
+
+@not_implemented_for("directed")
+@open_file(1, mode="wb")
 def write_sparse6(G, path, nodes=None, header=True):
     """Write graph G to given path in sparse6 format.

@@ -236,4 +369,8 @@ def write_sparse6(G, path, nodes=None, header=True):
            <https://users.cecs.anu.edu.au/~bdm/data/formats.html>

     """
-    pass
+    if nodes is not None:
+        G = G.subgraph(nodes)
+    G = nx.convert_node_labels_to_integers(G, ordering="sorted")
+    for b in _generate_sparse6_bytes(G, nodes, header):
+        path.write(b)
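
Because `open_file` passes file-like objects through, an in-memory round trip is a convenient way to exercise both functions:

    import io

    import networkx as nx

    G = nx.path_graph(3)
    buf = io.BytesIO()
    nx.write_sparse6(G, buf)
    H = nx.read_sparse6(io.BytesIO(buf.getvalue()))
    print(sorted(H.edges()))  # [(0, 1), (1, 2)]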
diff --git a/networkx/readwrite/text.py b/networkx/readwrite/text.py
index af38a5513..dc182f598 100644
--- a/networkx/readwrite/text.py
+++ b/networkx/readwrite/text.py
@@ -4,63 +4,80 @@ Text-based visual representations of graphs
 import sys
 import warnings
 from collections import defaultdict
+
 import networkx as nx
 from networkx.utils import open_file
-__all__ = ['forest_str', 'generate_network_text', 'write_network_text']
+
+__all__ = ["forest_str", "generate_network_text", "write_network_text"]


 class BaseGlyphs:
-    pass
+    @classmethod
+    def as_dict(cls):
+        return {
+            a: getattr(cls, a)
+            for a in dir(cls)
+            if not a.startswith("_") and a != "as_dict"
+        }


 class AsciiBaseGlyphs(BaseGlyphs):
-    empty: str = '+'
-    newtree_last: str = '+-- '
-    newtree_mid: str = '+-- '
-    endof_forest: str = '    '
-    within_forest: str = ':   '
-    within_tree: str = '|   '
+    empty: str = "+"
+    newtree_last: str = "+-- "
+    newtree_mid: str = "+-- "
+    endof_forest: str = "    "
+    within_forest: str = ":   "
+    within_tree: str = "|   "


 class AsciiDirectedGlyphs(AsciiBaseGlyphs):
-    last: str = 'L-> '
-    mid: str = '|-> '
-    backedge: str = '<-'
-    vertical_edge: str = '!'
+    last: str = "L-> "
+    mid: str = "|-> "
+    backedge: str = "<-"
+    vertical_edge: str = "!"


 class AsciiUndirectedGlyphs(AsciiBaseGlyphs):
-    last: str = 'L-- '
-    mid: str = '|-- '
-    backedge: str = '-'
-    vertical_edge: str = '|'
+    last: str = "L-- "
+    mid: str = "|-- "
+    backedge: str = "-"
+    vertical_edge: str = "|"


 class UtfBaseGlyphs(BaseGlyphs):
-    empty: str = '╙'
-    newtree_last: str = '╙── '
-    newtree_mid: str = '╟── '
-    endof_forest: str = '    '
-    within_forest: str = '╎   '
-    within_tree: str = '│   '
+    # Notes on available box and arrow characters
+    # https://en.wikipedia.org/wiki/Box-drawing_character
+    # https://stackoverflow.com/questions/2701192/triangle-arrow
+    empty: str = "╙"
+    newtree_last: str = "╙── "
+    newtree_mid: str = "╟── "
+    endof_forest: str = "    "
+    within_forest: str = "╎   "
+    within_tree: str = "│   "


 class UtfDirectedGlyphs(UtfBaseGlyphs):
-    last: str = '└─╼ '
-    mid: str = '├─╼ '
-    backedge: str = '╾'
-    vertical_edge: str = '╽'
+    last: str = "└─╼ "
+    mid: str = "├─╼ "
+    backedge: str = "╾"
+    vertical_edge: str = "╽"


 class UtfUndirectedGlyphs(UtfBaseGlyphs):
-    last: str = '└── '
-    mid: str = '├── '
-    backedge: str = '─'
-    vertical_edge: str = '│'
-
-
-def generate_network_text(graph, with_labels=True, sources=None, max_depth=
-    None, ascii_only=False, vertical_chains=False):
+    last: str = "└── "
+    mid: str = "├── "
+    backedge: str = "─"
+    vertical_edge: str = "│"
+
+
+def generate_network_text(
+    graph,
+    with_labels=True,
+    sources=None,
+    max_depth=None,
+    ascii_only=False,
+    vertical_chains=False,
+):
     """Generate lines in the "network text" format

     This works via a depth-first traversal of the graph and writing a line for
@@ -194,12 +211,225 @@ def generate_network_text(graph, with_labels=True, sources=None, max_depth=
             ├── E
             └── F
     """
-    pass
-
-
-@open_file(1, 'w')
-def write_network_text(graph, path=None, with_labels=True, sources=None,
-    max_depth=None, ascii_only=False, end='\n', vertical_chains=False):
+    from typing import Any, NamedTuple
+
+    class StackFrame(NamedTuple):
+        parent: Any
+        node: Any
+        indents: list
+        this_islast: bool
+        this_vertical: bool
+
+    collapse_attr = "collapse"
+
+    is_directed = graph.is_directed()
+
+    if is_directed:
+        glyphs = AsciiDirectedGlyphs if ascii_only else UtfDirectedGlyphs
+        succ = graph.succ
+        pred = graph.pred
+    else:
+        glyphs = AsciiUndirectedGlyphs if ascii_only else UtfUndirectedGlyphs
+        succ = graph.adj
+        pred = graph.adj
+
+    if isinstance(with_labels, str):
+        label_attr = with_labels
+    elif with_labels:
+        label_attr = "label"
+    else:
+        label_attr = None
+
+    if max_depth == 0:
+        yield glyphs.empty + " ..."
+    elif len(graph.nodes) == 0:
+        yield glyphs.empty
+    else:
+        # If the nodes to traverse are unspecified, find the minimal set of
+        # nodes that will reach the entire graph
+        if sources is None:
+            sources = _find_sources(graph)
+
+        # Populate the stack with each:
+        # 1. parent node in the DFS tree (or None for root nodes),
+        # 2. the current node in the DFS tree,
+        # 3. a list of indentations indicating depth,
+        # 4. a flag indicating if the node is the final one to be written,
+        # 5. a flag indicating whether the node is drawn vertically.
+        # Reverse the stack so sources are popped in the correct order.
+        last_idx = len(sources) - 1
+        stack = [
+            StackFrame(None, node, [], (idx == last_idx), False)
+            for idx, node in enumerate(sources)
+        ][::-1]
+
+        num_skipped_children = defaultdict(lambda: 0)
+        seen_nodes = set()
+        while stack:
+            parent, node, indents, this_islast, this_vertical = stack.pop()
+
+            if node is not Ellipsis:
+                skip = node in seen_nodes
+                if skip:
+                    # Mark that we skipped a parent's child
+                    num_skipped_children[parent] += 1
+
+                if this_islast:
+                    # If we reached the last child of a parent, and we skipped
+                    # any of that parent's children, then we should emit an
+                    # ellipsis at the end after this.
+                    if num_skipped_children[parent] and parent is not None:
+                        # Append the ellipsis to be emitted last
+                        next_islast = True
+                        try_frame = StackFrame(
+                            node, Ellipsis, indents, next_islast, False
+                        )
+                        stack.append(try_frame)
+
+                        # Redo this frame, but not as a last object
+                        next_islast = False
+                        try_frame = StackFrame(
+                            parent, node, indents, next_islast, this_vertical
+                        )
+                        stack.append(try_frame)
+                        continue
+
+                if skip:
+                    continue
+                seen_nodes.add(node)
+
+            if not indents:
+                # Top level items (i.e. trees in the forest) get different
+                # glyphs to indicate they are not actually connected
+                if this_islast:
+                    this_vertical = False
+                    this_prefix = indents + [glyphs.newtree_last]
+                    next_prefix = indents + [glyphs.endof_forest]
+                else:
+                    this_prefix = indents + [glyphs.newtree_mid]
+                    next_prefix = indents + [glyphs.within_forest]
+
+            else:
+                # Non-top-level items
+                if this_vertical:
+                    this_prefix = indents
+                    next_prefix = indents
+                else:
+                    if this_islast:
+                        this_prefix = indents + [glyphs.last]
+                        next_prefix = indents + [glyphs.endof_forest]
+                    else:
+                        this_prefix = indents + [glyphs.mid]
+                        next_prefix = indents + [glyphs.within_tree]
+
+            if node is Ellipsis:
+                label = " ..."
+                suffix = ""
+                children = []
+            else:
+                if label_attr is not None:
+                    label = str(graph.nodes[node].get(label_attr, node))
+                else:
+                    label = str(node)
+
+                # Determine if we want to show the children of this node.
+                if collapse_attr is not None:
+                    collapse = graph.nodes[node].get(collapse_attr, False)
+                else:
+                    collapse = False
+
+                # Determine:
+                # (1) children to traverse into after showing this node.
+                # (2) parents to immediately show to the right of this node.
+                if is_directed:
+                    # In the directed case we must show every successor node
+                    # note: it may be skipped later, but we don't have that
+                    # information here.
+                    children = list(succ[node])
+                    # In the directed case we must show every predecessor
+                    # except for the parent we directly traversed from.
+                    handled_parents = {parent}
+                else:
+                    # Showing only the unseen children results in a more
+                    # concise representation for the undirected case.
+                    children = [
+                        child for child in succ[node] if child not in seen_nodes
+                    ]
+
+                    # In the undirected case, parents are also children, so we
+                    # only need to immediately show the ones we can no longer
+                    # traverse
+                    handled_parents = {*children, parent}
+
+                if max_depth is not None and len(indents) == max_depth - 1:
+                    # Use ellipsis to indicate we have reached maximum depth
+                    if children:
+                        children = [Ellipsis]
+                    handled_parents = {parent}
+
+                if collapse:
+                    # Collapsing a node is the same as reaching maximum depth
+                    if children:
+                        children = [Ellipsis]
+                    handled_parents = {parent}
+
+                # The other parents are other predecessors of this node that
+                # are not handled elsewhere.
+                other_parents = [p for p in pred[node] if p not in handled_parents]
+                if other_parents:
+                    if label_attr is not None:
+                        other_parents_labels = ", ".join(
+                            [
+                                str(graph.nodes[p].get(label_attr, p))
+                                for p in other_parents
+                            ]
+                        )
+                    else:
+                        other_parents_labels = ", ".join(
+                            [str(p) for p in other_parents]
+                        )
+                    suffix = " ".join(["", glyphs.backedge, other_parents_labels])
+                else:
+                    suffix = ""
+
+            # Emit the line for this node; this happens exactly once per node.
+            if this_vertical:
+                yield "".join(this_prefix + [glyphs.vertical_edge])
+
+            yield "".join(this_prefix + [label, suffix])
+
+            if vertical_chains:
+                if is_directed:
+                    num_children = len(set(children))
+                else:
+                    num_children = len(set(children) - {parent})
+                # The next node can be drawn vertically if it is the only
+                # remaining child of this node.
+                next_is_vertical = num_children == 1
+            else:
+                next_is_vertical = False
+
+            # Push children on the stack in reverse order so they are popped in
+            # the original order.
+            for idx, child in enumerate(children[::-1]):
+                next_islast = idx == 0
+                try_frame = StackFrame(
+                    node, child, next_prefix, next_islast, next_is_vertical
+                )
+                stack.append(try_frame)
+
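A short usage sketch on a small DAG; the backedge glyph marks the second predecessor of node 3, and the ellipsis marks a node that was already drawn (expected output shown in the comments).

    import networkx as nx

    G = nx.DiGraph([(0, 1), (0, 2), (1, 3), (2, 3)])
    for line in nx.generate_network_text(G):
        print(line)
    # ╙── 0
    #     ├─╼ 1
    #     │   └─╼ 3 ╾ 2
    #     └─╼ 2
    #         └─╼  ...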
+
+@open_file(1, "w")
+def write_network_text(
+    graph,
+    path=None,
+    with_labels=True,
+    sources=None,
+    max_depth=None,
+    ascii_only=False,
+    end="\n",
+    vertical_chains=False,
+):
     """Creates a nice text representation of a graph

     This works via a depth-first traversal of the graph and writing a line for
@@ -354,18 +584,67 @@ def write_network_text(graph, path=None, with_labels=True, sources=None,
         │   └─╼  ...
         └─╼  ...
     """
-    pass
+    if path is None:
+        # The path is unspecified, write to stdout
+        _write = sys.stdout.write
+    elif hasattr(path, "write"):
+        # The path is already an open file
+        _write = path.write
+    elif callable(path):
+        # The path is a custom callable
+        _write = path
+    else:
+        raise TypeError(type(path))
+
+    for line in generate_network_text(
+        graph,
+        with_labels=with_labels,
+        sources=sources,
+        max_depth=max_depth,
+        ascii_only=ascii_only,
+        vertical_chains=vertical_chains,
+    ):
+        _write(line + end)
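
The `path` handling above accepts a callable as well as a file or None, which is how `forest_str` below captures its output; a small sketch:

    import networkx as nx

    G = nx.path_graph(4)
    lines = []
    nx.write_network_text(G, path=lines.append, end="")  # collect lines
    nx.write_network_text(G, vertical_chains=True)  # path=None -> stdout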


 def _find_sources(graph):
     """
     Determine a minimal set of nodes such that the entire graph is reachable
     """
-    pass
-
-
-def forest_str(graph, with_labels=True, sources=None, write=None,
-    ascii_only=False):
+    # For each connected part of the graph, choose at least
+    # one node as a starting point, preferably without a parent
+    if graph.is_directed():
+        # Choose one node from each SCC with minimum in_degree
+        sccs = list(nx.strongly_connected_components(graph))
+        # Condensing the SCCs forms a DAG; the nodes of the condensation
+        # with in-degree 0 correspond to the SCCs from which every other
+        # node can be reached, so we take one node from each of them.
+        scc_graph = nx.condensation(graph, sccs)
+        supernode_to_nodes = {sn: [] for sn in scc_graph.nodes()}
+        # Note: the order of mapping differs between pypy and cpython
+        # so we have to loop over graph nodes for consistency
+        mapping = scc_graph.graph["mapping"]
+        for n in graph.nodes:
+            sn = mapping[n]
+            supernode_to_nodes[sn].append(n)
+        sources = []
+        for sn in scc_graph.nodes():
+            if scc_graph.in_degree[sn] == 0:
+                scc = supernode_to_nodes[sn]
+                node = min(scc, key=lambda n: graph.in_degree[n])
+                sources.append(node)
+    else:
+        # For undirected graph, the entire graph will be reachable as
+        # long as we consider one node from every connected component
+        sources = [
+            min(cc, key=lambda n: graph.degree[n])
+            for cc in nx.connected_components(graph)
+        ]
+        sources = sorted(sources, key=lambda n: graph.degree[n])
+    return sources
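
The directed branch can be reproduced directly with `nx.condensation`; a sketch on a cycle fed by an extra node, where only the feeder's SCC has in-degree 0:

    import networkx as nx

    G = nx.DiGraph([(0, 1), (1, 2), (2, 0), (3, 2)])
    C = nx.condensation(G)
    roots = [sn for sn in C if C.in_degree[sn] == 0]
    print([sorted(C.nodes[sn]["members"]) for sn in roots])  # [[3]]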
+
+
+def forest_str(graph, with_labels=True, sources=None, write=None, ascii_only=False):
     """Creates a nice utf8 representation of a forest

     This function has been superseded by
@@ -433,7 +712,35 @@ def forest_str(graph, with_labels=True, sources=None, write=None,
         L-- 1
             L-- 2
     """
-    pass
+    msg = (
+        "\nforest_str is deprecated as of version 3.1 and will be removed "
+        "in version 3.3. Use generate_network_text or write_network_text "
+        "instead.\n"
+    )
+    warnings.warn(msg, DeprecationWarning)
+
+    if len(graph.nodes) > 0:
+        if not nx.is_forest(graph):
+            raise nx.NetworkXNotImplemented("input must be a forest or the empty graph")
+
+    printbuf = []
+    if write is None:
+        _write = printbuf.append
+    else:
+        _write = write
+
+    write_network_text(
+        graph,
+        _write,
+        with_labels=with_labels,
+        sources=sources,
+        ascii_only=ascii_only,
+        end="",
+    )
+
+    if write is None:
+        # Only return a string if the custom write function was not specified
+        return "\n".join(printbuf)


 def _parse_network_text(lines):
@@ -455,4 +762,189 @@ def _parse_network_text(lines):
     G: NetworkX graph
         The graph corresponding to the lines in network text format.
     """
-    pass
+    from itertools import chain
+    from typing import Any, NamedTuple
+
+    class ParseStackFrame(NamedTuple):
+        node: Any
+        indent: int
+        has_vertical_child: bool | None
+
+    initial_line_iter = iter(lines)
+
+    is_ascii = None
+    is_directed = None
+
+    ##############
+    # Initial Pass
+    ##############
+
+    # Do an initial pass over the lines to determine what type of graph it is.
+    # Remember what these lines were, so we can reiterate over them in the
+    # parsing pass.
+    initial_lines = []
+    try:
+        first_line = next(initial_line_iter)
+    except StopIteration:
+        ...
+    else:
+        initial_lines.append(first_line)
+        # The first character indicates if it is an ASCII or UTF graph
+        first_char = first_line[0]
+        if first_char in {
+            UtfBaseGlyphs.empty,
+            UtfBaseGlyphs.newtree_mid[0],
+            UtfBaseGlyphs.newtree_last[0],
+        }:
+            is_ascii = False
+        elif first_char in {
+            AsciiBaseGlyphs.empty,
+            AsciiBaseGlyphs.newtree_mid[0],
+            AsciiBaseGlyphs.newtree_last[0],
+        }:
+            is_ascii = True
+        else:
+            raise AssertionError(f"Unexpected first character: {first_char}")
+
+    if is_ascii:
+        directed_glyphs = AsciiDirectedGlyphs.as_dict()
+        undirected_glyphs = AsciiUndirectedGlyphs.as_dict()
+    else:
+        directed_glyphs = UtfDirectedGlyphs.as_dict()
+        undirected_glyphs = UtfUndirectedGlyphs.as_dict()
+
+    # For both directed / undirected glyphs, determine which glyphs never
+    # appear as substrings in the other undirected / directed glyphs.  A glyph
+    # with this property unambiguously indicates whether a graph is directed
+    # or undirected.
+    directed_items = set(directed_glyphs.values())
+    undirected_items = set(undirected_glyphs.values())
+    unambiguous_directed_items = []
+    for item in directed_items:
+        other_items = undirected_items
+        other_supersets = [other for other in other_items if item in other]
+        if not other_supersets:
+            unambiguous_directed_items.append(item)
+    unambiguous_undirected_items = []
+    for item in undirected_items:
+        other_items = directed_items
+        other_supersets = [other for other in other_items if item in other]
+        if not other_supersets:
+            unambiguous_undirected_items.append(item)
+
+    for line in initial_line_iter:
+        initial_lines.append(line)
+        if any(item in line for item in unambiguous_undirected_items):
+            is_directed = False
+            break
+        elif any(item in line for item in unambiguous_directed_items):
+            is_directed = True
+            break
+
+    if is_directed is None:
+        # Not enough information to determine, choose undirected by default
+        is_directed = False
+
+    glyphs = directed_glyphs if is_directed else undirected_glyphs
+
+    # The backedge symbol by itself can be ambiguous, but surrounded by
+    # spaces it becomes unambiguous.
+    backedge_symbol = " " + glyphs["backedge"] + " "
+
+    # Reconstruct an iterator over all of the lines.
+    parsing_line_iter = chain(initial_lines, initial_line_iter)
+
+    ##############
+    # Parsing Pass
+    ##############
+
+    edges = []
+    nodes = []
+    is_empty = None
+
+    noparent = object()  # sentinel value
+
+    # keep a stack of previous nodes that could be parents of subsequent nodes
+    stack = [ParseStackFrame(noparent, -1, None)]
+
+    for line in parsing_line_iter:
+        if line == glyphs["empty"]:
+            # If the line is the empty glyph, we are done.
+            # There shouldn't be anything else after this.
+            is_empty = True
+            continue
+
+        if backedge_symbol in line:
+            # This line has one or more backedges, separate those out
+            node_part, backedge_part = line.split(backedge_symbol)
+            backedge_nodes = [u.strip() for u in backedge_part.split(", ")]
+            # Now the node can be parsed
+            node_part = node_part.rstrip()
+            prefix, node = node_part.rsplit(" ", 1)
+            node = node.strip()
+            # Add the backedges to the edge list
+            edges.extend([(u, node) for u in backedge_nodes])
+        else:
+            # No backedge, the tail of this line is the node
+            prefix, node = line.rsplit(" ", 1)
+            node = node.strip()
+
+        prev = stack.pop()
+
+        if node in glyphs["vertical_edge"]:
+            # Previous node is still the previous node, but we know it will
+            # have exactly one child, which will need to have its nesting level
+            # adjusted.
+            modified_prev = ParseStackFrame(
+                prev.node,
+                prev.indent,
+                True,
+            )
+            stack.append(modified_prev)
+            continue
+
+        # The length of the string before the node characters gives us a hint
+        # about the nesting level. The only case where this doesn't work is
+        # when there are vertical chains, which is handled explicitly.
+        indent = len(prefix)
+        curr = ParseStackFrame(node, indent, None)
+
+        if prev.has_vertical_child:
+            # In this case we know prev must be the parent of our current line,
+            # so we don't have to search the stack. (which is good because the
+            # indentation check wouldn't work in this case).
+            ...
+        else:
+            # If the previous node's nesting level is greater than or equal to
+            # the current node's, then the previous node ended a path and is
+            # not our parent. We can safely pop nodes off the stack until we
+            # find one with a smaller nesting level, which is our parent.
+            while curr.indent <= prev.indent:
+                prev = stack.pop()
+
+        if node == "...":
+            # An ellipsis cannot be a parent, so only restore the previous
+            # frame to the stack.
+            stack.append(prev)
+        else:
+            # The previous and current nodes may still be parents, so add them
+            # back onto the stack.
+            stack.append(prev)
+            stack.append(curr)
+
+            # Add the node and the edge to its parent to the node / edge lists.
+            nodes.append(curr.node)
+            if prev.node is not noparent:
+                edges.append((prev.node, curr.node))
+
+    if is_empty:
+        # Sanity check
+        assert len(nodes) == 0
+
+    # Reconstruct the graph
+    cls = nx.DiGraph if is_directed else nx.Graph
+    new = cls()
+    new.add_nodes_from(nodes)
+    new.add_edges_from(edges)
+    return new
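
A round-trip sketch (`_parse_network_text` is private, so this is for illustration only); note that parsed node labels come back as strings.

    import networkx as nx
    from networkx.readwrite.text import _parse_network_text

    G = nx.path_graph(3)
    H = _parse_network_text(nx.generate_network_text(G))
    print(sorted(H.nodes()))  # ['0', '1', '2']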
diff --git a/networkx/relabel.py b/networkx/relabel.py
index c92e95f46..4b870f726 100644
--- a/networkx/relabel.py
+++ b/networkx/relabel.py
@@ -1,9 +1,11 @@
 import networkx as nx
-__all__ = ['convert_node_labels_to_integers', 'relabel_nodes']

+__all__ = ["convert_node_labels_to_integers", "relabel_nodes"]

-@nx._dispatchable(preserve_all_attrs=True, mutates_input={'not copy': 2},
-    returns_graph=True)
+
+@nx._dispatchable(
+    preserve_all_attrs=True, mutates_input={"not copy": 2}, returns_graph=True
+)
 def relabel_nodes(G, mapping, copy=True):
     """Relabel the nodes of the graph G according to a given mapping.

@@ -115,12 +117,116 @@ def relabel_nodes(G, mapping, copy=True):
     --------
     convert_node_labels_to_integers
     """
-    pass
+    # The mapping may be any callable, e.g. f(old_label) -> new_label, or a
+    # dict; normalize it to a dict here so both cases are handled uniformly.
+    m = {n: mapping(n) for n in G} if callable(mapping) else mapping
+
+    if copy:
+        return _relabel_copy(G, m)
+    else:
+        return _relabel_inplace(G, m)
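
A usage sketch covering both mapping styles; dicts may be partial, while callables are applied to every node.

    import networkx as nx

    G = nx.path_graph(3)
    H = nx.relabel_nodes(G, {0: "a", 1: "b"})  # node 2 keeps its label
    K = nx.relabel_nodes(G, lambda n: n**2)
    print(list(H), list(K))  # ['a', 'b', 2] [0, 1, 4]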
+
+
+def _relabel_inplace(G, mapping):
+    if len(mapping.keys() & mapping.values()) > 0:
+        # the old and new label sets overlap;
+        # can we topologically sort and still do the relabeling?
+        D = nx.DiGraph(list(mapping.items()))
+        D.remove_edges_from(nx.selfloop_edges(D))
+        try:
+            nodes = reversed(list(nx.topological_sort(D)))
+        except nx.NetworkXUnfeasible as err:
+            raise nx.NetworkXUnfeasible(
+                "The node label sets are overlapping and no ordering can "
+                "resolve the mapping. Use copy=True."
+            ) from err
+    else:
+        # non-overlapping label sets, sort them in the order of G nodes
+        nodes = [n for n in G if n in mapping]
+
+    multigraph = G.is_multigraph()
+    directed = G.is_directed()
+
+    for old in nodes:
+        # Test that old is in both mapping and G, otherwise ignore.
+        try:
+            new = mapping[old]
+            G.add_node(new, **G.nodes[old])
+        except KeyError:
+            continue
+        if new == old:
+            continue
+        if multigraph:
+            new_edges = [
+                (new, new if old == target else target, key, data)
+                for (_, target, key, data) in G.edges(old, data=True, keys=True)
+            ]
+            if directed:
+                new_edges += [
+                    (new if old == source else source, new, key, data)
+                    for (source, _, key, data) in G.in_edges(old, data=True, keys=True)
+                ]
+            # Ensure new edges won't overwrite existing ones
+            seen = set()
+            for i, (source, target, key, data) in enumerate(new_edges):
+                if target in G[source] and key in G[source][target]:
+                    new_key = 0 if not isinstance(key, int | float) else key
+                    while new_key in G[source][target] or (target, new_key) in seen:
+                        new_key += 1
+                    new_edges[i] = (source, target, new_key, data)
+                    seen.add((target, new_key))
+        else:
+            new_edges = [
+                (new, new if old == target else target, data)
+                for (_, target, data) in G.edges(old, data=True)
+            ]
+            if directed:
+                new_edges += [
+                    (new if old == source else source, new, data)
+                    for (source, _, data) in G.in_edges(old, data=True)
+                ]
+        G.remove_node(old)
+        G.add_edges_from(new_edges)
+    return G
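
A sketch of the overlapping-labels case handled above: the mapping's keys and values intersect, so the relabeling order comes from the topological sort.

    import networkx as nx

    G = nx.path_graph(3)  # nodes 0, 1, 2
    nx.relabel_nodes(G, {0: 1, 1: 2, 2: 3}, copy=False)
    print(sorted(G))  # [1, 2, 3]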
+
+
+def _relabel_copy(G, mapping):
+    H = G.__class__()
+    H.add_nodes_from(mapping.get(n, n) for n in G)
+    H._node.update((mapping.get(n, n), d.copy()) for n, d in G.nodes.items())
+    if G.is_multigraph():
+        new_edges = [
+            (mapping.get(n1, n1), mapping.get(n2, n2), k, d.copy())
+            for (n1, n2, k, d) in G.edges(keys=True, data=True)
+        ]
+
+        # check for conflicting edge-keys
+        undirected = not G.is_directed()
+        seen_edges = set()
+        for i, (source, target, key, data) in enumerate(new_edges):
+            while (source, target, key) in seen_edges:
+                if not isinstance(key, int | float):
+                    key = 0
+                key += 1
+            seen_edges.add((source, target, key))
+            if undirected:
+                seen_edges.add((target, source, key))
+            new_edges[i] = (source, target, key, data)
+
+        H.add_edges_from(new_edges)
+    else:
+        H.add_edges_from(
+            (mapping.get(n1, n1), mapping.get(n2, n2), d.copy())
+            for (n1, n2, d) in G.edges(data=True)
+        )
+    H.graph.update(G.graph)
+    return H


 @nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
-def convert_node_labels_to_integers(G, first_label=0, ordering='default',
-    label_attribute=None):
+def convert_node_labels_to_integers(
+    G, first_label=0, ordering="default", label_attribute=None
+):
     """Returns a copy of the graph G with the nodes relabeled using
     consecutive integers.

@@ -155,4 +261,25 @@ def convert_node_labels_to_integers(G, first_label=0, ordering='default',
     --------
     relabel_nodes
     """
-    pass
+    N = G.number_of_nodes() + first_label
+    if ordering == "default":
+        mapping = dict(zip(G.nodes(), range(first_label, N)))
+    elif ordering == "sorted":
+        nlist = sorted(G.nodes())
+        mapping = dict(zip(nlist, range(first_label, N)))
+    elif ordering == "increasing degree":
+        dv_pairs = [(d, n) for (n, d) in G.degree()]
+        dv_pairs.sort()  # in-place sort from lowest to highest degree
+        mapping = dict(zip([n for d, n in dv_pairs], range(first_label, N)))
+    elif ordering == "decreasing degree":
+        dv_pairs = [(d, n) for (n, d) in G.degree()]
+        dv_pairs.sort()  # in-place sort from lowest to highest degree
+        dv_pairs.reverse()
+        mapping = dict(zip([n for d, n in dv_pairs], range(first_label, N)))
+    else:
+        raise nx.NetworkXError(f"Unknown node ordering: {ordering}")
+    H = relabel_nodes(G, mapping)
+    # create node attribute with the old label
+    if label_attribute is not None:
+        nx.set_node_attributes(H, {v: k for k, v in mapping.items()}, label_attribute)
+    return H
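
A usage sketch with `ordering="sorted"` and the old labels preserved in a node attribute:

    import networkx as nx

    G = nx.Graph([("c", "b"), ("b", "a")])
    H = nx.convert_node_labels_to_integers(G, ordering="sorted", label_attribute="orig")
    print(sorted(H.nodes(data="orig")))  # [(0, 'a'), (1, 'b'), (2, 'c')]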
diff --git a/networkx/utils/backends.py b/networkx/utils/backends.py
index 692ebe3c7..b48798d80 100644
--- a/networkx/utils/backends.py
+++ b/networkx/utils/backends.py
@@ -190,20 +190,23 @@ Notes
 -   A backend graph instance may have a ``G.__networkx_cache__`` dict to enable
     caching, and care should be taken to clear the cache when appropriate.
 """
+
 import inspect
 import itertools
 import os
 import warnings
 from functools import partial
 from importlib.metadata import entry_points
+
 import networkx as nx
+
 from .decorators import argmap
-__all__ = ['_dispatchable']
+
+__all__ = ["_dispatchable"]


 def _do_nothing():
     """This does nothing at all, yet it helps turn `_dispatchable` into functions."""
-    pass


 def _get_backends(group, *, load_and_call=False):
@@ -228,21 +231,77 @@ def _get_backends(group, *, load_and_call=False):
     The `nx-loopback` backend is removed if it exists, as it is only available during testing.
     A warning is displayed if an error occurs while loading a backend.
     """
-    pass
+    items = entry_points(group=group)
+    rv = {}
+    for ep in items:
+        if ep.name in rv:
+            warnings.warn(
+                f"networkx backend defined more than once: {ep.name}",
+                RuntimeWarning,
+                stacklevel=2,
+            )
+        elif load_and_call:
+            try:
+                rv[ep.name] = ep.load()()
+            except Exception as exc:
+                warnings.warn(
+                    f"Error encountered when loading info for backend {ep.name}: {exc}",
+                    RuntimeWarning,
+                    stacklevel=2,
+                )
+        else:
+            rv[ep.name] = ep
+    rv.pop("nx-loopback", None)
+    return rv
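
The same discovery can be reproduced directly with `importlib.metadata`; the group name below is the real one used here, and the result is empty unless third-party backends are installed.

    from importlib.metadata import entry_points

    eps = entry_points(group="networkx.backends")
    print({ep.name: ep.value for ep in eps})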
+

+backends = _get_backends("networkx.backends")
+backend_info = _get_backends("networkx.backend_info", load_and_call=True)

-backends = _get_backends('networkx.backends')
-backend_info = _get_backends('networkx.backend_info', load_and_call=True)
+# We must import from `.configs` after defining `backends` above
 from .configs import Config, config
-config.backend_priority = [x.strip() for x in os.environ.get(
-    'NETWORKX_BACKEND_PRIORITY', os.environ.get(
-    'NETWORKX_AUTOMATIC_BACKENDS', '')).split(',') if x.strip()]
-config.backends = Config(**{backend: ((cfg if isinstance((cfg := info[
-    'default_config']), Config) else Config(**cfg)) if 'default_config' in
-    info else Config()) for backend, info in backend_info.items()})
-type(config.backends
-    ).__doc__ = 'All installed NetworkX backends and their configs.'
-_loaded_backends = {}
+
+# Get default configuration from environment variables at import time
+config.backend_priority = [
+    x.strip()
+    for x in os.environ.get(
+        "NETWORKX_BACKEND_PRIORITY",
+        os.environ.get("NETWORKX_AUTOMATIC_BACKENDS", ""),
+    ).split(",")
+    if x.strip()
+]
+# Initialize default configuration for backends
+config.backends = Config(
+    **{
+        backend: (
+            cfg if isinstance(cfg := info["default_config"], Config) else Config(**cfg)
+        )
+        if "default_config" in info
+        else Config()
+        for backend, info in backend_info.items()
+    }
+)
+type(config.backends).__doc__ = "All installed NetworkX backends and their configs."
+
+# Load and cache backends on-demand
+_loaded_backends = {}  # type: ignore[var-annotated]
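
Because the priority list is read once at import time, the environment variable must be set before networkx is first imported; a sketch ("parallel" is just an example backend name):

    import os

    os.environ["NETWORKX_BACKEND_PRIORITY"] = "parallel"

    from networkx.utils.backends import config

    print(config.backend_priority)  # ['parallel']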
+
+
+def _always_run(name, args, kwargs):
+    return True
+
+
+def _load_backend(backend_name):
+    if backend_name in _loaded_backends:
+        return _loaded_backends[backend_name]
+    rv = _loaded_backends[backend_name] = backends[backend_name].load()
+    if not hasattr(rv, "can_run"):
+        rv.can_run = _always_run
+    if not hasattr(rv, "should_run"):
+        rv.should_run = _always_run
+    return rv
+
+
 _registered_algorithms = {}


@@ -259,14 +318,27 @@ class _dispatchable:
     For example: `PYTHONPATH=. pytest --backend graphblas --fallback-to-nx`
     Future work: add configuration to control these.
     """
-    _is_testing = False
-    _fallback_to_nx = os.environ.get('NETWORKX_FALLBACK_TO_NX', 'true').strip(
-        ).lower() == 'true'

-    def __new__(cls, func=None, *, name=None, graphs='G', edge_attrs=None,
-        node_attrs=None, preserve_edge_attrs=False, preserve_node_attrs=
-        False, preserve_graph_attrs=False, preserve_all_attrs=False,
-        mutates_input=False, returns_graph=False):
+    _is_testing = False
+    _fallback_to_nx = (
+        os.environ.get("NETWORKX_FALLBACK_TO_NX", "true").strip().lower() == "true"
+    )
+
+    def __new__(
+        cls,
+        func=None,
+        *,
+        name=None,
+        graphs="G",
+        edge_attrs=None,
+        node_attrs=None,
+        preserve_edge_attrs=False,
+        preserve_node_attrs=False,
+        preserve_graph_attrs=False,
+        preserve_all_attrs=False,
+        mutates_input=False,
+        returns_graph=False,
+    ):
         """A decorator that makes certain input graph types dispatch to ``func``'s
         backend implementation.

@@ -359,31 +431,46 @@ class _dispatchable:
             functions that return graphs.
         """
         if func is None:
-            return partial(_dispatchable, name=name, graphs=graphs,
-                edge_attrs=edge_attrs, node_attrs=node_attrs,
+            return partial(
+                _dispatchable,
+                name=name,
+                graphs=graphs,
+                edge_attrs=edge_attrs,
+                node_attrs=node_attrs,
                 preserve_edge_attrs=preserve_edge_attrs,
                 preserve_node_attrs=preserve_node_attrs,
                 preserve_graph_attrs=preserve_graph_attrs,
-                preserve_all_attrs=preserve_all_attrs, mutates_input=
-                mutates_input, returns_graph=returns_graph)
+                preserve_all_attrs=preserve_all_attrs,
+                mutates_input=mutates_input,
+                returns_graph=returns_graph,
+            )
         if isinstance(func, str):
-            raise TypeError("'name' and 'graphs' must be passed by keyword"
-                ) from None
+            raise TypeError("'name' and 'graphs' must be passed by keyword") from None
+        # If name not provided, use the name of the function
         if name is None:
             name = func.__name__
+
         self = object.__new__(cls)
+
+        # standard function-wrapping stuff
+        # __annotations__ not used
         self.__name__ = func.__name__
+        # self.__doc__ = func.__doc__  # __doc__ handled as cached property
         self.__defaults__ = func.__defaults__
+        # We "magically" add `backend=` keyword argument to allow backend to be specified
         if func.__kwdefaults__:
-            self.__kwdefaults__ = {**func.__kwdefaults__, 'backend': None}
+            self.__kwdefaults__ = {**func.__kwdefaults__, "backend": None}
         else:
-            self.__kwdefaults__ = {'backend': None}
+            self.__kwdefaults__ = {"backend": None}
         self.__module__ = func.__module__
         self.__qualname__ = func.__qualname__
         self.__dict__.update(func.__dict__)
         self.__wrapped__ = func
+
+        # Supplement docstring with backend info; compute and cache when needed
         self._orig_doc = func.__doc__
         self._cached_doc = None
+
         self.orig_func = func
         self.name = name
         self.edge_attrs = edge_attrs
@@ -392,62 +479,91 @@ class _dispatchable:
         self.preserve_node_attrs = preserve_node_attrs or preserve_all_attrs
         self.preserve_graph_attrs = preserve_graph_attrs or preserve_all_attrs
         self.mutates_input = mutates_input
+        # Keep `returns_graph` private for now, b/c we may extend info on return types
         self._returns_graph = returns_graph
+
         if edge_attrs is not None and not isinstance(edge_attrs, str | dict):
             raise TypeError(
-                f'Bad type for edge_attrs: {type(edge_attrs)}. Expected str or dict.'
-                ) from None
+                f"Bad type for edge_attrs: {type(edge_attrs)}. Expected str or dict."
+            ) from None
         if node_attrs is not None and not isinstance(node_attrs, str | dict):
             raise TypeError(
-                f'Bad type for node_attrs: {type(node_attrs)}. Expected str or dict.'
-                ) from None
+                f"Bad type for node_attrs: {type(node_attrs)}. Expected str or dict."
+            ) from None
         if not isinstance(self.preserve_edge_attrs, bool | str | dict):
             raise TypeError(
-                f'Bad type for preserve_edge_attrs: {type(self.preserve_edge_attrs)}. Expected bool, str, or dict.'
-                ) from None
+                f"Bad type for preserve_edge_attrs: {type(self.preserve_edge_attrs)}."
+                " Expected bool, str, or dict."
+            ) from None
         if not isinstance(self.preserve_node_attrs, bool | str | dict):
             raise TypeError(
-                f'Bad type for preserve_node_attrs: {type(self.preserve_node_attrs)}. Expected bool, str, or dict.'
-                ) from None
+                f"Bad type for preserve_node_attrs: {type(self.preserve_node_attrs)}."
+                " Expected bool, str, or dict."
+            ) from None
         if not isinstance(self.preserve_graph_attrs, bool | set):
             raise TypeError(
-                f'Bad type for preserve_graph_attrs: {type(self.preserve_graph_attrs)}. Expected bool or set.'
-                ) from None
+                f"Bad type for preserve_graph_attrs: {type(self.preserve_graph_attrs)}."
+                " Expected bool or set."
+            ) from None
         if not isinstance(self.mutates_input, bool | dict):
             raise TypeError(
-                f'Bad type for mutates_input: {type(self.mutates_input)}. Expected bool or dict.'
-                ) from None
+                f"Bad type for mutates_input: {type(self.mutates_input)}."
+                " Expected bool or dict."
+            ) from None
         if not isinstance(self._returns_graph, bool):
             raise TypeError(
-                f'Bad type for returns_graph: {type(self._returns_graph)}. Expected bool.'
-                ) from None
+                f"Bad type for returns_graph: {type(self._returns_graph)}."
+                " Expected bool."
+            ) from None
+
         if isinstance(graphs, str):
             graphs = {graphs: 0}
         elif graphs is None:
             pass
         elif not isinstance(graphs, dict):
             raise TypeError(
-                f'Bad type for graphs: {type(graphs)}. Expected str or dict.'
-                ) from None
+                f"Bad type for graphs: {type(graphs)}. Expected str or dict."
+            ) from None
         elif len(graphs) == 0:
-            raise KeyError("'graphs' must contain at least one variable name"
-                ) from None
+            raise KeyError("'graphs' must contain at least one variable name") from None
+
+        # This dict comprehension is complicated for better performance; equivalent shown below.
         self.optional_graphs = set()
         self.list_graphs = set()
         if graphs is None:
             self.graphs = {}
         else:
-            self.graphs = {(self.optional_graphs.add((val := k[:-1])) or
-                val if (last := k[-1]) == '?' else self.list_graphs.add((
-                val := k[1:-1])) or val if last == ']' else k): v for k, v in
-                graphs.items()}
+            self.graphs = {
+                self.optional_graphs.add(val := k[:-1]) or val
+                if (last := k[-1]) == "?"
+                else self.list_graphs.add(val := k[1:-1]) or val
+                if last == "]"
+                else k: v
+                for k, v in graphs.items()
+            }
+        # The above is equivalent to:
+        # self.optional_graphs = {k[:-1] for k in graphs if k[-1] == "?"}
+        # self.list_graphs = {k[1:-1] for k in graphs if k[-1] == "]"}
+        # self.graphs = {k[:-1] if k[-1] == "?" else k: v for k, v in graphs.items()}
+
+        # Compute and cache the signature on-demand
         self._sig = None
-        self.backends = {backend for backend, info in backend_info.items() if
-            'functions' in info and name in info['functions']}
+
+        # Which backends implement this function?
+        self.backends = {
+            backend
+            for backend, info in backend_info.items()
+            if "functions" in info and name in info["functions"]
+        }
+
         if name in _registered_algorithms:
             raise KeyError(
-                f'Algorithm already exists in dispatch registry: {name}'
-                ) from None
+                f"Algorithm already exists in dispatch registry: {name}"
+            ) from None
+        # Use the magic of `argmap` to turn `self` into a function. This does result
+        # in small additional overhead compared to calling `_dispatchable` directly,
+        # but `argmap` has the magical property that it can stack with other `argmap`
+        # decorators "for free". Being a function is better for REPRs and type-checkers.
         self = argmap(_do_nothing)(self)
         _registered_algorithms[name] = self
         return self
@@ -457,6 +573,7 @@ class _dispatchable:
         """If the cached documentation exists, it is returned.
         Otherwise, the documentation is generated using _make_doc() method,
         cached, and then returned."""
+
         if (rv := self._cached_doc) is not None:
             return rv
         rv = self._cached_doc = self._make_doc()
@@ -466,6 +583,7 @@ class _dispatchable:
     def __doc__(self, val):
         """Sets the original documentation to the given value and resets the
         cached documentation."""
+
         self._orig_doc = val
         self._cached_doc = None

@@ -473,131 +591,245 @@ class _dispatchable:
     def __signature__(self):
         """Return the signature of the original function, with the addition of
         the `backend` and `backend_kwargs` parameters."""
+
         if self._sig is None:
             sig = inspect.signature(self.orig_func)
-            if not any(p.kind == inspect.Parameter.VAR_KEYWORD for p in sig
-                .parameters.values()):
-                sig = sig.replace(parameters=[*sig.parameters.values(),
-                    inspect.Parameter('backend', inspect.Parameter.
-                    KEYWORD_ONLY, default=None), inspect.Parameter(
-                    'backend_kwargs', inspect.Parameter.VAR_KEYWORD)])
+            # `backend` is now a reserved argument used by dispatching.
+            # assert "backend" not in sig.parameters
+            if not any(
+                p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values()
+            ):
+                sig = sig.replace(
+                    parameters=[
+                        *sig.parameters.values(),
+                        inspect.Parameter(
+                            "backend", inspect.Parameter.KEYWORD_ONLY, default=None
+                        ),
+                        inspect.Parameter(
+                            "backend_kwargs", inspect.Parameter.VAR_KEYWORD
+                        ),
+                    ]
+                )
             else:
                 *parameters, var_keyword = sig.parameters.values()
-                sig = sig.replace(parameters=[*parameters, inspect.
-                    Parameter('backend', inspect.Parameter.KEYWORD_ONLY,
-                    default=None), var_keyword])
+                sig = sig.replace(
+                    parameters=[
+                        *parameters,
+                        inspect.Parameter(
+                            "backend", inspect.Parameter.KEYWORD_ONLY, default=None
+                        ),
+                        var_keyword,
+                    ]
+                )
             self._sig = sig
         return self._sig
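
Assuming `inspect` resolves through the `argmap` wrapper to this property (as it does for released dispatchable functions), the extra parameters are visible in the public signature:

    import inspect

    import networkx as nx

    sig = inspect.signature(nx.pagerank)
    print("backend" in sig.parameters)  # True (keyword-only, default None)
    print("backend_kwargs" in sig.parameters)  # True (VAR_KEYWORD)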

     def __call__(self, /, *args, backend=None, **kwargs):
         """Returns the result of the original function, or the backend function if
         the backend is specified and that backend implements `func`."""
+
         if not backends:
+            # Fast path if no backends are installed
             return self.orig_func(*args, **kwargs)
+
+        # Use `backend_name` in this function instead of `backend`
         backend_name = backend
         if backend_name is not None and backend_name not in backends:
-            raise ImportError(f'Unable to load backend: {backend_name}')
+            raise ImportError(f"Unable to load backend: {backend_name}")
+
         graphs_resolved = {}
         for gname, pos in self.graphs.items():
             if pos < len(args):
                 if gname in kwargs:
-                    raise TypeError(
-                        f'{self.name}() got multiple values for {gname!r}')
+                    raise TypeError(f"{self.name}() got multiple values for {gname!r}")
                 val = args[pos]
             elif gname in kwargs:
                 val = kwargs[gname]
             elif gname not in self.optional_graphs:
                 raise TypeError(
-                    f'{self.name}() missing required graph argument: {gname}')
+                    f"{self.name}() missing required graph argument: {gname}"
+                )
             else:
                 continue
             if val is None:
                 if gname not in self.optional_graphs:
                     raise TypeError(
-                        f'{self.name}() required graph argument {gname!r} is None; must be a graph'
-                        )
+                        f"{self.name}() required graph argument {gname!r} is None; must be a graph"
+                    )
             else:
                 graphs_resolved[gname] = val
+
+        # Alternative to the above that does not check duplicated args or missing required graphs.
+        # graphs_resolved = {
+        #     gname: val
+        #     for gname, pos in self.graphs.items()
+        #     if (val := args[pos] if pos < len(args) else kwargs.get(gname)) is not None
+        # }
+
+        # Check if any graph comes from a backend
         if self.list_graphs:
+            # Make sure we don't lose values by consuming an iterator
             args = list(args)
-            for gname in (self.list_graphs & graphs_resolved.keys()):
+            for gname in self.list_graphs & graphs_resolved.keys():
                 val = list(graphs_resolved[gname])
                 graphs_resolved[gname] = val
                 if gname in kwargs:
                     kwargs[gname] = val
                 else:
                     args[self.graphs[gname]] = val
-            has_backends = any(hasattr(g, '__networkx_backend__') if gname
-                 not in self.list_graphs else any(hasattr(g2,
-                '__networkx_backend__') for g2 in g) for gname, g in
-                graphs_resolved.items())
+
+            has_backends = any(
+                hasattr(g, "__networkx_backend__")
+                if gname not in self.list_graphs
+                else any(hasattr(g2, "__networkx_backend__") for g2 in g)
+                for gname, g in graphs_resolved.items()
+            )
             if has_backends:
-                graph_backend_names = {getattr(g, '__networkx_backend__',
-                    'networkx') for gname, g in graphs_resolved.items() if 
-                    gname not in self.list_graphs}
-                for gname in (self.list_graphs & graphs_resolved.keys()):
-                    graph_backend_names.update(getattr(g,
-                        '__networkx_backend__', 'networkx') for g in
-                        graphs_resolved[gname])
+                graph_backend_names = {
+                    getattr(g, "__networkx_backend__", "networkx")
+                    for gname, g in graphs_resolved.items()
+                    if gname not in self.list_graphs
+                }
+                for gname in self.list_graphs & graphs_resolved.keys():
+                    graph_backend_names.update(
+                        getattr(g, "__networkx_backend__", "networkx")
+                        for g in graphs_resolved[gname]
+                    )
         else:
-            has_backends = any(hasattr(g, '__networkx_backend__') for g in
-                graphs_resolved.values())
+            has_backends = any(
+                hasattr(g, "__networkx_backend__") for g in graphs_resolved.values()
+            )
             if has_backends:
-                graph_backend_names = {getattr(g, '__networkx_backend__',
-                    'networkx') for g in graphs_resolved.values()}
+                graph_backend_names = {
+                    getattr(g, "__networkx_backend__", "networkx")
+                    for g in graphs_resolved.values()
+                }
+
         backend_priority = config.backend_priority
         if self._is_testing and backend_priority and backend_name is None:
-            return self._convert_and_call_for_tests(backend_priority[0],
-                args, kwargs, fallback_to_nx=self._fallback_to_nx)
+            # Special path if we are running networkx tests with a backend.
+            # This even runs for (and handles) functions that mutate input graphs.
+            return self._convert_and_call_for_tests(
+                backend_priority[0],
+                args,
+                kwargs,
+                fallback_to_nx=self._fallback_to_nx,
+            )
+
         if has_backends:
-            backend_names = graph_backend_names - {'networkx'}
+            # Dispatchable graphs found! Dispatch to backend function.
+            # We don't handle calls with different backend graphs yet,
+            # but we may be able to convert additional networkx graphs.
+            backend_names = graph_backend_names - {"networkx"}
             if len(backend_names) != 1:
+                # Future work: convert between backends and run if multiple backends found
                 raise TypeError(
-                    f'{self.name}() graphs must all be from the same backend, found {backend_names}'
-                    )
+                    f"{self.name}() graphs must all be from the same backend, found {backend_names}"
+                )
             [graph_backend_name] = backend_names
             if backend_name is not None and backend_name != graph_backend_name:
+                # Future work: convert between backends to `backend_name` backend
                 raise TypeError(
-                    f'{self.name}() is unable to convert graph from backend {graph_backend_name!r} to the specified backend {backend_name!r}.'
-                    )
+                    f"{self.name}() is unable to convert graph from backend {graph_backend_name!r} "
+                    f"to the specified backend {backend_name!r}."
+                )
             if graph_backend_name not in backends:
-                raise ImportError(
-                    f'Unable to load backend: {graph_backend_name}')
-            if ('networkx' in graph_backend_names and graph_backend_name not in
-                backend_priority):
+                raise ImportError(f"Unable to load backend: {graph_backend_name}")
+            if (
+                "networkx" in graph_backend_names
+                and graph_backend_name not in backend_priority
+            ):
+                # Not configured to convert networkx graphs to this backend
                 raise TypeError(
-                    f'Unable to convert inputs and run {self.name}. {self.name}() has networkx and {graph_backend_name} graphs, but NetworkX is not configured to automatically convert graphs from networkx to {graph_backend_name}.'
-                    )
+                    f"Unable to convert inputs and run {self.name}. "
+                    f"{self.name}() has networkx and {graph_backend_name} graphs, but NetworkX is not "
+                    f"configured to automatically convert graphs from networkx to {graph_backend_name}."
+                )
             backend = _load_backend(graph_backend_name)
             if hasattr(backend, self.name):
-                if 'networkx' in graph_backend_names:
-                    return self._convert_and_call(graph_backend_name, args,
-                        kwargs, fallback_to_nx=self._fallback_to_nx)
+                if "networkx" in graph_backend_names:
+                    # We need to convert networkx graphs to backend graphs.
+                    # There is currently no need to check `self.mutates_input` here.
+                    return self._convert_and_call(
+                        graph_backend_name,
+                        args,
+                        kwargs,
+                        fallback_to_nx=self._fallback_to_nx,
+                    )
+                # All graphs are backend graphs--no need to convert!
                 return getattr(backend, self.name)(*args, **kwargs)
+            # Future work: try to convert and run with other backends in backend_priority
             raise nx.NetworkXNotImplemented(
-                f"'{self.name}' not implemented by {graph_backend_name}")
+                f"'{self.name}' not implemented by {graph_backend_name}"
+            )
+
+        # If the backend was explicitly given by the user, we need to use it no matter what
         if backend_name is not None:
-            return self._convert_and_call(backend_name, args, kwargs,
-                fallback_to_nx=False)
-        if not self._returns_graph and (not self.mutates_input or 
-            isinstance(self.mutates_input, dict) and any(not (args[arg_pos] if
-            len(args) > arg_pos else kwargs.get(arg_name[4:], True)) if
-            arg_name.startswith('not ') else (args[arg_pos] if len(args) >
-            arg_pos else kwargs.get(arg_name)) is not None for arg_name,
-            arg_pos in self.mutates_input.items())):
+            return self._convert_and_call(
+                backend_name, args, kwargs, fallback_to_nx=False
+            )
+
+        # Only networkx graphs; try to convert and run with a backend with automatic
+        # conversion, but don't do this by default for graph generators or loaders,
+        # or if the function mutates an input graph or returns a graph.
+        # Only convert and run if `backend.should_run(...)` returns True.
+        if (
+            not self._returns_graph
+            and (
+                not self.mutates_input
+                or isinstance(self.mutates_input, dict)
+                # If a key in `mutates_input` begins with "not ", assume the
+                # corresponding argument is boolean; otherwise treat it as a node
+                # or edge attribute and check whether it is not None.
+                and any(
+                    not (
+                        args[arg_pos]
+                        if len(args) > arg_pos
+                        else kwargs.get(arg_name[4:], True)
+                    )
+                    if arg_name.startswith("not ")
+                    else (
+                        args[arg_pos] if len(args) > arg_pos else kwargs.get(arg_name)
+                    )
+                    is not None
+                    for arg_name, arg_pos in self.mutates_input.items()
+                )
+            )
+        ):
+            # Should we warn or log if we don't convert b/c the input will be mutated?
             for backend_name in backend_priority:
                 if self._should_backend_run(backend_name, *args, **kwargs):
-                    return self._convert_and_call(backend_name, args,
-                        kwargs, fallback_to_nx=self._fallback_to_nx)
+                    return self._convert_and_call(
+                        backend_name,
+                        args,
+                        kwargs,
+                        fallback_to_nx=self._fallback_to_nx,
+                    )
+        # Default: run with networkx on networkx inputs
         return self.orig_func(*args, **kwargs)
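
For orientation, here is how these dispatch rules look from the caller's side. This is a minimal sketch: the backend name "fast_backend" is hypothetical and assumes such a backend is installed and registered.

    import networkx as nx

    G = nx.karate_club_graph()

    # 1) Explicit backend: used no matter what (raises if it can't run).
    # nx.pagerank(G, backend="fast_backend")

    # 2) Automatic conversion: a backend listed in `backend_priority` may be
    #    tried for plain calls, subject to can_run/should_run below.
    # nx.config.backend_priority = ["fast_backend"]

    # 3) Default: with nothing configured, networkx runs its own implementation.
    print(sum(nx.pagerank(G).values()))  # ~1.0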

     def _can_backend_run(self, backend_name, /, *args, **kwargs):
         """Can the specified backend run this algorithm with these arguments?"""
-        pass
+        backend = _load_backend(backend_name)
+        # `backend.can_run` and `backend.should_run` may return strings that describe
+        # why they can't or shouldn't be run. We plan to use the strings in the future.
+        return (
+            hasattr(backend, self.name)
+            and (can_run := backend.can_run(self.name, args, kwargs))
+            and not isinstance(can_run, str)
+        )

     def _should_backend_run(self, backend_name, /, *args, **kwargs):
         """Can/should the specified backend run this algorithm with these arguments?"""
-        pass
+        backend = _load_backend(backend_name)
+        # `backend.can_run` and `backend.should_run` may return strings that describe
+        # why they can't or shouldn't be run. We plan to use the strings in the future.
+        return (
+            hasattr(backend, self.name)
+            and (can_run := backend.can_run(self.name, args, kwargs))
+            and not isinstance(can_run, str)
+            and (should_run := backend.should_run(self.name, args, kwargs))
+            and not isinstance(should_run, str)
+        )
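
Both helpers probe the backend object itself. A sketch of the backend-side hooks they expect; only the `can_run`/`should_run` names and the string convention come from the code above, the class and its logic are hypothetical:

    class SketchBackend:
        @staticmethod
        def can_run(name, args, kwargs):
            # True/False, or a string explaining why not; a string is
            # treated as "cannot run" by _can_backend_run above.
            return name == "pagerank" or f"{name!r} is not implemented here"

        @staticmethod
        def should_run(name, args, kwargs):
            # A backend that *can* run may still decline, e.g. for tiny
            # graphs where conversion overhead would dominate.
            return True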

     def _convert_arguments(self, backend_name, args, kwargs, *, use_cache):
         """Convert graph arguments to the specified backend.
@@ -606,22 +838,686 @@ class _dispatchable:
         -------
         args tuple and kwargs dict
         """
-        pass
+        bound = self.__signature__.bind(*args, **kwargs)
+        bound.apply_defaults()
+        if not self.graphs:
+            bound_kwargs = bound.kwargs
+            del bound_kwargs["backend"]
+            return bound.args, bound_kwargs
+        # Convert graphs into backend graph-like object
+        # Include the edge and/or node labels if provided to the algorithm
+        preserve_edge_attrs = self.preserve_edge_attrs
+        edge_attrs = self.edge_attrs
+        if preserve_edge_attrs is False:
+            # e.g. `preserve_edge_attrs=False`
+            pass
+        elif preserve_edge_attrs is True:
+            # e.g. `preserve_edge_attrs=True`
+            edge_attrs = None
+        elif isinstance(preserve_edge_attrs, str):
+            if bound.arguments[preserve_edge_attrs] is True or callable(
+                bound.arguments[preserve_edge_attrs]
+            ):
+                # e.g. `preserve_edge_attrs="attr"` and `func(attr=True)`
+                # e.g. `preserve_edge_attrs="attr"` and `func(attr=myfunc)`
+                preserve_edge_attrs = True
+                edge_attrs = None
+            elif bound.arguments[preserve_edge_attrs] is False and (
+                isinstance(edge_attrs, str)
+                and edge_attrs == preserve_edge_attrs
+                or isinstance(edge_attrs, dict)
+                and preserve_edge_attrs in edge_attrs
+            ):
+                # e.g. `preserve_edge_attrs="attr"` and `func(attr=False)`
+                # Treat `False` argument as meaning "preserve_edge_data=False"
+                # and not `False` as the edge attribute to use.
+                preserve_edge_attrs = False
+                edge_attrs = None
+            else:
+                # e.g. `preserve_edge_attrs="attr"` and `func(attr="weight")`
+                preserve_edge_attrs = False
+        # Else: e.g. `preserve_edge_attrs={"G": {"weight": 1}}`

-    def _convert_and_call(self, backend_name, args, kwargs, *,
-        fallback_to_nx=False):
-        """Call this dispatchable function with a backend, converting graphs if necessary."""
-        pass
+        if edge_attrs is None:
+            # May have been set to None above b/c all attributes are preserved
+            pass
+        elif isinstance(edge_attrs, str):
+            if edge_attrs[0] == "[":
+                # e.g. `edge_attrs="[edge_attributes]"` (argument of list of attributes)
+                # e.g. `func(edge_attributes=["foo", "bar"])`
+                edge_attrs = {
+                    edge_attr: 1 for edge_attr in bound.arguments[edge_attrs[1:-1]]
+                }
+            elif callable(bound.arguments[edge_attrs]):
+                # e.g. `edge_attrs="weight"` and `func(weight=myfunc)`
+                preserve_edge_attrs = True
+                edge_attrs = None
+            elif bound.arguments[edge_attrs] is not None:
+                # e.g. `edge_attrs="weight"` and `func(weight="foo")` (default of 1)
+                edge_attrs = {bound.arguments[edge_attrs]: 1}
+            elif self.name == "to_numpy_array" and hasattr(
+                bound.arguments["dtype"], "names"
+            ):
+                # Custom handling: attributes may be obtained from `dtype`
+                edge_attrs = {
+                    edge_attr: 1 for edge_attr in bound.arguments["dtype"].names
+                }
+            else:
+                # e.g. `edge_attrs="weight"` and `func(weight=None)`
+                edge_attrs = None
+        else:
+            # e.g. `edge_attrs={"attr": "default"}` and `func(attr="foo", default=7)`
+            # e.g. `edge_attrs={"attr": 0}` and `func(attr="foo")`
+            edge_attrs = {
+                edge_attr: bound.arguments.get(val, 1) if isinstance(val, str) else val
+                for key, val in edge_attrs.items()
+                if (edge_attr := bound.arguments[key]) is not None
+            }
+
+        preserve_node_attrs = self.preserve_node_attrs
+        node_attrs = self.node_attrs
+        if preserve_node_attrs is False:
+            # e.g. `preserve_node_attrs=False`
+            pass
+        elif preserve_node_attrs is True:
+            # e.g. `preserve_node_attrs=True`
+            node_attrs = None
+        elif isinstance(preserve_node_attrs, str):
+            if bound.arguments[preserve_node_attrs] is True or callable(
+                bound.arguments[preserve_node_attrs]
+            ):
+                # e.g. `preserve_node_attrs="attr"` and `func(attr=True)`
+                # e.g. `preserve_node_attrs="attr"` and `func(attr=myfunc)`
+                preserve_node_attrs = True
+                node_attrs = None
+            elif bound.arguments[preserve_node_attrs] is False and (
+                isinstance(node_attrs, str)
+                and node_attrs == preserve_node_attrs
+                or isinstance(node_attrs, dict)
+                and preserve_node_attrs in node_attrs
+            ):
+                # e.g. `preserve_node_attrs="attr"` and `func(attr=False)`
+                # Treat `False` argument as meaning "preserve_node_data=False"
+                # and not `False` as the node attribute to use. Is this used?
+                preserve_node_attrs = False
+                node_attrs = None
+            else:
+                # e.g. `preserve_node_attrs="attr"` and `func(attr="weight")`
+                preserve_node_attrs = False
+        # Else: e.g. `preserve_node_attrs={"G": {"pos": None}}`
+
+        if node_attrs is None:
+            # May have been set to None above b/c all attributes are preserved
+            pass
+        elif isinstance(node_attrs, str):
+            if node_attrs[0] == "[":
+                # e.g. `node_attrs="[node_attributes]"` (argument of list of attributes)
+                # e.g. `func(node_attributes=["foo", "bar"])`
+                node_attrs = {
+                    node_attr: None for node_attr in bound.arguments[node_attrs[1:-1]]
+                }
+            elif callable(bound.arguments[node_attrs]):
+                # e.g. `node_attrs="weight"` and `func(weight=myfunc)`
+                preserve_node_attrs = True
+                node_attrs = None
+            elif bound.arguments[node_attrs] is not None:
+                # e.g. `node_attrs="weight"` and `func(weight="foo")`
+                node_attrs = {bound.arguments[node_attrs]: None}
+            else:
+                # e.g. `node_attrs="weight"` and `func(weight=None)`
+                node_attrs = None
+        else:
+            # e.g. `node_attrs={"attr": "default"}` and `func(attr="foo", default=7)`
+            # e.g. `node_attrs={"attr": 0}` and `func(attr="foo")`
+            node_attrs = {
+                node_attr: bound.arguments.get(val) if isinstance(val, str) else val
+                for key, val in node_attrs.items()
+                if (node_attr := bound.arguments[key]) is not None
+            }
+
+        preserve_graph_attrs = self.preserve_graph_attrs
+
+        # It should be safe to assume that we either have networkx graphs or backend graphs.
+        # Future work: allow conversions between backends.
+        for gname in self.graphs:
+            if gname in self.list_graphs:
+                bound.arguments[gname] = [
+                    self._convert_graph(
+                        backend_name,
+                        g,
+                        edge_attrs=edge_attrs,
+                        node_attrs=node_attrs,
+                        preserve_edge_attrs=preserve_edge_attrs,
+                        preserve_node_attrs=preserve_node_attrs,
+                        preserve_graph_attrs=preserve_graph_attrs,
+                        graph_name=gname,
+                        use_cache=use_cache,
+                    )
+                    if getattr(g, "__networkx_backend__", "networkx") == "networkx"
+                    else g
+                    for g in bound.arguments[gname]
+                ]
+            else:
+                graph = bound.arguments[gname]
+                if graph is None:
+                    if gname in self.optional_graphs:
+                        continue
+                    raise TypeError(
+                        f"Missing required graph argument `{gname}` in {self.name} function"
+                    )
+                if isinstance(preserve_edge_attrs, dict):
+                    preserve_edges = False
+                    edges = preserve_edge_attrs.get(gname, edge_attrs)
+                else:
+                    preserve_edges = preserve_edge_attrs
+                    edges = edge_attrs
+                if isinstance(preserve_node_attrs, dict):
+                    preserve_nodes = False
+                    nodes = preserve_node_attrs.get(gname, node_attrs)
+                else:
+                    preserve_nodes = preserve_node_attrs
+                    nodes = node_attrs
+                if isinstance(preserve_graph_attrs, set):
+                    preserve_graph = gname in preserve_graph_attrs
+                else:
+                    preserve_graph = preserve_graph_attrs
+                if getattr(graph, "__networkx_backend__", "networkx") == "networkx":
+                    bound.arguments[gname] = self._convert_graph(
+                        backend_name,
+                        graph,
+                        edge_attrs=edges,
+                        node_attrs=nodes,
+                        preserve_edge_attrs=preserve_edges,
+                        preserve_node_attrs=preserve_nodes,
+                        preserve_graph_attrs=preserve_graph,
+                        graph_name=gname,
+                        use_cache=use_cache,
+                    )
+        bound_kwargs = bound.kwargs
+        del bound_kwargs["backend"]
+        return bound.args, bound_kwargs
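
To make the string-spec resolution above concrete, here is a simplified sketch (names invented; the real method also handles callables, "[list]" parameters, and dict specs): `edge_attrs="weight"` names a parameter whose value is the attribute to convert, with missing edge values defaulting to 1.

    def resolve_edge_attrs(spec, bound_arguments):
        # Sketch of the string case only: spec names a parameter.
        value = bound_arguments.get(spec)
        if value is None:
            return None       # e.g. func(weight=None): no edge data needed
        return {value: 1}     # e.g. func(weight="capacity") -> {"capacity": 1}

    assert resolve_edge_attrs("weight", {"weight": "capacity"}) == {"capacity": 1}
    assert resolve_edge_attrs("weight", {"weight": None}) is None

Node attributes follow the same shape, but default to None rather than 1.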
+
+    def _convert_graph(
+        self,
+        backend_name,
+        graph,
+        *,
+        edge_attrs,
+        node_attrs,
+        preserve_edge_attrs,
+        preserve_node_attrs,
+        preserve_graph_attrs,
+        graph_name,
+        use_cache,
+    ):
+        if (
+            use_cache
+            and (nx_cache := getattr(graph, "__networkx_cache__", None)) is not None
+        ):
+            cache = nx_cache.setdefault("backends", {}).setdefault(backend_name, {})
+            # edge_attrs: dict | None
+            # node_attrs: dict | None
+            # preserve_edge_attrs: bool (False if edge_attrs is not None)
+            # preserve_node_attrs: bool (False if node_attrs is not None)
+            # preserve_graph_attrs: bool
+            key = edge_key, node_key, graph_key = (
+                frozenset(edge_attrs.items())
+                if edge_attrs is not None
+                else preserve_edge_attrs,
+                frozenset(node_attrs.items())
+                if node_attrs is not None
+                else preserve_node_attrs,
+                preserve_graph_attrs,
+            )
+            if cache:
+                warning_message = (
+                    f"Using cached graph for {backend_name!r} backend in "
+                    f"call to {self.name}.\n\nFor the cache to be consistent "
+                    "(i.e., correct), the input graph must not have been "
+                    "manually mutated since the cached graph was created. "
+                    "Examples of manually mutating the graph data structures "
+                    "resulting in an inconsistent cache include:\n\n"
+                    "    >>> G[u][v][key] = val\n\n"
+                    "and\n\n"
+                    "    >>> for u, v, d in G.edges(data=True):\n"
+                    "    ...     d[key] = val\n\n"
+                    "Using methods such as `G.add_edge(u, v, weight=val)` "
+                    "will correctly clear the cache to keep it consistent. "
+                    "You may also use `G.__networkx_cache__.clear()` to "
+                    "manually clear the cache, or set `G.__networkx_cache__` "
+                    "to None to disable caching for G. Enable or disable "
+                    "caching via `nx.config.cache_converted_graphs` config."
+                )
+                # Do a simple search for a cached graph with compatible data.
+                # For example, if we need a single attribute, then it's okay
+                # to use a cached graph that preserved all attributes.
+                # This looks for an exact match first.
+                for compat_key in itertools.product(
+                    (edge_key, True) if edge_key is not True else (True,),
+                    (node_key, True) if node_key is not True else (True,),
+                    (graph_key, True) if graph_key is not True else (True,),
+                ):
+                    if (rv := cache.get(compat_key)) is not None:
+                        warnings.warn(warning_message)
+                        return rv
+                if edge_key is not True and node_key is not True:
+                    # Iterate over the items in `cache` to see if any are compatible.
+                    # For example, if no edge attributes are needed, then a graph
+                    # with any edge attribute will suffice. We use the same logic
+                    # below (but switched) to clear unnecessary items from the cache.
+                    # Use `list(cache.items())` to be thread-safe.
+                    for (ekey, nkey, gkey), val in list(cache.items()):
+                        if edge_key is False or ekey is True:
+                            pass
+                        elif (
+                            edge_key is True
+                            or ekey is False
+                            or not edge_key.issubset(ekey)
+                        ):
+                            continue
+                        if node_key is False or nkey is True:
+                            pass
+                        elif (
+                            node_key is True
+                            or nkey is False
+                            or not node_key.issubset(nkey)
+                        ):
+                            continue
+                        if graph_key and not gkey:
+                            continue
+                        warnings.warn(warning_message)
+                        return val
+
+        backend = _load_backend(backend_name)
+        rv = backend.convert_from_nx(
+            graph,
+            edge_attrs=edge_attrs,
+            node_attrs=node_attrs,
+            preserve_edge_attrs=preserve_edge_attrs,
+            preserve_node_attrs=preserve_node_attrs,
+            preserve_graph_attrs=preserve_graph_attrs,
+            name=self.name,
+            graph_name=graph_name,
+        )
+        if use_cache and nx_cache is not None:
+            # Remove old cached items that are no longer necessary since they
+            # are dominated/subsumed/outdated by what was just calculated.
+            # This uses the same logic as above, but with keys switched.
+            cache[key] = rv  # Set at beginning to be thread-safe
+            for cur_key in list(cache):
+                if cur_key == key:
+                    continue
+                ekey, nkey, gkey = cur_key
+                if ekey is False or edge_key is True:
+                    pass
+                elif ekey is True or edge_key is False or not ekey.issubset(edge_key):
+                    continue
+                if nkey is False or node_key is True:
+                    pass
+                elif nkey is True or node_key is False or not nkey.issubset(node_key):
+                    continue
+                if gkey and not graph_key:
+                    continue
+                cache.pop(cur_key, None)  # Use pop instead of del to be thread-safe

-    def _convert_and_call_for_tests(self, backend_name, args, kwargs, *,
-        fallback_to_nx=False):
+        return rv
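
A sketch of the cache layout this method maintains (the "backends" key and the three-part tuple keys follow the code above; the backend name is hypothetical):

    import networkx as nx

    # G.__networkx_cache__["backends"]["fast_backend"] maps
    # (edge_key, node_key, graph_key) tuples to converted graphs, e.g.
    # (frozenset({("weight", 1)}), False, False) -> <backend graph>
    G = nx.path_graph(3)
    if G.__networkx_cache__ is not None:
        G.__networkx_cache__.clear()  # manual invalidation, per the warning text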
+
+    def _convert_and_call(self, backend_name, args, kwargs, *, fallback_to_nx=False):
+        """Call this dispatchable function with a backend, converting graphs if necessary."""
+        backend = _load_backend(backend_name)
+        if not self._can_backend_run(backend_name, *args, **kwargs):
+            if fallback_to_nx:
+                return self.orig_func(*args, **kwargs)
+            msg = f"'{self.name}' not implemented by {backend_name}"
+            if hasattr(backend, self.name):
+                msg += " with the given arguments"
+            raise RuntimeError(msg)
+
+        try:
+            converted_args, converted_kwargs = self._convert_arguments(
+                backend_name, args, kwargs, use_cache=config.cache_converted_graphs
+            )
+            result = getattr(backend, self.name)(*converted_args, **converted_kwargs)
+        except (NotImplementedError, nx.NetworkXNotImplemented) as exc:
+            if fallback_to_nx:
+                return self.orig_func(*args, **kwargs)
+            raise
+
+        return result
+
+    def _convert_and_call_for_tests(
+        self, backend_name, args, kwargs, *, fallback_to_nx=False
+    ):
         """Call this dispatchable function with a backend; for use with testing."""
-        pass
+        backend = _load_backend(backend_name)
+        if not self._can_backend_run(backend_name, *args, **kwargs):
+            if fallback_to_nx or not self.graphs:
+                return self.orig_func(*args, **kwargs)
+
+            import pytest
+
+            msg = f"'{self.name}' not implemented by {backend_name}"
+            if hasattr(backend, self.name):
+                msg += " with the given arguments"
+            pytest.xfail(msg)
+
+        from collections.abc import Iterable, Iterator, Mapping
+        from copy import copy
+        from io import BufferedReader, BytesIO, StringIO, TextIOWrapper
+        from itertools import tee
+        from random import Random
+
+        import numpy as np
+        from numpy.random import Generator, RandomState
+        from scipy.sparse import sparray
+
+        # We sometimes compare the backend result to the original result,
+        # so we need two sets of arguments. We tee iterators and copy
+        # random state so that they may be used twice.
+        if not args:
+            args1 = args2 = args
+        else:
+            args1, args2 = zip(
+                *(
+                    (arg, copy(arg))
+                    if isinstance(
+                        arg, BytesIO | StringIO | Random | Generator | RandomState
+                    )
+                    else tee(arg)
+                    if isinstance(arg, Iterator)
+                    and not isinstance(arg, BufferedReader | TextIOWrapper)
+                    else (arg, arg)
+                    for arg in args
+                )
+            )
+        if not kwargs:
+            kwargs1 = kwargs2 = kwargs
+        else:
+            kwargs1, kwargs2 = zip(
+                *(
+                    ((k, v), (k, copy(v)))
+                    if isinstance(
+                        v, BytesIO | StringIO | Random | Generator | RandomState
+                    )
+                    else ((k, (teed := tee(v))[0]), (k, teed[1]))
+                    if isinstance(v, Iterator)
+                    and not isinstance(v, BufferedReader | TextIOWrapper)
+                    else ((k, v), (k, v))
+                    for k, v in kwargs.items()
+                )
+            )
+            kwargs1 = dict(kwargs1)
+            kwargs2 = dict(kwargs2)
+        try:
+            converted_args, converted_kwargs = self._convert_arguments(
+                backend_name, args1, kwargs1, use_cache=False
+            )
+            result = getattr(backend, self.name)(*converted_args, **converted_kwargs)
+        except (NotImplementedError, nx.NetworkXNotImplemented) as exc:
+            if fallback_to_nx:
+                return self.orig_func(*args2, **kwargs2)
+            import pytest
+
+            pytest.xfail(
+                exc.args[0] if exc.args else f"{self.name} raised {type(exc).__name__}"
+            )
+        # Verify that `self._returns_graph` is correct. This compares the return type
+        # to the type expected from `self._returns_graph`. This handles tuple and list
+        # return types, but *does not* catch functions that yield graphs.
+        if (
+            self._returns_graph
+            != (
+                isinstance(result, nx.Graph)
+                or hasattr(result, "__networkx_backend__")
+                or isinstance(result, tuple | list)
+                and any(
+                    isinstance(x, nx.Graph) or hasattr(x, "__networkx_backend__")
+                    for x in result
+                )
+            )
+            and not (
+                # May return Graph or None
+                self.name in {"check_planarity", "check_planarity_recursive"}
+                and any(x is None for x in result)
+            )
+            and not (
+                # May return Graph or dict
+                self.name in {"held_karp_ascent"}
+                and any(isinstance(x, dict) for x in result)
+            )
+            and self.name
+            not in {
+                # yields graphs
+                "all_triads",
+                "general_k_edge_subgraphs",
+                # yields graphs or arrays
+                "nonisomorphic_trees",
+            }
+        ):
+            raise RuntimeError(f"`returns_graph` is incorrect for {self.name}")
+
+        def check_result(val, depth=0):
+            if isinstance(val, np.number):
+                raise RuntimeError(
+                    f"{self.name} returned a numpy scalar {val} ({type(val)}, depth={depth})"
+                )
+            if isinstance(val, np.ndarray | sparray):
+                return
+            if isinstance(val, nx.Graph):
+                check_result(val._node, depth=depth + 1)
+                check_result(val._adj, depth=depth + 1)
+                return
+            if isinstance(val, Iterator):
+                raise NotImplementedError
+            if isinstance(val, Iterable) and not isinstance(val, str):
+                for x in val:
+                    check_result(x, depth=depth + 1)
+            if isinstance(val, Mapping):
+                for x in val.values():
+                    check_result(x, depth=depth + 1)
+
+        def check_iterator(it):
+            for val in it:
+                try:
+                    check_result(val)
+                except RuntimeError as exc:
+                    raise RuntimeError(
+                        f"{self.name} returned a numpy scalar {val} ({type(val)})"
+                    ) from exc
+                yield val
+
+        if self.name in {"from_edgelist"}:
+            # numpy scalars are explicitly given as values in some tests
+            pass
+        elif isinstance(result, Iterator):
+            result = check_iterator(result)
+        else:
+            try:
+                check_result(result)
+            except RuntimeError as exc:
+                raise RuntimeError(
+                    f"{self.name} returned a numpy scalar {result} ({type(result)})"
+                ) from exc
+
+        if self.name in {
+            "edmonds_karp",
+            "barycenter",
+            "contracted_edge",
+            "contracted_nodes",
+            "stochastic_graph",
+            "relabel_nodes",
+            "maximum_branching",
+            "incremental_closeness_centrality",
+            "minimal_branching",
+            "minimum_spanning_arborescence",
+            "recursive_simple_cycles",
+            "connected_double_edge_swap",
+        }:
+            # Special-case algorithms that mutate input graphs
+            bound = self.__signature__.bind(*converted_args, **converted_kwargs)
+            bound.apply_defaults()
+            bound2 = self.__signature__.bind(*args2, **kwargs2)
+            bound2.apply_defaults()
+            if self.name in {
+                "minimal_branching",
+                "minimum_spanning_arborescence",
+                "recursive_simple_cycles",
+                "connected_double_edge_swap",
+            }:
+                G1 = backend.convert_to_nx(bound.arguments["G"])
+                G2 = bound2.arguments["G"]
+                G2._adj = G1._adj
+                nx._clear_cache(G2)
+            elif self.name == "edmonds_karp":
+                R1 = backend.convert_to_nx(bound.arguments["residual"])
+                R2 = bound2.arguments["residual"]
+                if R1 is not None and R2 is not None:
+                    for k, v in R1.edges.items():
+                        R2.edges[k]["flow"] = v["flow"]
+                    R2.graph.update(R1.graph)
+                    nx._clear_cache(R2)
+            elif self.name == "barycenter" and bound.arguments["attr"] is not None:
+                G1 = backend.convert_to_nx(bound.arguments["G"])
+                G2 = bound2.arguments["G"]
+                attr = bound.arguments["attr"]
+                for k, v in G1.nodes.items():
+                    G2.nodes[k][attr] = v[attr]
+                nx._clear_cache(G2)
+            elif (
+                self.name in {"contracted_nodes", "contracted_edge"}
+                and not bound.arguments["copy"]
+            ):
+                # Edges and nodes changed; node "contraction" and edge "weight" attrs
+                G1 = backend.convert_to_nx(bound.arguments["G"])
+                G2 = bound2.arguments["G"]
+                G2.__dict__.update(G1.__dict__)
+                nx._clear_cache(G2)
+            elif self.name == "stochastic_graph" and not bound.arguments["copy"]:
+                G1 = backend.convert_to_nx(bound.arguments["G"])
+                G2 = bound2.arguments["G"]
+                for k, v in G1.edges.items():
+                    G2.edges[k]["weight"] = v["weight"]
+                nx._clear_cache(G2)
+            elif (
+                self.name == "relabel_nodes"
+                and not bound.arguments["copy"]
+                or self.name in {"incremental_closeness_centrality"}
+            ):
+                G1 = backend.convert_to_nx(bound.arguments["G"])
+                G2 = bound2.arguments["G"]
+                if G1 is G2:
+                    return G2
+                G2._node.clear()
+                G2._node.update(G1._node)
+                G2._adj.clear()
+                G2._adj.update(G1._adj)
+                if hasattr(G1, "_pred") and hasattr(G2, "_pred"):
+                    G2._pred.clear()
+                    G2._pred.update(G1._pred)
+                if hasattr(G1, "_succ") and hasattr(G2, "_succ"):
+                    G2._succ.clear()
+                    G2._succ.update(G1._succ)
+                nx._clear_cache(G2)
+                if self.name == "relabel_nodes":
+                    return G2
+            return backend.convert_to_nx(result)
+
+        converted_result = backend.convert_to_nx(result)
+        if isinstance(converted_result, nx.Graph) and self.name not in {
+            "boykov_kolmogorov",
+            "preflow_push",
+            "quotient_graph",
+            "shortest_augmenting_path",
+            "spectral_graph_forge",
+            # We don't handle tempfile.NamedTemporaryFile arguments
+            "read_gml",
+            "read_graph6",
+            "read_sparse6",
+            # We don't handle io.BufferedReader or io.TextIOWrapper arguments
+            "bipartite_read_edgelist",
+            "read_adjlist",
+            "read_edgelist",
+            "read_graphml",
+            "read_multiline_adjlist",
+            "read_pajek",
+            "from_pydot",
+            "pydot_read_dot",
+            "agraph_read_dot",
+            # graph comparison fails b/c of nan values
+            "read_gexf",
+        }:
+            # For graph return types (e.g. generators), we compare that results are
+            # the same between the backend and networkx, then return the original
+            # networkx result so the iteration order will be consistent in tests.
+            G = self.orig_func(*args2, **kwargs2)
+            if not nx.utils.graphs_equal(G, converted_result):
+                assert G.number_of_nodes() == converted_result.number_of_nodes()
+                assert G.number_of_edges() == converted_result.number_of_edges()
+                assert G.graph == converted_result.graph
+                assert G.nodes == converted_result.nodes
+                assert G.adj == converted_result.adj
+                assert type(G) is type(converted_result)
+                raise AssertionError("Graphs are not equal")
+            return G
+        return converted_result

     def _make_doc(self):
         """Generate the backends section at the end for functions having an alternate
         backend implementation(s) using the `backend_info` entry-point."""
-        pass
+
+        if not self.backends:
+            return self._orig_doc
+        lines = [
+            "Backends",
+            "--------",
+        ]
+        for backend in sorted(self.backends):
+            info = backend_info[backend]
+            if "short_summary" in info:
+                lines.append(f"{backend} : {info['short_summary']}")
+            else:
+                lines.append(backend)
+            if "functions" not in info or self.name not in info["functions"]:
+                lines.append("")
+                continue
+
+            func_info = info["functions"][self.name]
+
+            # Renaming extra_docstring to additional_docs
+            if func_docs := (
+                func_info.get("additional_docs") or func_info.get("extra_docstring")
+            ):
+                lines.extend(
+                    f"  {line}" if line else line for line in func_docs.split("\n")
+                )
+                add_gap = True
+            else:
+                add_gap = False
+
+            # Renaming extra_parameters to additional_parameters
+            if extra_parameters := (
+                func_info.get("extra_parameters")
+                or func_info.get("additional_parameters")
+            ):
+                if add_gap:
+                    lines.append("")
+                lines.append("  Additional parameters:")
+                for param in sorted(extra_parameters):
+                    lines.append(f"    {param}")
+                    if desc := extra_parameters[param]:
+                        lines.append(f"      {desc}")
+                    lines.append("")
+            else:
+                lines.append("")
+
+            if func_url := func_info.get("url"):
+                lines.append(f"[`Source <{func_url}>`_]")
+                lines.append("")
+
+        lines.pop()  # Remove last empty line
+        to_add = "\n    ".join(lines)
+        return f"{self._orig_doc.rstrip()}\n\n    {to_add}"
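
For reference, the generated section renders roughly like this (backend name, summary, and parameter invented for illustration):

    Backends
    --------
    fast_backend : A hypothetical accelerated backend

      Additional parameters:
        chunk_size : int
          Number of nodes handled per work unit.

    [`Source <https://example.org/fast_backend>`_]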

     def __reduce__(self):
         """Allow this object to be serialized with pickle.
@@ -631,9 +1527,27 @@ class _dispatchable:
         return _restore_dispatchable, (self.name,)


-if os.environ.get('_NETWORKX_BUILDING_DOCS_'):
+def _restore_dispatchable(name):
+    return _registered_algorithms[name]
+
+
+if os.environ.get("_NETWORKX_BUILDING_DOCS_"):
+    # When building docs with Sphinx, use the original function with the
+    # dispatched __doc__, b/c Sphinx renders normal Python functions better.
+    # This doesn't show e.g. `*, backend=None, **backend_kwargs` in the
+    # signatures, which is probably okay. It does allow the docstring to be
+    # updated based on the installed backends.
     _orig_dispatchable = _dispatchable
-    _dispatchable.__doc__ = _orig_dispatchable.__new__.__doc__
+
+    def _dispatchable(func=None, **kwargs):  # type: ignore[no-redef]
+        if func is None:
+            return partial(_dispatchable, **kwargs)
+        dispatched_func = _orig_dispatchable(func, **kwargs)
+        func.__doc__ = dispatched_func.__doc__
+        return func
+
+    _dispatchable.__doc__ = _orig_dispatchable.__new__.__doc__  # type: ignore[method-assign,assignment]
     _sig = inspect.signature(_orig_dispatchable.__new__)
-    _dispatchable.__signature__ = _sig.replace(parameters=[v for k, v in
-        _sig.parameters.items() if k != 'cls'])
+    _dispatchable.__signature__ = _sig.replace(  # type: ignore[method-assign,assignment]
+        parameters=[v for k, v in _sig.parameters.items() if k != "cls"]
+    )
diff --git a/networkx/utils/configs.py b/networkx/utils/configs.py
index 78a483d3f..e61741e0a 100644
--- a/networkx/utils/configs.py
+++ b/networkx/utils/configs.py
@@ -2,7 +2,8 @@ import collections
 import os
 import typing
 from dataclasses import dataclass
-__all__ = ['Config', 'config']
+
+__all__ = ["Config", "config"]


 @dataclass(init=False, eq=False, slots=True, kw_only=True, match_args=False)
@@ -63,27 +64,36 @@ class Config:
     def __new__(cls, **kwargs):
         orig_class = cls
         if cls is Config:
-            cls = type(cls.__name__, (cls,), {'__annotations__': {key:
-                typing.Any for key in kwargs}})
-        cls = dataclass(eq=False, repr=cls._strict, slots=cls._strict,
-            kw_only=True, match_args=False)(cls)
+            # Enable the "simple" case of accepting config definition as keywords
+            cls = type(
+                cls.__name__,
+                (cls,),
+                {"__annotations__": {key: typing.Any for key in kwargs}},
+            )
+        cls = dataclass(
+            eq=False,
+            repr=cls._strict,
+            slots=cls._strict,
+            kw_only=True,
+            match_args=False,
+        )(cls)
         if not cls._strict:
             cls.__repr__ = _flexible_repr
-        cls._orig_class = orig_class
+        cls._orig_class = orig_class  # Save original class so we can pickle
         instance = object.__new__(cls)
         instance.__init__(**kwargs)
         return instance

     def _check_config(self, key, value):
         """Check whether config value is valid. This is useful for subclasses."""
-        pass

+    # Control behavior of attributes
     def __dir__(self):
         return self.__dataclass_fields__.keys()

     def __setattr__(self, key, value):
         if self._strict and key not in self.__dataclass_fields__:
-            raise AttributeError(f'Invalid config name: {key!r}')
+            raise AttributeError(f"Invalid config name: {key!r}")
         self._check_config(key, value)
         object.__setattr__(self, key, value)

@@ -91,25 +101,25 @@ class Config:
         if self._strict:
             raise TypeError(
                 f"Configuration items can't be deleted (can't delete {key!r})."
-                )
+            )
         object.__delattr__(self, key)

+    # Be a `collection.abc.Collection`
     def __contains__(self, key):
-        return (key in self.__dataclass_fields__ if self._strict else key in
-            self.__dict__)
+        return (
+            key in self.__dataclass_fields__ if self._strict else key in self.__dict__
+        )

     def __iter__(self):
-        return iter(self.__dataclass_fields__ if self._strict else self.
-            __dict__)
+        return iter(self.__dataclass_fields__ if self._strict else self.__dict__)

     def __len__(self):
-        return len(self.__dataclass_fields__ if self._strict else self.__dict__
-            )
+        return len(self.__dataclass_fields__ if self._strict else self.__dict__)

     def __reversed__(self):
-        return reversed(self.__dataclass_fields__ if self._strict else self
-            .__dict__)
+        return reversed(self.__dataclass_fields__ if self._strict else self.__dict__)

+    # Add dunder methods for `collections.abc.Mapping`
     def __getitem__(self, key):
         try:
             return getattr(self, key)
@@ -127,18 +137,46 @@ class Config:
             self.__delattr__(key)
         except AttributeError as err:
             raise KeyError(*err.args) from None
-    _ipython_key_completions_ = __dir__

+    _ipython_key_completions_ = __dir__  # config["<TAB>
+
+    # Go ahead and make it a `collections.abc.Mapping`
+    def get(self, key, default=None):
+        return getattr(self, key, default)
+
+    def items(self):
+        return collections.abc.ItemsView(self)
+
+    def keys(self):
+        return collections.abc.KeysView(self)
+
+    def values(self):
+        return collections.abc.ValuesView(self)
+
+    # dataclass can define __eq__ for us, but do it here so it works after pickling
     def __eq__(self, other):
         if not isinstance(other, Config):
             return NotImplemented
-        return self._orig_class == other._orig_class and self.items(
-            ) == other.items()
+        return self._orig_class == other._orig_class and self.items() == other.items()

+    # Make pickle work
     def __reduce__(self):
         return self._deserialize, (self._orig_class, dict(self))

+    @staticmethod
+    def _deserialize(cls, kwargs):
+        return cls(**kwargs)
+
+
+def _flexible_repr(self):
+    return (
+        f"{self.__class__.__qualname__}("
+        + ", ".join(f"{key}={val!r}" for key, val in self.__dict__.items())
+        + ")"
+    )
+

+# Register, b/c `Mapping.__subclasshook__` returns `NotImplemented`
 collections.abc.Mapping.register(Config)
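
With the `Mapping` registration and the methods above, both attribute-style and mapping-style access work. A quick sketch (field names invented):

    from networkx.utils.configs import Config

    cfg = Config(verbosity=0, color=True)  # "simple" keyword form from __new__
    cfg.verbosity = 2                      # attribute access
    assert cfg["color"] is True            # mapping access
    assert "verbosity" in cfg and len(cfg) == 2
    assert dict(cfg) == {"verbosity": 2, "color": True}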


@@ -181,11 +219,42 @@ class NetworkXConfig(Config):

     This is a global configuration. Use with caution when using from multiple threads.
     """
+
     backend_priority: list[str]
     backends: Config
     cache_converted_graphs: bool

+    def _check_config(self, key, value):
+        from .backends import backends

-config = NetworkXConfig(backend_priority=[], backends=Config(),
-    cache_converted_graphs=bool(os.environ.get(
-    'NETWORKX_CACHE_CONVERTED_GRAPHS', '')))
+        if key == "backend_priority":
+            if not (isinstance(value, list) and all(isinstance(x, str) for x in value)):
+                raise TypeError(
+                    f"{key!r} config must be a list of backend names; got {value!r}"
+                )
+            if missing := {x for x in value if x not in backends}:
+                missing = ", ".join(map(repr, sorted(missing)))
+                raise ValueError(f"Unknown backend when setting {key!r}: {missing}")
+        elif key == "backends":
+            if not (
+                isinstance(value, Config)
+                and all(isinstance(key, str) for key in value)
+                and all(isinstance(val, Config) for val in value.values())
+            ):
+                raise TypeError(
+                    f"{key!r} config must be a Config of backend configs; got {value!r}"
+                )
+            if missing := {x for x in value if x not in backends}:
+                missing = ", ".join(map(repr, sorted(missing)))
+                raise ValueError(f"Unknown backend when setting {key!r}: {missing}")
+        elif key == "cache_converted_graphs":
+            if not isinstance(value, bool):
+                raise TypeError(f"{key!r} config must be True or False; got {value!r}")
+
+
+# Backend configuration will be updated in backends.py
+config = NetworkXConfig(
+    backend_priority=[],
+    backends=Config(),
+    cache_converted_graphs=bool(os.environ.get("NETWORKX_CACHE_CONVERTED_GRAPHS", "")),
+)
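
A sketch of the validation above in action ("fast_backend" is a hypothetical, unregistered backend name):

    import networkx as nx

    nx.config.cache_converted_graphs = True          # valid: a bool
    try:
        nx.config.backend_priority = "fast_backend"  # invalid: must be a list
    except TypeError as err:
        print(err)
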
diff --git a/networkx/utils/decorators.py b/networkx/utils/decorators.py
index 0e85b150b..205bf5005 100644
--- a/networkx/utils/decorators.py
+++ b/networkx/utils/decorators.py
@@ -11,11 +11,19 @@ from functools import wraps
 from inspect import Parameter, signature
 from os.path import splitext
 from pathlib import Path
+
 import networkx as nx
 from networkx.utils import create_py_random_state, create_random_state
-__all__ = ['not_implemented_for', 'open_file', 'nodes_or_number',
-    'np_random_state', 'py_random_state', 'argmap', 'deprecate_positional_args'
-    ]
+
+__all__ = [
+    "not_implemented_for",
+    "open_file",
+    "nodes_or_number",
+    "np_random_state",
+    "py_random_state",
+    "argmap",
+    "deprecate_positional_args",
+]


 def not_implemented_for(*graph_types):
@@ -62,14 +70,43 @@ def not_implemented_for(*graph_types):
        def sp_np_function(G):
            pass
     """
-    pass
-
-
-fopeners = {'.gz': gzip.open, '.gzip': gzip.open, '.bz2': bz2.BZ2File}
-_dispatch_dict = defaultdict(lambda : open, **fopeners)
-
-
-def open_file(path_arg, mode='r'):
+    if ("directed" in graph_types) and ("undirected" in graph_types):
+        raise ValueError("Function not implemented on directed AND undirected graphs?")
+    if ("multigraph" in graph_types) and ("graph" in graph_types):
+        raise ValueError("Function not implemented on graph AND multigraphs?")
+    if not set(graph_types) < {"directed", "undirected", "multigraph", "graph"}:
+        raise KeyError(
+            "use one or more of directed, undirected, multigraph, graph.  "
+            f"You used {graph_types}"
+        )
+
+    # 3-way logic: True if "directed" input, False if "undirected" input, else None
+    dval = ("directed" in graph_types) or "undirected" not in graph_types and None
+    mval = ("multigraph" in graph_types) or "graph" not in graph_types and None
+    errmsg = f"not implemented for {' '.join(graph_types)} type"
+
+    def _not_implemented_for(g):
+        if (mval is None or mval == g.is_multigraph()) and (
+            dval is None or dval == g.is_directed()
+        ):
+            raise nx.NetworkXNotImplemented(errmsg)
+
+        return g
+
+    return argmap(_not_implemented_for, 0)
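
Usage sketch of the decorator defined above (function name invented):

    import networkx as nx
    from networkx.utils import not_implemented_for

    @not_implemented_for("directed")
    def node_count(G):
        return G.number_of_nodes()

    node_count(nx.Graph())        # fine
    try:
        node_count(nx.DiGraph())  # "not implemented for directed type"
    except nx.NetworkXNotImplemented as err:
        print(err)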
+
+
+# To handle new extensions, define a function accepting a `path` and `mode`.
+# Then add the extension to _dispatch_dict.
+fopeners = {
+    ".gz": gzip.open,
+    ".gzip": gzip.open,
+    ".bz2": bz2.BZ2File,
+}
+_dispatch_dict = defaultdict(lambda: open, **fopeners)
+
+
+def open_file(path_arg, mode="r"):
     """Decorator to ensure clean opening and closing of files.

     Parameters
@@ -144,7 +181,25 @@ def open_file(path_arg, mode='r'):
     Instead, we use a try block, as shown above.
     When we exit the function, fobj will be closed, if it should be, by the decorator.
     """
-    pass
+
+    def _open_file(path):
+        # Now we have the path_arg. There are two types of input to consider:
+        #   1) string representing a path that should be opened
+        #   2) an already opened file object
+        if isinstance(path, str):
+            ext = splitext(path)[1]
+        elif isinstance(path, Path):
+            # path is a pathlib reference to a filename
+            ext = path.suffix
+            path = str(path)
+        else:
+            # could be None, or a file handle, in which case the algorithm will deal with it
+            return path, lambda: None
+
+        fobj = _dispatch_dict[ext](path, mode=mode)
+        return fobj, lambda: fobj.close()
+
+    return argmap(_open_file, path_arg, try_finally=True)
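
A usage sketch (function and file names invented): the decorated function receives an already-open file object, and the try/finally wiring above guarantees it is closed on exit.

    import networkx as nx
    from networkx.utils import open_file

    @open_file(1, mode="w")
    def write_nodes(G, path):
        for n in G:
            path.write(f"{n}\n")

    write_nodes(nx.path_graph(3), "nodes.txt")  # opened, written, closed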


 def nodes_or_number(which_args):
@@ -192,7 +247,23 @@ def nodes_or_number(which_args):
            # presumably r is a number. It is not handled by this decorator.
            # n is converted to a list of nodes
     """
-    pass
+
+    def _nodes_or_number(n):
+        try:
+            nodes = list(range(n))
+        except TypeError:
+            nodes = tuple(n)
+        else:
+            if n < 0:
+                raise nx.NetworkXError(f"Negative number of nodes not valid: {n}")
+        return (n, nodes)
+
+    try:
+        iter_wa = iter(which_args)
+    except TypeError:
+        iter_wa = (which_args,)
+
+    return argmap(_nodes_or_number, *iter_wa)
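
A usage sketch (function name invented): the decorated argument arrives as an `(original, nodes)` pair.

    import networkx as nx
    from networkx.utils import nodes_or_number

    @nodes_or_number(0)
    def make_nodes(n):
        n, nodes = n
        G = nx.Graph()
        G.add_nodes_from(nodes)
        return G

    assert list(make_nodes(3)) == [0, 1, 2]
    assert list(make_nodes("xyz")) == ["x", "y", "z"]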


 def np_random_state(random_state_argument):
@@ -239,7 +310,7 @@ def np_random_state(random_state_argument):
     --------
     py_random_state
     """
-    pass
+    return argmap(create_random_state, random_state_argument)
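
A usage sketch (function name invented); `py_random_state` below works the same way but yields a `random.Random`-compatible object:

    import numpy as np
    from networkx.utils import np_random_state

    @np_random_state("seed")
    def jitter(x, seed=None):
        return x + seed.uniform(-0.1, 0.1)  # seed is now RandomState-like

    jitter(1.0, seed=42)                        # int seed is wrapped
    jitter(1.0, seed=np.random.RandomState(1))  # generators pass through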


 def py_random_state(random_state_argument):
@@ -298,7 +369,8 @@ def py_random_state(random_state_argument):
     --------
     np_random_state
     """
-    pass
+
+    return argmap(create_py_random_state, random_state_argument)


 class argmap:
@@ -686,7 +758,11 @@ class argmap:
         [1] https://github.com/networkx/networkx/issues/4732

         """
-        pass
+        real_func = func.__argmap__.compile(func.__wrapped__)
+        func.__code__ = real_func.__code__
+        func.__globals__.update(real_func.__globals__)
+        func.__dict__.update(real_func.__dict__)
+        return func
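
A sketch of the laziness this enables: the decorated function stays a cheap stub until its first call, which triggers `compile` and swaps in the generated `__code__`.

    from networkx.utils import argmap

    @argmap(abs, 0)
    def identity(x):
        return x

    assert identity(-3) == 3  # first call compiles, then runs the wrapper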

     def __call__(self, f):
         """Construct a lazily decorated wrapper of f.
@@ -711,6 +787,8 @@ class argmap:

         def func(*args, __wrapper=None, **kwargs):
             return argmap._lazy_compile(__wrapper)(*args, **kwargs)
+
+        # standard function-wrapping stuff
         func.__name__ = f.__name__
         func.__doc__ = f.__doc__
         func.__defaults__ = f.__defaults__
@@ -719,17 +797,35 @@ class argmap:
         func.__qualname__ = f.__qualname__
         func.__dict__.update(f.__dict__)
         func.__wrapped__ = f
-        func.__kwdefaults__['_argmap__wrapper'] = func
+
+        # now that we've wrapped f, we may have picked up some __dict__ or
+        # __kwdefaults__ items that were set by a previous argmap.  Thus, we set
+        # these values after those update() calls.
+
+        # If we attempt to access func from within itself, that happens through
+        # a closure -- which trips an error when we replace func.__code__.  The
+        # standard workaround for functions which can't see themselves is to use
+        # a Y-combinator, as we do here.
+        func.__kwdefaults__["_argmap__wrapper"] = func
+
+        # this self-reference is here because functools.wraps preserves
+        # everything in __dict__, and we don't want to mistake a non-argmap
+        # wrapper for an argmap wrapper
         func.__self__ = func
+
+        # this is used to variously call self.assemble and self.compile
         func.__argmap__ = self
-        if hasattr(f, '__argmap__'):
+
+        if hasattr(f, "__argmap__"):
             func.__is_generator = f.__is_generator
         else:
             func.__is_generator = inspect.isgeneratorfunction(f)
+
         if self._finally and func.__is_generator:
-            raise nx.NetworkXError(
-                'argmap cannot decorate generators with try_finally')
+            raise nx.NetworkXError("argmap cannot decorate generators with try_finally")
+
         return func
+
     __count = 0

     @classmethod
@@ -750,8 +846,10 @@ class argmap:
         count : int
             An integer unique to this Python session (simply counts from zero)
         """
-        pass
-    _bad_chars = re.compile('[^a-zA-Z0-9_]')
+        cls.__count += 1
+        return cls.__count
+
+    _bad_chars = re.compile("[^a-zA-Z0-9_]")

     @classmethod
     def _name(cls, f):
@@ -769,7 +867,9 @@ class argmap:
             The mangled version of `f.__name__` (if `f.__name__` exists) or `f`

         """
-        pass
+        f = f.__name__ if hasattr(f, "__name__") else f
+        fname = re.sub(cls._bad_chars, "_", f)
+        return f"argmap_{fname}_{cls._count()}"

     def compile(self, f):
         """Compile the decorated function.
@@ -801,7 +901,23 @@ class argmap:
             The decorated function

         """
-        pass
+        sig, wrapped_name, functions, mapblock, finallys, mutable_args = self.assemble(
+            f
+        )
+
+        call = f"{sig.call_sig.format(wrapped_name)}#"
+        mut_args = f"{sig.args} = list({sig.args})" if mutable_args else ""
+        body = argmap._indent(sig.def_sig, mut_args, mapblock, call, finallys)
+        code = "\n".join(body)
+
+        locl = {}
+        globl = dict(functions.values())
+        filename = f"{self.__class__} compilation {self._count()}"
+        compiled = compile(code, filename, "exec")
+        exec(compiled, globl, locl)
+        func = locl[sig.name]
+        func._code = code
+        return func
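
The compiled source is retained as `_code` (copied onto the public wrapper by `_lazy_compile` above), which is handy for debugging. A sketch; the exact generated names vary per session:

    from networkx.utils import argmap

    def double(x):
        return 2 * x

    @argmap(double, "x")
    def f(x):
        return x

    f(5)           # 10; forces compilation
    print(f._code) # roughly:
    # def argmap_f_3(x):
    #     x = argmap_double_2(x)
    #     return argmap_f_1(x)#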

     def assemble(self, f):
         """Collects components of the source for the decorated function wrapping f.
@@ -843,11 +959,111 @@ class argmap:
             via their indices. The compile method then turns the argument
             tuple into a list so that the arguments can be modified.
         """
-        pass
+
+        # first, we check if f is already argmapped -- if that's the case,
+        # build up the function recursively.
+        # > mapblock is generally a list of function calls of the sort
+        #     arg = func(arg)
+        # in addition to some try-blocks if needed.
+        # > finallys is a recursive list of finally blocks of the sort
+        #         finally:
+        #             close_func_1()
+        #     finally:
+        #         close_func_2()
+        # > functions is a dict of functions used in the scope of our decorated
+        # function. It will be used to construct globals used in compilation.
+        # We make functions[id(f)] = name_of_f, f to ensure that a given
+        # function is stored and named exactly once even if called by
+        # nested decorators.
+        if hasattr(f, "__argmap__") and f.__self__ is f:
+            (
+                sig,
+                wrapped_name,
+                functions,
+                mapblock,
+                finallys,
+                mutable_args,
+            ) = f.__argmap__.assemble(f.__wrapped__)
+            functions = dict(functions)  # shallow-copy just in case
+        else:
+            sig = self.signature(f)
+            wrapped_name = self._name(f)
+            mapblock, finallys = [], []
+            functions = {id(f): (wrapped_name, f)}
+            mutable_args = False
+
+        if id(self._func) in functions:
+            fname, _ = functions[id(self._func)]
+        else:
+            fname, _ = functions[id(self._func)] = self._name(self._func), self._func
+
+        # this is a bit complicated -- we can call functions with a variety of
+        # nested arguments, so long as their input and output are tuples with
+        # the same nested structure. e.g. ("a", "b") maps arguments a and b.
+        # A more complicated nesting like (0, (3, 4)) maps arguments 0, 3, 4
+        # expecting the mapping to output new values in the same nested shape.
+        # The ability to argmap multiple arguments was necessary for
+        # the decorator `nx.algorithms.community.quality.require_partition`, and
+        # while we're not taking full advantage of the ability to handle
+        # multiply-nested tuples, it was convenient to implement this in
+        # generality because the recursive call to `get_name` is necessary in
+        # any case.
+        applied = set()
+
+        def get_name(arg, first=True):
+            nonlocal mutable_args
+            if isinstance(arg, tuple):
+                name = ", ".join(get_name(x, False) for x in arg)
+                return name if first else f"({name})"
+            if arg in applied:
+                raise nx.NetworkXError(f"argument {arg} is specified multiple times")
+            applied.add(arg)
+            if arg in sig.names:
+                return sig.names[arg]
+            elif isinstance(arg, str):
+                if sig.kwargs is None:
+                    raise nx.NetworkXError(
+                        f"name {arg} is not a named parameter and this function doesn't have kwargs"
+                    )
+                return f"{sig.kwargs}[{arg!r}]"
+            else:
+                if sig.args is None:
+                    raise nx.NetworkXError(
+                        f"index {arg} is not a parameter index and this function doesn't have args"
+                    )
+                mutable_args = True
+                return f"{sig.args}[{arg - sig.n_positional}]"
+
+        if self._finally:
+            # here's where we handle try_finally decorators.  Such a decorator
+            # returns a mapped argument and a function to be called in a
+            # finally block.  This feature was required by the open_file
+            # decorator.  The below generates the code
+            #
+            # name, final = func(name)                   #<--append to mapblock
+            # try:                                       #<--append to mapblock
+            #     ... more argmapping and try blocks
+            #     return WRAPPED_FUNCTION(...)
+            #     ... more finally blocks
+            # finally:                                   #<--prepend to finallys
+            #     final()                                #<--prepend to finallys
+            #
+            for a in self._args:
+                name = get_name(a)
+                final = self._name(name)
+                mapblock.append(f"{name}, {final} = {fname}({name})")
+                mapblock.append("try:")
+                finallys = ["finally:", f"{final}()#", "#", finallys]
+        else:
+            mapblock.extend(
+                f"{name} = {fname}({name})" for name in map(get_name, self._args)
+            )
+
+        return sig, wrapped_name, functions, mapblock, finallys, mutable_args

     @classmethod
     def signature(cls, f):
-        """Construct a Signature object describing `f`
+        r"""Construct a Signature object describing `f`

         Compute a Signature so that we can write a function wrapping f with
         the same signature and call-type.
@@ -872,16 +1088,84 @@ class argmap:
             call_sig : a string used as code to call the decorated function
             names : a dict keyed by argument name and index to the argument's name
             n_positional : the number of positional arguments in the signature
-            args : the name of the VAR_POSITIONAL argument if any, i.e. \\*theseargs
-            kwargs : the name of the VAR_KEYWORDS argument if any, i.e. \\*\\*kwargs
+            args : the name of the VAR_POSITIONAL argument if any, i.e. \*theseargs
+            kwargs : the name of the VAR_KEYWORDS argument if any, i.e. \*\*kwargs

         These named attributes of the signature are used in `assemble` and `compile`
         to construct a string of source code for the decorated function.

         """
-        pass
-    Signature = collections.namedtuple('Signature', ['name', 'signature',
-        'def_sig', 'call_sig', 'names', 'n_positional', 'args', 'kwargs'])
+        sig = inspect.signature(f, follow_wrapped=False)
+        def_sig = []
+        call_sig = []
+        names = {}
+
+        kind = None
+        args = None
+        kwargs = None
+        npos = 0
+        for i, param in enumerate(sig.parameters.values()):
+            # parameters can be position-only, keyword-or-position, keyword-only
+            # in any combination, but only in the order as above.  we do edge
+            # detection to add the appropriate punctuation
+            prev = kind
+            kind = param.kind
+            if prev == param.POSITIONAL_ONLY != kind:
+                # the last token was position-only, but this one isn't
+                def_sig.append("/")
+            if (
+                param.VAR_POSITIONAL
+                != prev
+                != param.KEYWORD_ONLY
+                == kind
+                != param.VAR_POSITIONAL
+            ):
+                # param is the first keyword-only arg and isn't starred
+                def_sig.append("*")
+
+            # star arguments as appropriate
+            if kind == param.VAR_POSITIONAL:
+                name = "*" + param.name
+                args = param.name
+                count = 0
+            elif kind == param.VAR_KEYWORD:
+                name = "**" + param.name
+                kwargs = param.name
+                count = 0
+            else:
+                names[i] = names[param.name] = param.name
+                name = param.name
+                count = 1
+
+            # assign to keyword-only args in the function call
+            if kind == param.KEYWORD_ONLY:
+                call_sig.append(f"{name} = {name}")
+            else:
+                npos += count
+                call_sig.append(name)
+
+            def_sig.append(name)
+
+        fname = cls._name(f)
+        def_sig = f'def {fname}({", ".join(def_sig)}):'
+
+        call_sig = f"return {{}}({', '.join(call_sig)})"
+
+        return cls.Signature(fname, sig, def_sig, call_sig, names, npos, args, kwargs)
+
+    Signature = collections.namedtuple(
+        "Signature",
+        [
+            "name",
+            "signature",
+            "def_sig",
+            "call_sig",
+            "names",
+            "n_positional",
+            "args",
+            "kwargs",
+        ],
+    )

     @staticmethod
     def _flatten(nestlist, visited):
@@ -901,8 +1185,17 @@ class argmap:
         Non-list objects contained in nestlist

         """
-        pass
-    _tabs = ' ' * 64
+        for thing in nestlist:
+            if isinstance(thing, list):
+                if id(thing) in visited:
+                    raise ValueError("A cycle was found in nestlist.  Be a tree.")
+                else:
+                    visited.add(id(thing))
+                yield from argmap._flatten(thing, visited)
+            else:
+                yield thing
+
+    _tabs = " " * 64

     @staticmethod
     def _indent(*lines):
@@ -939,9 +1232,13 @@ class argmap:
             finally:
              pass#'''
         """
-        pass
+        depth = 0
+        for line in argmap._flatten(lines, set()):
+            yield f"{argmap._tabs[:depth]}{line}"
+            depth += (line[-1:] == ":") - (line[-1:] == "#")


+# Vendored in from https://github.com/scikit-learn/scikit-learn/blob/8ed0270b99344cee9bb253cbfa1d986561ea6cd7/sklearn/utils/validation.py#L37C1-L90C44
 def deprecate_positional_args(func=None, *, version):
     """Decorator for methods that issues warnings for positional arguments.

@@ -955,4 +1252,44 @@ def deprecate_positional_args(func=None, *, version):
     version : str, default="1.3"
         The version when positional arguments will result in an error.
     """
-    pass
+
+    def _inner_deprecate_positional_args(f):
+        sig = signature(f)
+        kwonly_args = []
+        all_args = []
+
+        for name, param in sig.parameters.items():
+            if param.kind == Parameter.POSITIONAL_OR_KEYWORD:
+                all_args.append(name)
+            elif param.kind == Parameter.KEYWORD_ONLY:
+                kwonly_args.append(name)
+
+        @wraps(f)
+        def inner_f(*args, **kwargs):
+            extra_args = len(args) - len(all_args)
+            if extra_args <= 0:
+                return f(*args, **kwargs)
+
+            # extra_args > 0
+            args_msg = [
+                f"{name}={arg}"
+                for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:])
+            ]
+            args_msg = ", ".join(args_msg)
+            warnings.warn(
+                (
+                    f"Pass {args_msg} as keyword args. From NetworkX version "
+                    f"{version} passing these as positional arguments "
+                    "will result in an error"
+                ),
+                FutureWarning,
+            )
+            kwargs.update(zip(sig.parameters, args))
+            return f(**kwargs)
+
+        return inner_f
+
+    if func is not None:
+        return _inner_deprecate_positional_args(func)
+
+    return _inner_deprecate_positional_args
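
For context, here is a minimal usage sketch of the two decorators restored above. The helpers `swap`, `open_file`, `subtract`, `first_line`, and `make_thing` are illustrative stand-ins patterned on the examples in argmap's own docstring, not networkx APIs, and the version string is arbitrary; the nested-tuple form mirrors the comment in `assemble`, and `try_finally=True` exercises the generated finally block:

    from networkx.utils.decorators import argmap, deprecate_positional_args

    def swap(x, y):
        # Input and output share the nested shape ("a", "b").
        return y, x

    @argmap(swap, ("a", "b"))
    def subtract(a, b):
        return a - b

    print(subtract(5, 3))  # arguments are swapped before the call: prints -2

    def open_file(path):
        # Return the mapped argument plus a cleanup callable; argmap calls
        # the cleanup inside the finally block it generates.
        f = open(path)
        return f, lambda: f.close()

    @argmap(open_file, "source", try_finally=True)
    def first_line(source):
        return source.readline()

    @deprecate_positional_args(version="3.4")
    def make_thing(data=None, *, create_using=None):
        return data, create_using

    make_thing(1, 2)  # FutureWarning: pass create_using=2 as a keyword argument
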
diff --git a/networkx/utils/heaps.py b/networkx/utils/heaps.py
index 53979ae9f..3db279063 100644
--- a/networkx/utils/heaps.py
+++ b/networkx/utils/heaps.py
@@ -1,10 +1,13 @@
 """
 Min-heaps.
 """
+
 from heapq import heappop, heappush
 from itertools import count
+
 import networkx as nx
-__all__ = ['MinHeap', 'PairingHeap', 'BinaryHeap']
+
+__all__ = ["MinHeap", "PairingHeap", "BinaryHeap"]


 class MinHeap:
@@ -15,10 +18,10 @@ class MinHeap:
     value in an existing pair and deleting the minimum pair.
     """

-
     class _Item:
         """Used by subclassess to represent a key-value pair."""
-        __slots__ = 'key', 'value'
+
+        __slots__ = ("key", "value")

         def __init__(self, key, value):
             self.key = key
@@ -44,7 +47,7 @@ class MinHeap:
         NetworkXError
             If the heap is empty.
         """
-        pass
+        raise NotImplementedError

     def pop(self):
         """Delete the minimum pair in the heap.
@@ -59,7 +62,7 @@ class MinHeap:
         NetworkXError
             If the heap is empty.
         """
-        pass
+        raise NotImplementedError

     def get(self, key, default=None):
         """Returns the value associated with a key.
@@ -78,7 +81,7 @@ class MinHeap:
         value : object.
             The value associated with the key.
         """
-        pass
+        raise NotImplementedError

     def insert(self, key, value, allow_increase=False):
         """Insert a new key-value pair or modify the value in an existing
@@ -101,7 +104,7 @@ class MinHeap:
         decreased : bool
             True if a pair is inserted or the existing value is decreased.
         """
-        pass
+        raise NotImplementedError

     def __nonzero__(self):
         """Returns whether the heap if empty."""
@@ -129,20 +132,24 @@ class MinHeap:
 class PairingHeap(MinHeap):
     """A pairing heap."""

-
     class _Node(MinHeap._Item):
         """A node in a pairing heap.

         A tree in a pairing heap is stored using the left-child, right-sibling
         representation.
         """
-        __slots__ = 'left', 'next', 'prev', 'parent'
+
+        __slots__ = ("left", "next", "prev", "parent")

         def __init__(self, key, value):
             super().__init__(key, value)
+            # The leftmost child.
             self.left = None
+            # The next sibling.
             self.next = None
+            # The previous sibling.
             self.prev = None
+            # The parent.
             self.parent = None

     def __init__(self):
@@ -150,21 +157,125 @@ class PairingHeap(MinHeap):
         super().__init__()
         self._root = None

+    def min(self):
+        if self._root is None:
+            raise nx.NetworkXError("heap is empty.")
+        return (self._root.key, self._root.value)
+
+    def pop(self):
+        if self._root is None:
+            raise nx.NetworkXError("heap is empty.")
+        min_node = self._root
+        self._root = self._merge_children(self._root)
+        del self._dict[min_node.key]
+        return (min_node.key, min_node.value)
+
+    def get(self, key, default=None):
+        node = self._dict.get(key)
+        return node.value if node is not None else default
+
+    def insert(self, key, value, allow_increase=False):
+        node = self._dict.get(key)
+        root = self._root
+        if node is not None:
+            if value < node.value:
+                node.value = value
+                if node is not root and value < node.parent.value:
+                    self._cut(node)
+                    self._root = self._link(root, node)
+                return True
+            elif allow_increase and value > node.value:
+                node.value = value
+                child = self._merge_children(node)
+                # Nonstandard step: Link the merged subtree with the root. See
+                # below for the standard step.
+                if child is not None:
+                    self._root = self._link(self._root, child)
+                # Standard step: Perform a decrease followed by a pop as if the
+                # value were the smallest in the heap. Then insert the new
+                # value into the heap.
+                # if node is not root:
+                #     self._cut(node)
+                #     if child is not None:
+                #         root = self._link(root, child)
+                #     self._root = self._link(root, node)
+                # else:
+                #     self._root = (self._link(node, child)
+                #                   if child is not None else node)
+            return False
+        else:
+            # Insert a new key.
+            node = self._Node(key, value)
+            self._dict[key] = node
+            self._root = self._link(root, node) if root is not None else node
+            return True
+
     def _link(self, root, other):
         """Link two nodes, making the one with the smaller value the parent of
         the other.
         """
-        pass
+        if other.value < root.value:
+            root, other = other, root
+        next = root.left
+        other.next = next
+        if next is not None:
+            next.prev = other
+        other.prev = None
+        root.left = other
+        other.parent = root
+        return root

     def _merge_children(self, root):
         """Merge the subtrees of the root using the standard two-pass method.
         The resulting subtree is detached from the root.
         """
-        pass
+        node = root.left
+        root.left = None
+        if node is not None:
+            link = self._link
+            # Pass 1: Merge pairs of consecutive subtrees from left to right.
+            # At the end of the pass, only the prev pointers of the resulting
+            # subtrees have meaningful values. The other pointers will be fixed
+            # in pass 2.
+            prev = None
+            while True:
+                next = node.next
+                if next is None:
+                    node.prev = prev
+                    break
+                next_next = next.next
+                node = link(node, next)
+                node.prev = prev
+                prev = node
+                if next_next is None:
+                    break
+                node = next_next
+            # Pass 2: Successively merge the subtrees produced by pass 1 from
+            # right to left with the rightmost one.
+            prev = node.prev
+            while prev is not None:
+                prev_prev = prev.prev
+                node = link(prev, node)
+                prev = prev_prev
+            # Now node can become the new root. It has no parent or siblings.
+            node.prev = None
+            node.next = None
+            node.parent = None
+        return node

     def _cut(self, node):
         """Cut a node from its parent."""
-        pass
+        prev = node.prev
+        next = node.next
+        if prev is not None:
+            prev.next = next
+        else:
+            node.parent.left = next
+        node.prev = None
+        if next is not None:
+            next.prev = prev
+            node.next = None
+        node.parent = None


 class BinaryHeap(MinHeap):
@@ -175,3 +286,55 @@ class BinaryHeap(MinHeap):
         super().__init__()
         self._heap = []
         self._count = count()
+
+    def min(self):
+        dict = self._dict
+        if not dict:
+            raise nx.NetworkXError("heap is empty")
+        heap = self._heap
+        pop = heappop
+        # Repeatedly remove stale key-value pairs until an up-to-date one is
+        # found.
+        while True:
+            value, _, key = heap[0]
+            if key in dict and value == dict[key]:
+                break
+            pop(heap)
+        return (key, value)
+
+    def pop(self):
+        dict = self._dict
+        if not dict:
+            raise nx.NetworkXError("heap is empty")
+        heap = self._heap
+        pop = heappop
+        # Repeatedly remove stale key-value pairs until an up-to-date one is
+        # found.
+        while True:
+            value, _, key = heap[0]
+            pop(heap)
+            if key in dict and value == dict[key]:
+                break
+        del dict[key]
+        return (key, value)
+
+    def get(self, key, default=None):
+        return self._dict.get(key, default)
+
+    def insert(self, key, value, allow_increase=False):
+        dict = self._dict
+        if key in dict:
+            old_value = dict[key]
+            if value < old_value or (allow_increase and value > old_value):
+                # Since there is no way to efficiently obtain the location of a
+                # key-value pair in the heap, insert a new pair even if entries
+                # with the same key are already present. Mark the old entries as
+                # stale and skip them when the minimum pair is queried.
+                dict[key] = value
+                heappush(self._heap, (value, next(self._count), key))
+                return value < old_value
+            return False
+        else:
+            dict[key] = value
+            heappush(self._heap, (value, next(self._count), key))
+            return True
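
A short sketch of the BinaryHeap semantics implemented above, in particular the lazy deletion of stale entries described in the comments of `insert`, `min`, and `pop`:

    from networkx.utils.heaps import BinaryHeap

    heap = BinaryHeap()
    heap.insert("a", 5)
    heap.insert("b", 2)
    heap.insert("a", 1)         # decrease: a fresh entry is pushed; ('a', 5) goes stale
    print(heap.min())           # ('a', 1)
    print(heap.insert("b", 9))  # False: increases are refused unless allow_increase=True
    print(heap.pop())           # ('a', 1); the stale entry is dropped by a later query
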
diff --git a/networkx/utils/mapped_queue.py b/networkx/utils/mapped_queue.py
index f98621d8f..afb97404c 100644
--- a/networkx/utils/mapped_queue.py
+++ b/networkx/utils/mapped_queue.py
@@ -1,7 +1,9 @@
 """Priority queue class with updatable priorities.
 """
+
 import heapq
-__all__ = ['MappedQueue']
+
+__all__ = ["MappedQueue"]


 class _HeapElement:
@@ -26,7 +28,8 @@ class _HeapElement:
     as a tiebreaker. This provides compatibility with older systems that
     use tuples to combine priority and elements.
     """
-    __slots__ = ['priority', 'element', '_hash']
+
+    __slots__ = ["priority", "element", "_hash"]

     def __init__(self, priority, element):
         self.priority = priority
@@ -38,13 +41,14 @@ class _HeapElement:
             other_priority = other.priority
         except AttributeError:
             return self.priority < other
+        # assume comparing to another _HeapElement
         if self.priority == other_priority:
             try:
                 return self.element < other.element
             except TypeError as err:
                 raise TypeError(
-                    'Consider using a tuple, with a priority value that can be compared.'
-                    )
+                    "Consider using a tuple, with a priority value that can be compared."
+                ) from err
         return self.priority < other_priority

     def __gt__(self, other):
@@ -52,13 +56,14 @@ class _HeapElement:
             other_priority = other.priority
         except AttributeError:
             return self.priority > other
+        # assume comparing to another _HeapElement
         if self.priority == other_priority:
             try:
                 return self.element > other.element
             except TypeError as err:
                 raise TypeError(
-                    'Consider using a tuple, with a priority value that can be compared.'
-                    )
+                    "Consider using a tuple, with a priority value that can be compared."
+                ) from err
         return self.priority > other_priority

     def __eq__(self, other):
@@ -81,7 +86,7 @@ class _HeapElement:
             yield self.element

     def __repr__(self):
-        return f'_HeapElement({self.priority}, {self.element})'
+        return f"_HeapElement({self.priority}, {self.element})"


 class MappedQueue:
@@ -159,26 +164,78 @@ class MappedQueue:

     def _heapify(self):
         """Restore heap invariant and recalculate map."""
-        pass
+        heapq.heapify(self.heap)
+        self.position = {elt: pos for pos, elt in enumerate(self.heap)}
+        if len(self.heap) != len(self.position):
+            raise AssertionError("Heap contains duplicate elements")

     def __len__(self):
         return len(self.heap)

     def push(self, elt, priority=None):
         """Add an element to the queue."""
-        pass
+        if priority is not None:
+            elt = _HeapElement(priority, elt)
+        # If element is already in queue, do nothing
+        if elt in self.position:
+            return False
+        # Add element to heap and dict
+        pos = len(self.heap)
+        self.heap.append(elt)
+        self.position[elt] = pos
+        # Restore invariant by sifting down
+        self._siftdown(0, pos)
+        return True

     def pop(self):
         """Remove and return the smallest element in the queue."""
-        pass
+        # Remove smallest element
+        elt = self.heap[0]
+        del self.position[elt]
+        # If elt is last item, remove and return
+        if len(self.heap) == 1:
+            self.heap.pop()
+            return elt
+        # Replace root with last element
+        last = self.heap.pop()
+        self.heap[0] = last
+        self.position[last] = 0
+        # Restore invariant by sifting up
+        self._siftup(0)
+        # Return smallest element
+        return elt

     def update(self, elt, new, priority=None):
         """Replace an element in the queue with a new one."""
-        pass
+        if priority is not None:
+            new = _HeapElement(priority, new)
+        # Replace
+        pos = self.position[elt]
+        self.heap[pos] = new
+        del self.position[elt]
+        self.position[new] = pos
+        # Restore invariant by sifting up
+        self._siftup(pos)

     def remove(self, elt):
         """Remove an element from the queue."""
-        pass
+        # Find and remove element
+        try:
+            pos = self.position[elt]
+            del self.position[elt]
+        except KeyError:
+            # Not in queue
+            raise
+        # If elt is last item, remove and return
+        if pos == len(self.heap) - 1:
+            self.heap.pop()
+            return
+        # Replace elt with last element
+        last = self.heap.pop()
+        self.heap[pos] = last
+        self.position[last] = pos
+        # Restore invariant by sifting up
+        self._siftup(pos)

     def _siftup(self, pos):
         """Move smaller child up until hitting a leaf.
@@ -186,7 +243,38 @@ class MappedQueue:
         Built to mimic the code of heapq._siftup,
         but also updating the position dict.
         """
-        pass
+        heap, position = self.heap, self.position
+        end_pos = len(heap)
+        startpos = pos
+        newitem = heap[pos]
+        # Shift up the smaller child until hitting a leaf
+        child_pos = (pos << 1) + 1  # start with leftmost child position
+        while child_pos < end_pos:
+            # Set child_pos to index of smaller child.
+            child = heap[child_pos]
+            right_pos = child_pos + 1
+            if right_pos < end_pos:
+                right = heap[right_pos]
+                if not child < right:
+                    child = right
+                    child_pos = right_pos
+            # Move the smaller child up.
+            heap[pos] = child
+            position[child] = pos
+            pos = child_pos
+            child_pos = (pos << 1) + 1
+        # pos is a leaf position. Put newitem there, and bubble it up
+        # to its final resting place (by sifting its parents down).
+        while pos > 0:
+            parent_pos = (pos - 1) >> 1
+            parent = heap[parent_pos]
+            if not newitem < parent:
+                break
+            heap[pos] = parent
+            position[parent] = pos
+            pos = parent_pos
+        heap[pos] = newitem
+        position[newitem] = pos

     def _siftdown(self, start_pos, pos):
         """Restore invariant. keep swapping with parent until smaller.
@@ -194,4 +282,17 @@ class MappedQueue:
         Built to mimic the code of heapq._siftdown,
         but also updating the position dict.
         """
-        pass
+        heap, position = self.heap, self.position
+        newitem = heap[pos]
+        # Follow the path to the root, moving parents down until finding a place
+        # newitem fits.
+        while pos > start_pos:
+            parent_pos = (pos - 1) >> 1
+            parent = heap[parent_pos]
+            if not newitem < parent:
+                break
+            heap[pos] = parent
+            position[parent] = pos
+            pos = parent_pos
+        heap[pos] = newitem
+        position[newitem] = pos
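
To see the MappedQueue pieces work together, a small usage sketch (behavior as implemented above; because `_HeapElement` hashes on its element, a plain element can be used to look up its prioritized entry):

    from networkx.utils.mapped_queue import MappedQueue

    q = MappedQueue()
    q.push("x", priority=3)
    q.push("y", priority=1)
    q.update("x", "x", priority=0)  # re-prioritize an existing element
    smallest = q.pop()
    print(smallest.priority, smallest.element)  # 0 x
    q.remove("y")
    print(len(q))  # 0
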
diff --git a/networkx/utils/misc.py b/networkx/utils/misc.py
index a6dba26aa..096e46ab6 100644
--- a/networkx/utils/misc.py
+++ b/networkx/utils/misc.py
@@ -10,6 +10,7 @@ can be accessed, for example, as
 >>> networkx.utils.arbitrary_element({5, 1, 7})  # doctest: +SKIP
 1
 """
+
 import random
 import sys
 import uuid
@@ -17,17 +18,44 @@ import warnings
 from collections import defaultdict, deque
 from collections.abc import Iterable, Iterator, Sized
 from itertools import chain, tee
+
 import networkx as nx
-__all__ = ['flatten', 'make_list_of_ints', 'dict_to_numpy_array',
-    'arbitrary_element', 'pairwise', 'groups', 'create_random_state',
-    'create_py_random_state', 'PythonRandomInterface',
-    'PythonRandomViaNumpyBits', 'nodes_equal', 'edges_equal',
-    'graphs_equal', '_clear_cache']
+
+__all__ = [
+    "flatten",
+    "make_list_of_ints",
+    "dict_to_numpy_array",
+    "arbitrary_element",
+    "pairwise",
+    "groups",
+    "create_random_state",
+    "create_py_random_state",
+    "PythonRandomInterface",
+    "PythonRandomViaNumpyBits",
+    "nodes_equal",
+    "edges_equal",
+    "graphs_equal",
+    "_clear_cache",
+]
+
+
+# some cookbook stuff
+# used in deciding whether something is a bunch of nodes, edges, etc.
+# see G.add_nodes and others in Graph Class in networkx/base.py


 def flatten(obj, result=None):
     """Return flattened version of (possibly nested) iterable object."""
-    pass
+    if not isinstance(obj, Iterable | Sized) or isinstance(obj, str):
+        return obj
+    if result is None:
+        result = []
+    for item in obj:
+        if not isinstance(item, Iterable | Sized) or isinstance(item, str):
+            result.append(item)
+        else:
+            flatten(item, result)
+    return tuple(result)


 def make_list_of_ints(sequence):
@@ -39,13 +67,42 @@ def make_list_of_ints(sequence):
     If sequence is a list, the non-int values are replaced with ints.
     So, no new list is created.
     """
-    pass
+    if not isinstance(sequence, list):
+        result = []
+        for i in sequence:
+            errmsg = f"sequence is not all integers: {i}"
+            try:
+                ii = int(i)
+            except ValueError:
+                raise nx.NetworkXError(errmsg) from None
+            if ii != i:
+                raise nx.NetworkXError(errmsg)
+            result.append(ii)
+        return result
+    # original sequence is a list... in-place conversion to ints
+    for indx, i in enumerate(sequence):
+        errmsg = f"sequence is not all integers: {i}"
+        if isinstance(i, int):
+            continue
+        try:
+            ii = int(i)
+        except ValueError:
+            raise nx.NetworkXError(errmsg) from None
+        if ii != i:
+            raise nx.NetworkXError(errmsg)
+        sequence[indx] = ii
+    return sequence


 def dict_to_numpy_array(d, mapping=None):
     """Convert a dictionary of dictionaries to a numpy array
     with optional mapping."""
-    pass
+    try:
+        return _dict_to_numpy_array2(d, mapping)
+    except (AttributeError, TypeError):
+        # AttributeError is when no mapping was provided and v.keys() fails.
+        # TypeError is when a mapping was provided and d[k1][k2] fails.
+        return _dict_to_numpy_array1(d, mapping)


 def _dict_to_numpy_array2(d, mapping=None):
@@ -53,12 +110,37 @@ def _dict_to_numpy_array2(d, mapping=None):
     with optional mapping.

     """
-    pass
+    import numpy as np
+
+    if mapping is None:
+        s = set(d.keys())
+        for k, v in d.items():
+            s.update(v.keys())
+        mapping = dict(zip(s, range(len(s))))
+    n = len(mapping)
+    a = np.zeros((n, n))
+    for k1, i in mapping.items():
+        for k2, j in mapping.items():
+            try:
+                a[i, j] = d[k1][k2]
+            except KeyError:
+                pass
+    return a


 def _dict_to_numpy_array1(d, mapping=None):
     """Convert a dictionary of numbers to a 1d numpy array with optional mapping."""
-    pass
+    import numpy as np
+
+    if mapping is None:
+        s = set(d.keys())
+        mapping = dict(zip(s, range(len(s))))
+    n = len(mapping)
+    a = np.zeros(n)
+    for k1, i in mapping.items():
+        a[i] = d[k1]
+    return a


 def arbitrary_element(iterable):
@@ -124,12 +206,20 @@ def arbitrary_element(iterable):
         1

     """
-    pass
+    if isinstance(iterable, Iterator):
+        raise ValueError("cannot return an arbitrary item from an iterator")
+    # Another possible implementation is ``for x in iterable: return x``.
+    return next(iter(iterable))


+# Recipe from the itertools documentation.
 def pairwise(iterable, cyclic=False):
-    """s -> (s0, s1), (s1, s2), (s2, s3), ..."""
-    pass
+    "s -> (s0, s1), (s1, s2), (s2, s3), ..."
+    a, b = tee(iterable)
+    first = next(b, None)
+    if cyclic is True:
+        return zip(a, chain(b, (first,)))
+    return zip(a, b)


 def groups(many_to_one):
@@ -148,7 +238,10 @@ def groups(many_to_one):
     >>> groups(many_to_one)  # doctest: +SKIP
     {1: {'a', 'b'}, 2: {'c'}, 3: {'e', 'd'}}
     """
-    pass
+    one_to_many = defaultdict(set)
+    for v, k in many_to_one.items():
+        one_to_many[k].add(v)
+    return dict(one_to_many)


 def create_random_state(random_state=None):
@@ -164,7 +257,21 @@ def create_random_state(random_state=None):
         if None or numpy.random, return the global random number generator used
         by numpy.random.
     """
-    pass
+    import numpy as np
+
+    if random_state is None or random_state is np.random:
+        return np.random.mtrand._rand
+    if isinstance(random_state, np.random.RandomState):
+        return random_state
+    if isinstance(random_state, int):
+        return np.random.RandomState(random_state)
+    if isinstance(random_state, np.random.Generator):
+        return random_state
+    msg = (
+        f"{random_state} cannot be used to create a numpy.random.RandomState or\n"
+        "numpy.random.Generator instance"
+    )
+    raise ValueError(msg)


 class PythonRandomViaNumpyBits(random.Random):
@@ -189,27 +296,42 @@ class PythonRandomViaNumpyBits(random.Random):
         try:
             import numpy as np
         except ImportError:
-            msg = 'numpy not found, only random.random available.'
+            msg = "numpy not found, only random.random available."
             warnings.warn(msg, ImportWarning)
+
         if rng is None:
             self._rng = np.random.mtrand._rand
         else:
             self._rng = rng
+
+        # Not necessary, given our overriding of gauss() below, but it's
+        # in the superclass and nominally public, so initialize it here.
         self.gauss_next = None

     def random(self):
         """Get the next random number in the range 0.0 <= X < 1.0."""
-        pass
+        return self._rng.random()

     def getrandbits(self, k):
         """getrandbits(k) -> x.  Generates an int with k random bits."""
-        pass
+        if k < 0:
+            raise ValueError("number of bits must be non-negative")
+        numbytes = (k + 7) // 8  # bits / 8 and rounded up
+        x = int.from_bytes(self._rng.bytes(numbytes), "big")
+        return x >> (numbytes * 8 - k)  # trim excess bits
+
+    def getstate(self):
+        return self._rng.__getstate__()
+
+    def setstate(self, state):
+        self._rng.__setstate__(state)

     def seed(self, *args, **kwds):
-        """Do nothing override method."""
-        pass
+        "Do nothing override method."
+        raise NotImplementedError("seed() not implemented in PythonRandomViaNumpyBits")


+##################################################################
 class PythonRandomInterface:
     """PythonRandomInterface is included for backward compatibility
     New code should use PythonRandomViaNumpyBits instead.
@@ -219,13 +341,86 @@ class PythonRandomInterface:
         try:
             import numpy as np
         except ImportError:
-            msg = 'numpy not found, only random.random available.'
+            msg = "numpy not found, only random.random available."
             warnings.warn(msg, ImportWarning)
+
         if rng is None:
             self._rng = np.random.mtrand._rand
         else:
             self._rng = rng

+    def random(self):
+        return self._rng.random()
+
+    def uniform(self, a, b):
+        return a + (b - a) * self._rng.random()
+
+    def randrange(self, a, b=None):
+        import numpy as np
+
+        if b is None:
+            a, b = 0, a
+        if b > 9223372036854775807:  # from np.iinfo(np.int64).max
+            tmp_rng = PythonRandomViaNumpyBits(self._rng)
+            return tmp_rng.randrange(a, b)
+
+        if isinstance(self._rng, np.random.Generator):
+            return self._rng.integers(a, b)
+        return self._rng.randint(a, b)
+
+    # NOTE: the numpy implementations of `choice` don't support strings, so
+    # this cannot be replaced with self._rng.choice
+    def choice(self, seq):
+        import numpy as np
+
+        if isinstance(self._rng, np.random.Generator):
+            idx = self._rng.integers(0, len(seq))
+        else:
+            idx = self._rng.randint(0, len(seq))
+        return seq[idx]
+
+    def gauss(self, mu, sigma):
+        return self._rng.normal(mu, sigma)
+
+    def shuffle(self, seq):
+        return self._rng.shuffle(seq)
+
+    #    Some methods don't match API for numpy RandomState.
+    #    Commented out versions are not used by NetworkX
+
+    def sample(self, seq, k):
+        return self._rng.choice(list(seq), size=(k,), replace=False)
+
+    def randint(self, a, b):
+        import numpy as np
+
+        if b > 9223372036854775807:  # from np.iinfo(np.int64).max
+            tmp_rng = PythonRandomViaNumpyBits(self._rng)
+            return tmp_rng.randint(a, b)
+
+        if isinstance(self._rng, np.random.Generator):
+            return self._rng.integers(a, b + 1)
+        return self._rng.randint(a, b + 1)
+
+    #    exponential as expovariate with 1/argument,
+    def expovariate(self, scale):
+        return self._rng.exponential(1 / scale)
+
+    #    pareto as paretovariate with 1/argument,
+    def paretovariate(self, shape):
+        return self._rng.pareto(shape)
+
+
+#    weibull as weibullvariate multiplied by beta,
+#    def weibullvariate(self, alpha, beta):
+#        return self._rng.weibull(alpha) * beta
+#
+#    def triangular(self, low, high, mode):
+#        return self._rng.triangular(low, mode, high)
+#
+#    def choices(self, seq, weights=None, cum_weights=None, k=1):
+#        return self._rng.choice(seq
+

 def create_py_random_state(random_state=None):
     """Returns a random.Random instance depending on input.
@@ -264,7 +459,33 @@ def create_py_random_state(random_state=None):
       wrapper as well. We use it only if passed a (non-default) `np.RandomState`
       instance pre-initialized from a seed. Otherwise the newer wrapper is used.
     """
-    pass
+    if random_state is None or random_state is random:
+        return random._inst
+    if isinstance(random_state, random.Random):
+        return random_state
+    if isinstance(random_state, int):
+        return random.Random(random_state)
+
+    try:
+        import numpy as np
+    except ImportError:
+        pass
+    else:
+        if isinstance(random_state, PythonRandomInterface | PythonRandomViaNumpyBits):
+            return random_state
+        if isinstance(random_state, np.random.Generator):
+            return PythonRandomViaNumpyBits(random_state)
+        if random_state is np.random:
+            return PythonRandomViaNumpyBits(np.random.mtrand._rand)
+
+        if isinstance(random_state, np.random.RandomState):
+            if random_state is np.random.mtrand._rand:
+                return PythonRandomViaNumpyBits(random_state)
+            # Only need older interface if specially constructed RandomState used
+            return PythonRandomInterface(random_state)
+
+    msg = f"{random_state} cannot be used to generate a random.Random instance"
+    raise ValueError(msg)


 def nodes_equal(nodes1, nodes2):
@@ -283,7 +504,15 @@ def nodes_equal(nodes1, nodes2):
     bool
         True if nodes are equal, False otherwise.
     """
-    pass
+    nlist1 = list(nodes1)
+    nlist2 = list(nodes2)
+    try:
+        d1 = dict(nlist1)
+        d2 = dict(nlist2)
+    except (ValueError, TypeError):
+        d1 = dict.fromkeys(nlist1)
+        d2 = dict.fromkeys(nlist2)
+    return d1 == d2


 def edges_equal(edges1, edges2):
@@ -305,7 +534,40 @@ def edges_equal(edges1, edges2):
     bool
         True if edges are equal, False otherwise.
     """
-    pass
+    d1 = defaultdict(dict)
+    d2 = defaultdict(dict)
+    c1 = 0
+    for c1, e in enumerate(edges1):
+        u, v = e[0], e[1]
+        data = [e[2:]]
+        if v in d1[u]:
+            data = d1[u][v] + data
+        d1[u][v] = data
+        d1[v][u] = data
+    c2 = 0
+    for c2, e in enumerate(edges2):
+        u, v = e[0], e[1]
+        data = [e[2:]]
+        if v in d2[u]:
+            data = d2[u][v] + data
+        d2[u][v] = data
+        d2[v][u] = data
+    if c1 != c2:
+        return False
+    # can check one direction because lengths are the same.
+    for n, nbrdict in d1.items():
+        for nbr, datalist in nbrdict.items():
+            if n not in d2:
+                return False
+            if nbr not in d2[n]:
+                return False
+            d2datalist = d2[n][nbr]
+            for data in datalist:
+                if datalist.count(data) != d2datalist.count(data):
+                    return False
+    return True


 def graphs_equal(graph1, graph2):
@@ -323,7 +585,11 @@ def graphs_equal(graph1, graph2):
     bool
         True if graphs are equal, False otherwise.
     """
-    pass
+    return (
+        graph1.adj == graph2.adj
+        and graph1.nodes == graph2.nodes
+        and graph1.graph == graph2.graph
+    )


 def _clear_cache(G):
@@ -331,4 +597,5 @@ def _clear_cache(G):

     Caching is controlled via ``nx.config.cache_converted_graphs`` configuration.
     """
-    pass
+    if cache := getattr(G, "__networkx_cache__", None):
+        cache.clear()
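
A few of the restored misc helpers in action (set-valued outputs may print in any order):

    from networkx.utils.misc import flatten, groups, pairwise

    print(flatten((1, (2, (3,)), 4)))              # (1, 2, 3, 4)
    print(list(pairwise([1, 2, 3], cyclic=True)))  # [(1, 2), (2, 3), (3, 1)]
    print(groups({"a": 1, "b": 1, "c": 2}))        # {1: {'a', 'b'}, 2: {'c'}}
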
diff --git a/networkx/utils/random_sequence.py b/networkx/utils/random_sequence.py
index 403d9033f..20a7b5e0a 100644
--- a/networkx/utils/random_sequence.py
+++ b/networkx/utils/random_sequence.py
@@ -2,10 +2,23 @@
 Utilities for generating random numbers, random sequences, and
 random selections.
 """
+
 import networkx as nx
 from networkx.utils import py_random_state
-__all__ = ['powerlaw_sequence', 'zipf_rv', 'cumulative_distribution',
-    'discrete_sequence', 'random_weighted_sample', 'weighted_choice']
+
+__all__ = [
+    "powerlaw_sequence",
+    "zipf_rv",
+    "cumulative_distribution",
+    "discrete_sequence",
+    "random_weighted_sample",
+    "weighted_choice",
+]
+
+
+# These helpers choose random sequences from distributions using
+# Python's random module:
+# https://docs.python.org/3/library/random.html


 @py_random_state(2)
@@ -13,20 +26,20 @@ def powerlaw_sequence(n, exponent=2.0, seed=None):
     """
     Return sample sequence of length n from a power law distribution.
     """
-    pass
+    return [seed.paretovariate(exponent - 1) for i in range(n)]


 @py_random_state(2)
 def zipf_rv(alpha, xmin=1, seed=None):
-    """Returns a random value chosen from the Zipf distribution.
+    r"""Returns a random value chosen from the Zipf distribution.

     The return value is an integer drawn from the probability distribution

     .. math::

-        p(x)=\\frac{x^{-\\alpha}}{\\zeta(\\alpha, x_{\\min})},
+        p(x)=\frac{x^{-\alpha}}{\zeta(\alpha, x_{\min})},

-    where $\\zeta(\\alpha, x_{\\min})$ is the Hurwitz zeta function.
+    where $\zeta(\alpha, x_{\min})$ is the Hurwitz zeta function.

     Parameters
     ----------
@@ -65,12 +78,30 @@ def zipf_rv(alpha, xmin=1, seed=None):
     .. [1] Luc Devroye, Non-Uniform Random Variate Generation,
        Springer-Verlag, New York, 1986.
     """
-    pass
+    if xmin < 1:
+        raise ValueError("xmin < 1")
+    if alpha <= 1:
+        raise ValueError("a <= 1.0")
+    a1 = alpha - 1.0
+    b = 2**a1
+    while True:
+        u = 1.0 - seed.random()  # u in (0,1]
+        v = seed.random()  # v in [0,1)
+        x = int(xmin * u ** -(1.0 / a1))
+        t = (1.0 + (1.0 / x)) ** a1
+        if v * x * (t - 1.0) / (b - 1.0) <= t / b:
+            break
+    return x


 def cumulative_distribution(distribution):
     """Returns normalized cumulative distribution from discrete distribution."""
-    pass
+
+    cdf = [0.0]
+    psum = sum(distribution)
+    for i in range(len(distribution)):
+        cdf.append(cdf[i] + distribution[i] / psum)
+    return cdf


 @py_random_state(3)
@@ -86,7 +117,23 @@ def discrete_sequence(n, distribution=None, cdistribution=None, seed=None):
     cdistribution = normalized discrete cumulative distribution

     """
-    pass
+    import bisect
+
+    if cdistribution is not None:
+        cdf = cdistribution
+    elif distribution is not None:
+        cdf = cumulative_distribution(distribution)
+    else:
+        raise nx.NetworkXError(
+            "discrete_sequence: distribution or cdistribution missing"
+        )
+
+    # draw n uniform random numbers
+    inputseq = [seed.random() for i in range(n)]
+
+    # choose from CDF
+    seq = [bisect.bisect_left(cdf, s) - 1 for s in inputseq]
+    return seq


 @py_random_state(2)
@@ -95,7 +142,12 @@ def random_weighted_sample(mapping, k, seed=None):

     The input is a dictionary of items with weights as values.
     """
-    pass
+    if k > len(mapping):
+        raise ValueError("sample larger than population")
+    sample = set()
+    while len(sample) < k:
+        sample.add(weighted_choice(mapping, seed))
+    return list(sample)


 @py_random_state(1)
@@ -104,4 +156,9 @@ def weighted_choice(mapping, seed=None):

     The input is a dictionary of items with weights as values.
     """
-    pass
+    # use roulette method
+    rnd = seed.random() * sum(mapping.values())
+    for k, w in mapping.items():
+        rnd -= w
+        if rnd < 0:
+            return k
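
A quick sketch of the distribution helpers above; the `py_random_state` decorator lets `seed` be an int, a `random.Random` instance, or None:

    from networkx.utils.random_sequence import (
        cumulative_distribution,
        weighted_choice,
        zipf_rv,
    )

    print(cumulative_distribution([1, 1, 2]))  # [0.0, 0.25, 0.5, 1.0]
    print(weighted_choice({"a": 0.1, "b": 0.9}, seed=42))  # 'b' ~90% of the time
    print(zipf_rv(2.5, xmin=1, seed=42))  # small integers dominate
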
diff --git a/networkx/utils/rcm.py b/networkx/utils/rcm.py
index fa7b20746..f9e1bfee6 100644
--- a/networkx/utils/rcm.py
+++ b/networkx/utils/rcm.py
@@ -3,9 +3,12 @@ Cuthill-McKee ordering of graph nodes to produce sparse matrices
 """
 from collections import deque
 from operator import itemgetter
+
 import networkx as nx
+
 from ..utils import arbitrary_element
-__all__ = ['cuthill_mckee_ordering', 'reverse_cuthill_mckee_ordering']
+
+__all__ = ["cuthill_mckee_ordering", "reverse_cuthill_mckee_ordering"]


 def cuthill_mckee_ordering(G, heuristic=None):
@@ -61,7 +64,8 @@ def cuthill_mckee_ordering(G, heuristic=None):
     .. [2]  Steven S. Skiena. 1997. The Algorithm Design Manual.
        Springer-Verlag New York, Inc., New York, NY, USA.
     """
-    pass
+    for c in nx.connected_components(G):
+        yield from connected_cuthill_mckee_ordering(G.subgraph(c), heuristic)


 def reverse_cuthill_mckee_ordering(G, heuristic=None):
@@ -117,4 +121,38 @@ def reverse_cuthill_mckee_ordering(G, heuristic=None):
     .. [2]  Steven S. Skiena. 1997. The Algorithm Design Manual.
        Springer-Verlag New York, Inc., New York, NY, USA.
     """
-    pass
+    return reversed(list(cuthill_mckee_ordering(G, heuristic=heuristic)))
+
+
+def connected_cuthill_mckee_ordering(G, heuristic=None):
+    # the cuthill mckee algorithm for connected graphs
+    if heuristic is None:
+        start = pseudo_peripheral_node(G)
+    else:
+        start = heuristic(G)
+    visited = {start}
+    queue = deque([start])
+    while queue:
+        parent = queue.popleft()
+        yield parent
+        nd = sorted(G.degree(set(G[parent]) - visited), key=itemgetter(1))
+        children = [n for n, d in nd]
+        visited.update(children)
+        queue.extend(children)
+
+
+def pseudo_peripheral_node(G):
+    # helper for cuthill-mckee to find a node in a "pseudo peripheral pair"
+    # to use as a good starting node
+    u = arbitrary_element(G)
+    lp = 0
+    v = u
+    while True:
+        spl = dict(nx.shortest_path_length(G, v))
+        l = max(spl.values())
+        if l <= lp:
+            break
+        lp = l
+        farthest = (n for n, dist in spl.items() if dist == l)
+        v, deg = min(G.degree(farthest), key=itemgetter(1))
+    return v
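
On a path graph, for example, the pseudo-peripheral start node is an endpoint, so the reverse Cuthill-McKee ordering walks the path from one end (which endpoint is chosen may vary):

    import networkx as nx
    from networkx.utils.rcm import reverse_cuthill_mckee_ordering

    G = nx.path_graph(5)
    print(list(reverse_cuthill_mckee_ordering(G)))  # e.g. [4, 3, 2, 1, 0]
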
diff --git a/networkx/utils/union_find.py b/networkx/utils/union_find.py
index 4d9d7ad5f..2a07129f5 100644
--- a/networkx/utils/union_find.py
+++ b/networkx/utils/union_find.py
@@ -1,6 +1,7 @@
 """
 Union-find data structure.
 """
+
 from networkx.utils import groups


@@ -44,16 +45,22 @@ class UnionFind:

     def __getitem__(self, object):
         """Find and return the name of the set containing the object."""
+
+        # check for previously unknown object
         if object not in self.parents:
             self.parents[object] = object
             self.weights[object] = 1
             return object
+
+        # find path of objects leading to the root
         path = []
         root = self.parents[object]
         while root != object:
             path.append(object)
             object = root
             root = self.parents[object]
+
+        # compress the path and return
         for ancestor in path:
             self.parents[ancestor] = root
         return root
@@ -75,8 +82,25 @@ class UnionFind:
             [['x', 'y'], ['z']]

         """
-        pass
+        # Ensure fully pruned paths
+        for x in self.parents:
+            _ = self[x]  # Evaluated for side-effect only
+
+        yield from groups(self.parents).values()

     def union(self, *objects):
         """Find the sets containing the objects and merge them all."""
-        pass
+        # Find the heaviest root according to its weight.
+        roots = iter(
+            sorted(
+                {self[x] for x in objects}, key=lambda r: self.weights[r], reverse=True
+            )
+        )
+        try:
+            root = next(roots)
+        except StopIteration:
+            return
+
+        for r in roots:
+            self.weights[root] += self.weights[r]
+            self.parents[r] = root
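
Finally, a closing sketch of the UnionFind operations implemented above:

    from networkx.utils.union_find import UnionFind

    uf = UnionFind()
    uf.union("x", "y")  # merge the sets containing "x" and "y"
    uf["z"]             # looking up an unknown object creates a singleton set
    print(uf["x"] == uf["y"])                    # True: same root after union
    print(sorted(len(s) for s in uf.to_sets()))  # [1, 2]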