Reference (Gold): minitorch

Pytest Summary for the tests directory

status count
failed 211
xfailed 4
passed 15
total 230
collected 230

Failed pytests:

test_autodiff.py::test_chain_rule1
@pytest.mark.task1_3
    def test_chain_rule1() -> None:
        x = minitorch.Scalar(0.0)
        constant = minitorch.Scalar(
            0.0, ScalarHistory(Function1, ctx=Context(), inputs=[x, x])
        )
>       back = constant.chain_rule(d_output=5)

tests/test_autodiff.py:48: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), d_output = 5

    def chain_rule(self, d_output: Any) -> Iterable[Tuple[Variable, Any]]:
        h = self.history
        assert h is not None
        assert h.last_fn is not None
        assert h.ctx is not None

        # TODO: Implement for Task 1.3.
>       raise NotImplementedError('Need to implement for Task 1.3')
E       NotImplementedError: Need to implement for Task 1.3

minitorch/scalar.py:177: NotImplementedError
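
The failure above is the Task 1.3 stub in minitorch/scalar.py. One plausible shape for it (a sketch only, not the reference solution; it assumes the scaffold's `ScalarFunction._backward` and `Variable.is_constant` helpers) is to run the saved function's backward pass and pair each local derivative with the matching input:

    # Sketch only -- assumes h.last_fn._backward returns one local
    # derivative per input, and that constant inputs should be skipped.
    def chain_rule(self, d_output: Any) -> Iterable[Tuple[Variable, Any]]:
        h = self.history
        assert h is not None and h.last_fn is not None and h.ctx is not None
        local_derivs = h.last_fn._backward(h.ctx, d_output)
        return [
            (inp, d)
            for inp, d in zip(h.inputs, local_derivs)
            if not inp.is_constant()
        ]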

test_autodiff.py::test_chain_rule2
@pytest.mark.task1_3
    def test_chain_rule2() -> None:
        var = minitorch.Scalar(0.0, ScalarHistory())
        constant = minitorch.Scalar(
            0.0, ScalarHistory(Function1, ctx=Context(), inputs=[var, var])
        )
>       back = constant.chain_rule(d_output=5)

tests/test_autodiff.py:58: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), d_output = 5

    def chain_rule(self, d_output: Any) -> Iterable[Tuple[Variable, Any]]:
        h = self.history
        assert h is not None
        assert h.last_fn is not None
        assert h.ctx is not None

        # TODO: Implement for Task 1.3.
>       raise NotImplementedError('Need to implement for Task 1.3')
E       NotImplementedError: Need to implement for Task 1.3

minitorch/scalar.py:177: NotImplementedError

test_autodiff.py::test_chain_rule3
@pytest.mark.task1_3
    def test_chain_rule3() -> None:
        "Check that constrants are ignored and variables get derivatives."
        constant = 10
        var = minitorch.Scalar(5)

        y = Function2.apply(constant, var)

>       back = y.chain_rule(d_output=5)

tests/test_autodiff.py:73: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(60.000000), d_output = 5

    def chain_rule(self, d_output: Any) -> Iterable[Tuple[Variable, Any]]:
        h = self.history
        assert h is not None
        assert h.last_fn is not None
        assert h.ctx is not None

        # TODO: Implement for Task 1.3.
>       raise NotImplementedError('Need to implement for Task 1.3')
E       NotImplementedError: Need to implement for Task 1.3

minitorch/scalar.py:177: NotImplementedError

test_autodiff.py::test_chain_rule4
@pytest.mark.task1_3
    def test_chain_rule4() -> None:
        var1 = minitorch.Scalar(5)
        var2 = minitorch.Scalar(10)

        y = Function2.apply(var1, var2)

>       back = y.chain_rule(d_output=5)

tests/test_autodiff.py:88: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(55.000000), d_output = 5

    def chain_rule(self, d_output: Any) -> Iterable[Tuple[Variable, Any]]:
        h = self.history
        assert h is not None
        assert h.last_fn is not None
        assert h.ctx is not None

        # TODO: Implement for Task 1.3.
>       raise NotImplementedError('Need to implement for Task 1.3')
E       NotImplementedError: Need to implement for Task 1.3

minitorch/scalar.py:177: NotImplementedError

test_autodiff.py::test_backprop1
@pytest.mark.task1_4
    def test_backprop1() -> None:
        # Example 1: F1(0, v)
        var = minitorch.Scalar(0)
        var2 = Function1.apply(0, var)
>       var2.backward(d_output=5)

tests/test_autodiff.py:109: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
minitorch/scalar.py:189: in backward
    backpropagate(self, d_output)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

variable = Scalar(10.000000), deriv = 5

    def backpropagate(variable: Variable, deriv: Any) -> None:
        """
        Runs backpropagation on the computation graph in order to
        compute derivatives for the leaf nodes.

        Args:
            variable: The right-most variable
            deriv  : Its derivative that we want to propagate backward to the leaves.

        No return. Should write its results to the derivative values of each leaf through `accumulate_derivative`.
        """
        # TODO: Implement for Task 1.4.
>       raise NotImplementedError('Need to implement for Task 1.4')
E       NotImplementedError: Need to implement for Task 1.4

minitorch/autodiff.py:80: NotImplementedError
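
Every test_backprop* failure is this same Task 1.4 stub in minitorch/autodiff.py. A common way to fill it in (a sketch under the assumption that Variables expose unique_id, is_leaf, is_constant, parents, chain_rule and accumulate_derivative, as the scaffold's protocol suggests) is a topological sort followed by a right-to-left sweep that accumulates derivatives:

    # Sketch only -- not the reference implementation.
    def backpropagate(variable: Variable, deriv: Any) -> None:
        order = []          # topological order: inputs after their users
        seen = set()

        def visit(var: Variable) -> None:
            if var.unique_id in seen or var.is_constant():
                return
            seen.add(var.unique_id)
            if not var.is_leaf():
                for parent in var.parents:
                    visit(parent)
            order.append(var)

        visit(variable)

        # Walk each variable only after all of its users have contributed.
        derivs = {variable.unique_id: deriv}
        for var in reversed(order):
            d = derivs.get(var.unique_id, 0.0)
            if var.is_leaf():
                var.accumulate_derivative(d)
            else:
                for parent, pd in var.chain_rule(d):
                    derivs[parent.unique_id] = derivs.get(parent.unique_id, 0.0) + pd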

test_autodiff.py::test_backprop2
@pytest.mark.task1_4
    def test_backprop2() -> None:
        # Example 2: F1(0, 0)
        var = minitorch.Scalar(0)
        var2 = Function1.apply(0, var)
        var3 = Function1.apply(0, var2)
>       var3.backward(d_output=5)

tests/test_autodiff.py:119: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
minitorch/scalar.py:189: in backward
    backpropagate(self, d_output)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

variable = Scalar(20.000000), deriv = 5

    def backpropagate(variable: Variable, deriv: Any) -> None:
        """
        Runs backpropagation on the computation graph in order to
        compute derivatives for the leaf nodes.

        Args:
            variable: The right-most variable
            deriv  : Its derivative that we want to propagate backward to the leaves.

        No return. Should write its results to the derivative values of each leaf through `accumulate_derivative`.
        """
        # TODO: Implement for Task 1.4.
>       raise NotImplementedError('Need to implement for Task 1.4')
E       NotImplementedError: Need to implement for Task 1.4

minitorch/autodiff.py:80: NotImplementedError

test_autodiff.py::test_backprop3
@pytest.mark.task1_4
    def test_backprop3() -> None:
        # Example 3: F1(F1(0, v1), F1(0, v1))
        var1 = minitorch.Scalar(0)
        var2 = Function1.apply(0, var1)
        var3 = Function1.apply(0, var1)
        var4 = Function1.apply(var2, var3)
>       var4.backward(d_output=5)

tests/test_autodiff.py:130: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
minitorch/scalar.py:189: in backward
    backpropagate(self, d_output)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

variable = Scalar(30.000000), deriv = 5

    def backpropagate(variable: Variable, deriv: Any) -> None:
        """
        Runs backpropagation on the computation graph in order to
        compute derivatives for the leaf nodes.

        Args:
            variable: The right-most variable
            deriv  : Its derivative that we want to propagate backward to the leaves.

        No return. Should write its results to the derivative values of each leaf through `accumulate_derivative`.
        """
        # TODO: Implement for Task 1.4.
>       raise NotImplementedError('Need to implement for Task 1.4')
E       NotImplementedError: Need to implement for Task 1.4

minitorch/autodiff.py:80: NotImplementedError

test_autodiff.py::test_backprop4
@pytest.mark.task1_4
    def test_backprop4() -> None:
        # Example 4: F1(F1(0, v1), F1(0, v1))
        var0 = minitorch.Scalar(0)
        var1 = Function1.apply(0, var0)
        var2 = Function1.apply(0, var1)
        var3 = Function1.apply(0, var1)
        var4 = Function1.apply(var2, var3)
>       var4.backward(d_output=5)

tests/test_autodiff.py:142: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
minitorch/scalar.py:189: in backward
    backpropagate(self, d_output)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

variable = Scalar(50.000000), deriv = 5

    def backpropagate(variable: Variable, deriv: Any) -> None:
        """
        Runs backpropagation on the computation graph in order to
        compute derivatives for the leaf nodes.

        Args:
            variable: The right-most variable
            deriv  : Its derivative that we want to propagate backward to the leaves.

        No return. Should write its results to the derivative values of each leaf through `accumulate_derivative`.
        """
        # TODO: Implement for Task 1.4.
>       raise NotImplementedError('Need to implement for Task 1.4')
E       NotImplementedError: Need to implement for Task 1.4

minitorch/autodiff.py:80: NotImplementedError

test_conv.py::test_conv1d_simple
@pytest.mark.task4_1
    def test_conv1d_simple() -> None:
>       t = minitorch.tensor([0, 1, 2, 3]).view(1, 1, 4)

tests/test_conv.py:12: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
minitorch/tensor_functions.py:366: in tensor
    return _tensor(cur, tuple(shape2), backend=backend, requires_grad=requires_grad)
minitorch/tensor_functions.py:332: in _tensor
    tensor = minitorch.Tensor.make(ls, shape, backend=backend)
minitorch/tensor.py:264: in make
    return Tensor(TensorData(storage, shape, strides), backend=backend)
minitorch/tensor_data.py:147: in __init__
    self.size = int(prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (4,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError
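
This prod stub (minitorch/operators.py:206) is behind most of the tensor-creation failures below: TensorData cannot even compute its size. The docstring asks for the project's own reduce and mul; a self-contained sketch of the same idea, using functools.reduce so the example runs on its own, would be:

    from functools import reduce
    from typing import Iterable

    def mul(x: float, y: float) -> float:
        return x * y

    def prod(ls: Iterable[float]) -> float:
        "Product of an iterable; 1.0 for an empty one."
        return reduce(mul, ls, 1.0)

    assert prod((1, 1, 4)) == 4.0   # the shape from the failing test above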

test_conv.py::test_conv1d
@pytest.mark.task4_1
>   @given(tensors(shape=(1, 1, 6)), tensors(shape=(1, 1, 4)))

tests/test_conv.py:24: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1, 1, 4)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_conv.py::test_conv1d_channel
@pytest.mark.task4_1
>   @given(tensors(shape=(2, 2, 6)), tensors(shape=(3, 2, 2)))

tests/test_conv.py:31: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (3, 2, 2)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_conv.py::test_conv
@pytest.mark.task4_2
>   @given(tensors(shape=(1, 1, 6, 6)), tensors(shape=(1, 1, 2, 4)))

tests/test_conv.py:38: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1, 1, 2, 4)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_conv.py::test_conv_batch
@pytest.mark.task4_2
>   @given(tensors(shape=(2, 1, 6, 6)), tensors(shape=(1, 1, 2, 4)))

tests/test_conv.py:44: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1, 1, 2, 4)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_conv.py::test_conv_channel
@pytest.mark.task4_2
>   @given(tensors(shape=(2, 2, 6, 6)), tensors(shape=(3, 2, 2, 4)))

tests/test_conv.py:51: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (3, 2, 2, 4)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_conv.py::test_conv2
@pytest.mark.task4_2
    def test_conv2() -> None:
>       t = minitorch.tensor([[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]).view(
            1, 1, 4, 4
        )

tests/test_conv.py:59: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
minitorch/tensor_functions.py:366: in tensor
    return _tensor(cur, tuple(shape2), backend=backend, requires_grad=requires_grad)
minitorch/tensor_functions.py:332: in _tensor
    tensor = minitorch.Tensor.make(ls, shape, backend=backend)
minitorch/tensor.py:264: in make
    return Tensor(TensorData(storage, shape, strides), backend=backend)
minitorch/tensor_data.py:147: in __init__
    self.size = int(prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (4, 4)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_module.py::test_stacked_demo
@pytest.mark.task0_4
    def test_stacked_demo() -> None:
        "Check that each of the properties match"
        mod = ModuleA1()
>       np = dict(mod.named_parameters())

tests/test_module.py:49: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = ModuleA1(
  (a): ModuleA2()
  (b): ModuleA3(
    (c): ModuleA4()
  )
)

    def named_parameters(self) -> Sequence[Tuple[str, Parameter]]:
        """
        Collect all the parameters of this module and its descendents.


        Returns:
            The name and `Parameter` of each ancestor parameter.
        """
        # TODO: Implement for Task 0.4.
>       raise NotImplementedError('Need to implement for Task 0.4')
E       NotImplementedError: Need to implement for Task 0.4

minitorch/module.py:51: NotImplementedError
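
named_parameters is the Task 0.4 stub at minitorch/module.py:51. A sketch, assuming the scaffold's Module keeps its direct parameters in self._parameters and its child modules in self._modules:

    def named_parameters(self) -> Sequence[Tuple[str, Parameter]]:
        result = []
        for name, param in self._parameters.items():
            result.append((name, param))
        for child_name, child in self._modules.items():
            for name, param in child.named_parameters():
                # Descendant parameters are prefixed with the child's name,
                # e.g. a parameter "p" on module "c" inside "b" becomes "b.c.p".
                result.append((child_name + "." + name, param))
        return result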

test_module.py::test_module
@pytest.mark.task0_4
>   @given(med_ints, med_ints)

tests/test_module.py:96: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_module.py:100: in test_module
    module.eval()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Module2(
  (module_c): Module3()
)

    def eval(self) -> None:
        "Set the mode of this module and all descendent modules to `eval`."
        # TODO: Implement for Task 0.4.
>       raise NotImplementedError('Need to implement for Task 0.4')
E       NotImplementedError: Need to implement for Task 0.4
E       Falsifying example: test_module(
E           size_b=1, size_a=1,
E       )

minitorch/module.py:40: NotImplementedError
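
eval (and its counterpart train) is the stub at minitorch/module.py:40. A sketch, assuming each Module carries a training flag and stores its children in self._modules:

    def train(self) -> None:
        "Set the mode of this module and all descendent modules to `train`."
        self.training = True
        for child in self._modules.values():
            child.train()

    def eval(self) -> None:
        "Set the mode of this module and all descendent modules to `eval`."
        self.training = False
        for child in self._modules.values():
            child.eval()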

test_module.py::test_stacked_module
@pytest.mark.task0_4
>   @given(med_ints, med_ints, small_floats)

tests/test_module.py:117: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_module.py:121: in test_stacked_module
    module.eval()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Module1(
  (module_a): Module2(
    (module_c): Module3()
  )
  (module_b): Module2(
    (module_c): Module3()
  )
)

    def eval(self) -> None:
        "Set the mode of this module and all descendent modules to `eval`."
        # TODO: Implement for Task 0.4.
>       raise NotImplementedError('Need to implement for Task 0.4')
E       NotImplementedError: Need to implement for Task 0.4
E       Falsifying example: test_stacked_module(
E           val=0.0, size_b=1, size_a=1,
E       )

minitorch/module.py:40: NotImplementedError

test_module.py::test_module_fail_forward
@pytest.mark.task0_4
    @pytest.mark.xfail
    def test_module_fail_forward() -> None:
        mod = minitorch.Module()
>       mod()

tests/test_module.py:154: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Module(), args = (), kwargs = {}

    def __call__(self, *args: Any, **kwargs: Any) -> Any:
>       return self.forward(*args, **kwargs)
E       TypeError: 'NoneType' object is not callable

minitorch/module.py:90: TypeError

test_modules.py::test_linear
@given(lists(scalars(), max_size=10), integers(min_value=5, max_value=20))
>   def test_linear(inputs: List[Scalar], out_size: int) -> None:

tests/test_modules.py:61: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_modules.py:65: in test_linear
    lin2.forward(mid)
tests/test_modules.py:56: in forward
    y[j] = y[j] + x * self.weights[i][j].value
minitorch/scalar.py:86: in __mul__
    return Mul.apply(self, b)
minitorch/scalar_functions.py:63: in apply
    c = cls._forward(ctx, *raw_vals)
minitorch/scalar_functions.py:45: in _forward
    return cls.forward(ctx, *inps)  # type: ignore
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ctx = Context(no_grad=False, saved_values=()), a = 0.6888437030500962
b = -0.19013172509917142

    @staticmethod
    def forward(ctx: Context, a: float, b: float) -> float:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_linear(
E           out_size=5, inputs=[],
E       )

minitorch/scalar_functions.py:107: NotImplementedError

test_modules.py::test_nn_size
def test_nn_size() -> None:
        model = Network2()
>       assert len(model.parameters()) == (
            len(model.layer1.parameters()) + len(model.layer2.parameters())
        )

tests/test_modules.py:85: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Network2(
  (layer1): ScalarLinear()
  (layer2): ScalarLinear()
)

    def parameters(self) -> Sequence[Parameter]:
        "Enumerate over all the parameters of this module and its descendents."
        # TODO: Implement for Task 0.4.
>       raise NotImplementedError('Need to implement for Task 0.4')
E       NotImplementedError: Need to implement for Task 0.4

minitorch/module.py:56: NotImplementedError
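
parameters (minitorch/module.py:56) can simply reuse named_parameters once that is implemented; a minimal sketch:

    def parameters(self) -> Sequence[Parameter]:
        "Enumerate over all the parameters of this module and its descendents."
        return [param for _, param in self.named_parameters()]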

test_nn.py::test_avg
@pytest.mark.task4_3
>   @given(tensors(shape=(1, 1, 4, 4)))

tests/test_nn.py:12: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1, 1, 4, 4)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_nn.py::test_max
@pytest.mark.task4_4
>   @given(tensors(shape=(2, 3, 4)))

tests/test_nn.py:32: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (2, 3, 4)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_nn.py::test_max_pool
@pytest.mark.task4_4
>   @given(tensors(shape=(1, 1, 4, 4)))

tests/test_nn.py:39: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1, 1, 4, 4)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_nn.py::test_drop
@pytest.mark.task4_4
>   @given(tensors())

tests/test_nn.py:60: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_nn.py::test_softmax
@pytest.mark.task4_4
>   @given(tensors(shape=(1, 1, 4, 4)))

tests/test_nn.py:73: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1, 1, 4, 4)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_nn.py::test_log_softmax
@pytest.mark.task4_4
>   @given(tensors(shape=(1, 1, 4, 4)))

tests/test_nn.py:87: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1, 1, 4, 4)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_operators.py::test_same_as_python
@pytest.mark.task0_1
>   @given(small_floats, small_floats)

tests/test_operators.py:34: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:37: in test_same_as_python
    assert_close(mul(x, y), x * y)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

x = 0.0, y = 0.0

    def mul(x: float, y: float) -> float:
        "$f(x, y) = x * y$"
        # TODO: Implement for Task 0.1.
>       raise NotImplementedError('Need to implement for Task 0.1')
E       NotImplementedError: Need to implement for Task 0.1
E       Falsifying example: test_same_as_python(
E           y=0.0, x=0.0,
E       )

minitorch/operators.py:16: NotImplementedError
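
The Task 0.1 stubs are thin wrappers around Python arithmetic. For mul (minitorch/operators.py:16) a sketch is simply:

    def mul(x: float, y: float) -> float:
        "$f(x, y) = x * y$"
        return x * y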

test_operators.py::test_relu
@pytest.mark.task0_1
>   @given(small_floats)

tests/test_operators.py:46: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:49: in test_relu
    assert relu(a) == a
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

x = 1.0

    def relu(x: float) -> float:
        """
        $f(x) =$ x if x is greater than 0, else 0

        (See https://en.wikipedia.org/wiki/Rectifier_(neural_networks) .)
        """
        # TODO: Implement for Task 0.1.
>       raise NotImplementedError('Need to implement for Task 0.1')
E       NotImplementedError: Need to implement for Task 0.1
E       Falsifying example: test_relu(
E           a=1.0,
E       )

minitorch/operators.py:84: NotImplementedError
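
A sketch of relu (minitorch/operators.py:84) matching the docstring above:

    def relu(x: float) -> float:
        "x if x is greater than 0, else 0"
        return x if x > 0.0 else 0.0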

test_operators.py::test_relu_back
@pytest.mark.task0_1
>   @given(small_floats, small_floats)

tests/test_operators.py:55: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:58: in test_relu_back
    assert relu_back(a, b) == b
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

x = 1.0, d = 0.0

    def relu_back(x: float, d: float) -> float:
        r"If $f = relu$ compute $d \times f'(x)$"
        # TODO: Implement for Task 0.1.
>       raise NotImplementedError('Need to implement for Task 0.1')
E       NotImplementedError: Need to implement for Task 0.1
E       Falsifying example: test_relu_back(
E           b=0.0, a=1.0,
E       )

minitorch/operators.py:121: NotImplementedError
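
relu_back (minitorch/operators.py:121) passes the incoming derivative through only where relu is active; a sketch:

    def relu_back(x: float, d: float) -> float:
        r"If $f = relu$ compute $d \times f'(x)$"
        # relu'(x) is 1 for x > 0 and 0 otherwise.
        return d if x > 0.0 else 0.0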

test_operators.py::test_id
@pytest.mark.task0_1
>   @given(small_floats)

tests/test_operators.py:64: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:66: in test_id
    assert id(a) == a
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

x = 0.0

    def id(x: float) -> float:
        "$f(x) = x$"
        # TODO: Implement for Task 0.1.
>       raise NotImplementedError('Need to implement for Task 0.1')
E       NotImplementedError: Need to implement for Task 0.1
E       Falsifying example: test_id(
E           a=0.0,
E       )

minitorch/operators.py:22: NotImplementedError

test_operators.py::test_lt
@pytest.mark.task0_1
>   @given(small_floats)

tests/test_operators.py:70: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:73: in test_lt
    assert lt(a - 1.0, a) == 1.0
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

x = -1.0, y = 0.0

    def lt(x: float, y: float) -> float:
        "$f(x) =$ 1.0 if x is less than y else 0.0"
        # TODO: Implement for Task 0.1.
>       raise NotImplementedError('Need to implement for Task 0.1')
E       NotImplementedError: Need to implement for Task 0.1
E       Falsifying example: test_lt(
E           a=0.0,
E       )

minitorch/operators.py:40: NotImplementedError

test_operators.py::test_max
@pytest.mark.task0_1
>   @given(small_floats)

tests/test_operators.py:78: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:80: in test_max
    assert max(a - 1.0, a) == a
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

x = -1.0, y = 0.0

    def max(x: float, y: float) -> float:
        "$f(x) =$ x if x is greater than y else y"
        # TODO: Implement for Task 0.1.
>       raise NotImplementedError('Need to implement for Task 0.1')
E       NotImplementedError: Need to implement for Task 0.1
E       Falsifying example: test_max(
E           a=0.0,
E       )

minitorch/operators.py:52: NotImplementedError

test_operators.py::test_eq
@pytest.mark.task0_1
>   @given(small_floats)

tests/test_operators.py:87: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:89: in test_eq
    assert eq(a, a) == 1.0
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

x = 0.0, y = 0.0

    def eq(x: float, y: float) -> float:
        "$f(x) =$ 1.0 if x is equal to y else 0.0"
        # TODO: Implement for Task 0.1.
>       raise NotImplementedError('Need to implement for Task 0.1')
E       NotImplementedError: Need to implement for Task 0.1
E       Falsifying example: test_eq(
E           a=0.0,
E       )

minitorch/operators.py:46: NotImplementedError
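
id, lt, max and eq (the remaining Task 0.1 stubs failing above) follow the same one-line pattern; a sketch consistent with their docstrings:

    def id(x: float) -> float:
        "$f(x) = x$"
        return x

    def lt(x: float, y: float) -> float:
        "1.0 if x is less than y else 0.0"
        return 1.0 if x < y else 0.0

    def max(x: float, y: float) -> float:
        "x if x is greater than y else y"
        return x if x > y else y

    def eq(x: float, y: float) -> float:
        "1.0 if x is equal to y else 0.0"
        return 1.0 if x == y else 0.0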

test_operators.py::test_sigmoid
@pytest.mark.task0_2
>   @given(small_floats)

tests/test_operators.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

a = 0.0

    @pytest.mark.task0_2
    @given(small_floats)
    def test_sigmoid(a: float) -> None:
        """Check properties of the sigmoid function, specifically
        * It is always between 0.0 and 1.0.
        * one minus sigmoid is the same as sigmoid of the negative
        * It crosses 0.5 at x = 0
        * It is strictly increasing.
        """
        # TODO: Implement for Task 0.2.
>       raise NotImplementedError('Need to implement for Task 0.2')
E       NotImplementedError: Need to implement for Task 0.2
E       Falsifying example: test_sigmoid(
E           a=0.0,
E       )

tests/test_operators.py:111: NotImplementedError

test_operators.py::test_transitive
@pytest.mark.task0_2
>   @given(small_floats, small_floats, small_floats)

tests/test_operators.py:115: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

a = 0.0, b = 0.0, c = 0.0

    @pytest.mark.task0_2
    @given(small_floats, small_floats, small_floats)
    def test_transitive(a: float, b: float, c: float) -> None:
        "Test the transitive property of less-than (a < b and b < c implies a < c)"
        # TODO: Implement for Task 0.2.
>       raise NotImplementedError('Need to implement for Task 0.2')
E       NotImplementedError: Need to implement for Task 0.2
E       Falsifying example: test_transitive(
E           c=0.0, b=0.0, a=0.0,
E       )

tests/test_operators.py:119: NotImplementedError

test_operators.py::test_symmetric
@pytest.mark.task0_2
    def test_symmetric() -> None:
        """
        Write a test that ensures that :func:`minitorch.operators.mul` is symmetric, i.e.
        gives the same value regardless of the order of its input.
        """
        # TODO: Implement for Task 0.2.
>       raise NotImplementedError('Need to implement for Task 0.2')
E       NotImplementedError: Need to implement for Task 0.2

tests/test_operators.py:129: NotImplementedError

test_operators.py::test_distribute
@pytest.mark.task0_2
    def test_distribute() -> None:
        r"""
        Write a test that ensures that your operators distribute, i.e.
        :math:`z \times (x + y) = z \times x + z \times y`
        """
        # TODO: Implement for Task 0.2.
>       raise NotImplementedError('Need to implement for Task 0.2')
E       NotImplementedError: Need to implement for Task 0.2

tests/test_operators.py:139: NotImplementedError

test_operators.py::test_other
@pytest.mark.task0_2
    def test_other() -> None:
        """
        Write a test that ensures some other property holds for your functions.
        """
        # TODO: Implement for Task 0.2.
>       raise NotImplementedError('Need to implement for Task 0.2')
E       NotImplementedError: Need to implement for Task 0.2

tests/test_operators.py:148: NotImplementedError
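
The Task 0.2 tests above (test_sigmoid, test_transitive, test_symmetric, test_distribute, test_other) are meant to be written by the student. Purely as an illustration, and assuming the Task 0.1 operators are implemented, property tests of roughly this shape would do (the stubs in the repo for test_symmetric and test_distribute take no arguments; this sketch uses @given for brevity, and its small_floats mirrors the strategy defined at the top of tests/test_operators.py):

    from hypothesis import given
    from hypothesis.strategies import floats

    from minitorch.operators import add, lt, mul

    small_floats = floats(min_value=-100, max_value=100)

    def assert_close(a: float, b: float) -> None:
        assert abs(a - b) < 1e-2

    @given(small_floats, small_floats, small_floats)
    def test_transitive(a: float, b: float, c: float) -> None:
        "a < b and b < c implies a < c"
        if lt(a, b) == 1.0 and lt(b, c) == 1.0:
            assert lt(a, c) == 1.0

    @given(small_floats, small_floats)
    def test_symmetric(x: float, y: float) -> None:
        "mul gives the same value regardless of argument order"
        assert mul(x, y) == mul(y, x)

    @given(small_floats, small_floats, small_floats)
    def test_distribute(z: float, x: float, y: float) -> None:
        r"z * (x + y) == z * x + z * y, up to float tolerance"
        assert_close(mul(z, add(x, y)), add(mul(z, x), mul(z, y)))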

test_operators.py::test_zip_with
@pytest.mark.task0_3
>   @given(small_floats, small_floats, small_floats, small_floats)

tests/test_operators.py:158: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:160: in test_zip_with
    x1, x2 = addLists([a, b], [c, d])
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls1 = [0.0, 0.0], ls2 = [0.0, 0.0]

    def addLists(ls1: Iterable[float], ls2: Iterable[float]) -> Iterable[float]:
        "Add the elements of `ls1` and `ls2` using `zipWith` and `add`"
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_zip_with(
E           d=0.0, c=0.0, b=0.0, a=0.0,
E       )

minitorch/operators.py:175: NotImplementedError
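
addLists (minitorch/operators.py:175) is meant to be built from a higher-order zipWith plus add. A self-contained sketch of that pattern:

    from typing import Callable, Iterable, List

    def add(x: float, y: float) -> float:
        return x + y

    def zipWith(
        fn: Callable[[float, float], float]
    ) -> Callable[[Iterable[float], Iterable[float]], List[float]]:
        "Return a function that combines two iterables elementwise with fn."
        def apply(ls1: Iterable[float], ls2: Iterable[float]) -> List[float]:
            return [fn(x, y) for x, y in zip(ls1, ls2)]
        return apply

    def addLists(ls1: Iterable[float], ls2: Iterable[float]) -> Iterable[float]:
        "Add the elements of ls1 and ls2 using zipWith and add"
        return zipWith(add)(ls1, ls2)

    assert addLists([0.0, 1.0], [2.0, 3.0]) == [2.0, 4.0]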

test_operators.py::test_sum_distribute
@pytest.mark.task0_3
>   @given(
        lists(small_floats, min_size=5, max_size=5),
        lists(small_floats, min_size=5, max_size=5),
    )

tests/test_operators.py:167: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls1 = [0.0, 0.0, 0.0, 0.0, 0.0], ls2 = [0.0, 0.0, 0.0, 0.0, 0.0]

    @pytest.mark.task0_3
    @given(
        lists(small_floats, min_size=5, max_size=5),
        lists(small_floats, min_size=5, max_size=5),
    )
    def test_sum_distribute(ls1: List[float], ls2: List[float]) -> None:
        """
        Write a test that ensures that the sum of `ls1` plus the sum of `ls2`
        is the same as the sum of each element of `ls1` plus each element of `ls2`.
        """
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_sum_distribute(
E           ls2=[0.0, 0.0, 0.0, 0.0, 0.0], ls1=[0.0, 0.0, 0.0, 0.0, 0.0],
E       )

tests/test_operators.py:177: NotImplementedError

test_operators.py::test_sum
@pytest.mark.task0_3
>   @given(lists(small_floats))

tests/test_operators.py:181: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:183: in test_sum
    assert_close(sum(ls), sum(ls))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = []

    def sum(ls: Iterable[float]) -> float:
        "Sum up a list using `reduce` and `add`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_sum(
E           ls=[],
E       )

minitorch/operators.py:200: NotImplementedError
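
sum (minitorch/operators.py:200) mirrors prod but with add and a unit of 0.0, which also handles the empty-list falsifying example above; a sketch:

    from functools import reduce

    def sum(ls):
        "Sum of an iterable; 0.0 for an empty one."
        return reduce(lambda x, y: x + y, ls, 0.0)

    assert sum([]) == 0.0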

test_operators.py::test_prod
@pytest.mark.task0_3
>   @given(small_floats, small_floats, small_floats)

tests/test_operators.py:187: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:189: in test_prod
    assert_close(prod([x, y, z]), x * y * z)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = [0.0, 0.0, 0.0]

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_prod(
E           z=0.0, y=0.0, x=0.0,
E       )

minitorch/operators.py:206: NotImplementedError

test_operators.py::test_negList
@pytest.mark.task0_3
>   @given(lists(small_floats))

tests/test_operators.py:193: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:195: in test_negList
    check = negList(ls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = []

    def negList(ls: Iterable[float]) -> Iterable[float]:
        "Use `map` and `neg` to negate each element in `ls`"
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_negList(
E           ls=[],
E       )

minitorch/operators.py:149: NotImplementedError
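
negList (minitorch/operators.py:149) is the map counterpart of the zipWith example above; a self-contained sketch:

    from typing import Callable, Iterable, List

    def neg(x: float) -> float:
        return -x

    def map(fn: Callable[[float], float]) -> Callable[[Iterable[float]], List[float]]:
        "Return a function that applies fn to every element of an iterable."
        def apply(ls: Iterable[float]) -> List[float]:
            return [fn(x) for x in ls]
        return apply

    def negList(ls: Iterable[float]) -> Iterable[float]:
        "Use map and neg to negate each element in ls"
        return map(neg)(ls)

    assert negList([1.0, -2.0]) == [-1.0, 2.0]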

test_operators.py::test_one_args[fn1]
fn = ('complex', )

    @given(small_floats)
>   @pytest.mark.parametrize("fn", one_arg)

tests/test_operators.py:209: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:212: in test_one_args
    base_fn(t1)
minitorch/testing.py:119: in complex
    operators.relu(operators.relu(a * 10 + 7) * 6 + 5) * 10
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

x = 7.0

    def relu(x: float) -> float:
        """
        $f(x) =$ x if x is greater than 0, else 0

        (See https://en.wikipedia.org/wiki/Rectifier_(neural_networks) .)
        """
        # TODO: Implement for Task 0.1.
>       raise NotImplementedError('Need to implement for Task 0.1')
E       NotImplementedError: Need to implement for Task 0.1
E       Falsifying example: test_one_args(
E           t1=0.0, fn=('complex', complex),
E       )

minitorch/operators.py:84: NotImplementedError

test_operators.py::test_one_args[fn6]
fn = ('inv', )

    @given(small_floats)
>   @pytest.mark.parametrize("fn", one_arg)

tests/test_operators.py:209: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:212: in test_one_args
    base_fn(t1)
minitorch/testing.py:49: in inv
    return operators.inv(a + 3.5)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

x = 3.5

    def inv(x: float) -> float:
        "$f(x) = 1/x$"
        # TODO: Implement for Task 0.1.
>       raise NotImplementedError('Need to implement for Task 0.1')
E       NotImplementedError: Need to implement for Task 0.1
E       Falsifying example: test_one_args(
E           t1=0.0, fn=('inv', inv),
E       )

minitorch/operators.py:109: NotImplementedError

test_operators.py::test_one_args[fn10]
fn = ('relu', )

    @given(small_floats)
>   @pytest.mark.parametrize("fn", one_arg)

tests/test_operators.py:209: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:212: in test_one_args
    base_fn(t1)
minitorch/testing.py:64: in relu
    return operators.relu(a + 5.5)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

x = 5.5

    def relu(x: float) -> float:
        """
        $f(x) =$ x if x is greater than 0, else 0

        (See https://en.wikipedia.org/wiki/Rectifier_(neural_networks) .)
        """
        # TODO: Implement for Task 0.1.
>       raise NotImplementedError('Need to implement for Task 0.1')
E       NotImplementedError: Need to implement for Task 0.1
E       Falsifying example: test_one_args(
E           t1=0.0, fn=('relu', relu),
E       )

minitorch/operators.py:84: NotImplementedError

test_operators.py::test_one_args[fn11]
fn = ('sig', )

    @given(small_floats)
>   @pytest.mark.parametrize("fn", one_arg)

tests/test_operators.py:209: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:212: in test_one_args
    base_fn(t1)
minitorch/testing.py:54: in sig
    return operators.sigmoid(a)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

x = 0.0

    def sigmoid(x: float) -> float:
        r"""
        $f(x) =  \frac{1.0}{(1.0 + e^{-x})}$

        (See https://en.wikipedia.org/wiki/Sigmoid_function )

        Calculate as

        $f(x) =  \frac{1.0}{(1.0 + e^{-x})}$ if x >=0 else $\frac{e^x}{(1.0 + e^{x})}$

        for stability.
        """
        # TODO: Implement for Task 0.1.
>       raise NotImplementedError('Need to implement for Task 0.1')
E       NotImplementedError: Need to implement for Task 0.1
E       Falsifying example: test_one_args(
E           t1=0.0, fn=('sig', sig),
E       )

minitorch/operators.py:74: NotImplementedError
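
sigmoid (minitorch/operators.py:74) is the one stub above with a numerical subtlety; a sketch of the stable form the docstring describes, which never exponentiates a large positive number:

    import math

    def sigmoid(x: float) -> float:
        r"$f(x) = 1 / (1 + e^{-x})$, computed in a numerically stable way."
        if x >= 0:
            return 1.0 / (1.0 + math.exp(-x))
        return math.exp(x) / (1.0 + math.exp(x))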

test_operators.py::test_two_args[fn2]
fn = ('eq2', )

    @given(small_floats, small_floats)
>   @pytest.mark.parametrize("fn", two_arg)

tests/test_operators.py:216: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:221: in test_two_args
    base_fn(t1, t2)
minitorch/testing.py:100: in eq2
    return operators.eq(a, (b + 5.5))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

x = 0.0, y = 5.5

    def eq(x: float, y: float) -> float:
        "$f(x) =$ 1.0 if x is equal to y else 0.0"
        # TODO: Implement for Task 0.1.
>       raise NotImplementedError('Need to implement for Task 0.1')
E       NotImplementedError: Need to implement for Task 0.1
E       Falsifying example: test_two_args(
E           t2=0.0, t1=0.0, fn=('eq2', eq2),
E       )

minitorch/operators.py:46: NotImplementedError

test_operators.py::test_two_args[fn3]
fn = ('gt2', )

    @given(small_floats, small_floats)
>   @pytest.mark.parametrize("fn", two_arg)

tests/test_operators.py:216: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:221: in test_two_args
    base_fn(t1, t2)
minitorch/testing.py:92: in gt2
    return operators.lt(b, a + 1.2)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

x = 0.0, y = 1.2

    def lt(x: float, y: float) -> float:
        "$f(x) =$ 1.0 if x is less than y else 0.0"
        # TODO: Implement for Task 0.1.
>       raise NotImplementedError('Need to implement for Task 0.1')
E       NotImplementedError: Need to implement for Task 0.1
E       Falsifying example: test_two_args(
E           t2=0.0, t1=0.0, fn=('gt2', gt2),
E       )

minitorch/operators.py:40: NotImplementedError

test_operators.py::test_two_args[fn4]
fn = ('lt2', )

    @given(small_floats, small_floats)
>   @pytest.mark.parametrize("fn", two_arg)

tests/test_operators.py:216: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:221: in test_two_args
    base_fn(t1, t2)
minitorch/testing.py:96: in lt2
    return operators.lt(a + 1.2, b)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

x = 1.2, y = 0.0

    def lt(x: float, y: float) -> float:
        "$f(x) =$ 1.0 if x is less than y else 0.0"
        # TODO: Implement for Task 0.1.
>       raise NotImplementedError('Need to implement for Task 0.1')
E       NotImplementedError: Need to implement for Task 0.1
E       Falsifying example: test_two_args(
E           t2=0.0, t1=0.0, fn=('lt2', lt2),
E       )

minitorch/operators.py:40: NotImplementedError

test_operators.py::test_backs
@given(small_floats, small_floats)
>   def test_backs(a: float, b: float) -> None:

tests/test_operators.py:225: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:226: in test_backs
    relu_back(a, b)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

x = 0.0, d = 0.0

    def relu_back(x: float, d: float) -> float:
        r"If $f = relu$ compute $d \times f'(x)$"
        # TODO: Implement for Task 0.1.
>       raise NotImplementedError('Need to implement for Task 0.1')
E       NotImplementedError: Need to implement for Task 0.1
E       Falsifying example: test_backs(
E           b=0.0, a=0.0,
E       )

minitorch/operators.py:121: NotImplementedError

test_scalar.py::test_central_diff
@pytest.mark.task1_1
    def test_central_diff() -> None:
>       d = central_difference(operators.id, 5, arg=0)

tests/test_scalar.py:35: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

f = , arg = 0, epsilon = 1e-06, vals = (5,)

    def central_difference(f: Any, *vals: Any, arg: int = 0, epsilon: float = 1e-6) -> Any:
        r"""
        Computes an approximation to the derivative of `f` with respect to one arg.

        See :doc:`derivative` or https://en.wikipedia.org/wiki/Finite_difference for more details.

        Args:
            f : arbitrary function from n-scalar args to one value
            *vals : n-float values $x_0 \ldots x_{n-1}$
            arg : the number $i$ of the arg to compute the derivative
            epsilon : a small constant

        Returns:
            An approximation of $f'_i(x_0, \ldots, x_{n-1})$
        """
        # TODO: Implement for Task 1.1.
>       raise NotImplementedError('Need to implement for Task 1.1')
E       NotImplementedError: Need to implement for Task 1.1

minitorch/autodiff.py:26: NotImplementedError
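
central_difference (minitorch/autodiff.py:26) is the symmetric finite-difference approximation f'_i(x) ~ (f(..., x_i + eps, ...) - f(..., x_i - eps, ...)) / (2 * eps). A sketch:

    from typing import Any

    def central_difference(f: Any, *vals: Any, arg: int = 0, epsilon: float = 1e-6) -> Any:
        "Approximate the derivative of f with respect to argument number `arg`."
        up = [v + (epsilon if i == arg else 0.0) for i, v in enumerate(vals)]
        down = [v - (epsilon if i == arg else 0.0) for i, v in enumerate(vals)]
        return (f(*up) - f(*down)) / (2.0 * epsilon)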

test_scalar.py::test_simple
@given(small_floats, small_floats)
>   def test_simple(a: float, b: float) -> None:

tests/test_scalar.py:55: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:57: in test_simple
    c = Scalar(a) + Scalar(b)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = Scalar(0.000000)

    def __add__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_simple(
E           b=0.0, a=0.0,
E       )

minitorch/scalar.py:96: NotImplementedError
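
Scalar.__add__ (minitorch/scalar.py:96) and the other Task 1.2 operator overloads are expected to dispatch to ScalarFunction classes rather than compute floats directly, so that a history is recorded for backpropagation. A sketch, assuming the scaffold's Add and Neg functions:

    def __add__(self, b: ScalarLike) -> Scalar:
        return Add.apply(self, b)

    def __sub__(self, b: ScalarLike) -> Scalar:
        # a - b as a + (-b), reusing Neg.
        return Add.apply(self, Neg.apply(b))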

test_scalar.py::test_one_args[fn0]
fn = ('addConstant', , )

    @given(small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:75: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:81: in test_one_args
    assert_close(scalar_fn(t1).data, base_fn(t1.data))
minitorch/testing.py:19: in addConstant
    return 5 + a
minitorch/scalar.py:122: in __radd__
    return self + b
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 5

    def __add__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_args(
E           t1=Scalar(0.000000), fn=('addConstant', addConstant, addConstant),
E       )

minitorch/scalar.py:96: NotImplementedError

test_scalar.py::test_one_args[fn1]
fn = ('complex', , )

    @given(small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:75: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:81: in test_one_args
    assert_close(scalar_fn(t1).data, base_fn(t1.data))
minitorch/testing.py:213: in complex
    return (((a * 10 + 7).relu() * 6 + 5).relu() * 10).sigmoid().log() / 50
minitorch/scalar.py:86: in __mul__
    return Mul.apply(self, b)
minitorch/scalar_functions.py:63: in apply
    c = cls._forward(ctx, *raw_vals)
minitorch/scalar_functions.py:45: in _forward
    return cls.forward(ctx, *inps)  # type: ignore
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ctx = Context(no_grad=False, saved_values=()), a = 0.0, b = 10

    @staticmethod
    def forward(ctx: Context, a: float, b: float) -> float:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_args(
E           t1=Scalar(0.000000), fn=('complex', complex, complex),
E       )

minitorch/scalar_functions.py:107: NotImplementedError
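
Mul.forward (minitorch/scalar_functions.py:107) is one of the Task 1.2 forward/backward pairs. A sketch, assuming Context.save_for_backward and ctx.saved_values behave as in the scaffold:

    class Mul(ScalarFunction):
        "Multiplication function: f(a, b) = a * b"

        @staticmethod
        def forward(ctx: Context, a: float, b: float) -> float:
            # The inputs are needed again for the product rule in backward.
            ctx.save_for_backward(a, b)
            return a * b

        @staticmethod
        def backward(ctx: Context, d_output: float) -> Tuple[float, float]:
            a, b = ctx.saved_values
            # d(a*b)/da = b and d(a*b)/db = a, each scaled by d_output.
            return d_output * b, d_output * a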

test_scalar.py::test_one_args[fn2]
fn = ('cube', , )

    @given(small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:75: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:81: in test_one_args
    assert_close(scalar_fn(t1).data, base_fn(t1.data))
minitorch/testing.py:29: in cube
    return a * a * a
minitorch/scalar.py:86: in __mul__
    return Mul.apply(self, b)
minitorch/scalar_functions.py:63: in apply
    c = cls._forward(ctx, *raw_vals)
minitorch/scalar_functions.py:45: in _forward
    return cls.forward(ctx, *inps)  # type: ignore
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ctx = Context(no_grad=False, saved_values=()), a = 0.0, b = 0.0

    @staticmethod
    def forward(ctx: Context, a: float, b: float) -> float:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_args(
E           t1=Scalar(0.000000), fn=('cube', cube, cube),
E       )

minitorch/scalar_functions.py:107: NotImplementedError

test_scalar.py::test_one_args[fn3]

test_scalar.py::test_one_args[fn3]
fn = ('div', div, div)

    @given(small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:75: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:81: in test_one_args
    assert_close(scalar_fn(t1).data, base_fn(t1.data))
minitorch/testing.py:44: in div
    return a / 5
minitorch/scalar.py:89: in __truediv__
    return Mul.apply(self, Inv.apply(b))
minitorch/scalar_functions.py:63: in apply
    c = cls._forward(ctx, *raw_vals)
minitorch/scalar_functions.py:45: in _forward
    return cls.forward(ctx, *inps)  # type: ignore
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ctx = Context(no_grad=False, saved_values=()), a = 5

    @staticmethod
    def forward(ctx: Context, a: float) -> float:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_args(
E           t1=Scalar(0.000000), fn=('div', div, div),
E       )

minitorch/scalar_functions.py:121: NotImplementedError
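
For division the dunder is already implemented as Mul.apply(self, Inv.apply(b)), so the missing piece is Inv.forward (and eventually its backward) at minitorch/scalar_functions.py:121. A float-level sketch of that math:

    # Hedged sketch of the inverse that Inv.forward/backward would compute.
    def inv(x: float) -> float:
        return 1.0 / x


    def inv_back(x: float, d_output: float) -> float:
        # d(1/x)/dx = -1/x**2, scaled by the incoming derivative.
        return -d_output / (x * x)


    print(inv(5.0))            # 0.2, so a / 5 becomes a * inv(5)
    print(inv_back(5.0, 1.0))  # -0.04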

test_scalar.py::test_one_args[fn4]

test_scalar.py::test_one_args[fn4]
fn = ('exp', exp, exp)

    @given(small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:75: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:81: in test_one_args
    assert_close(scalar_fn(t1).data, base_fn(t1.data))
minitorch/testing.py:181: in exp
    return (a - 200).exp()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 200

    def __sub__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_args(
E           t1=Scalar(0.000000), fn=('exp', exp, exp),
E       )

minitorch/scalar.py:115: NotImplementedError
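
Scalar.__sub__ (minitorch/scalar.py:115) is the same kind of one-line TODO. A common way to fill it is to lower subtraction onto addition and negation so that no new derivative rule is needed; sketched here on plain floats (in minitorch this would presumably be an equivalent Add/Neg composition, which this log does not show).

    # Hedged sketch: a - b lowered onto addition and negation.
    def neg(x: float) -> float:
        return -x


    def sub(a: float, b: float) -> float:
        return a + neg(b)


    print(sub(0.0, 200.0))  # -200.0, the value (a - 200) takes for a = 0.0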

test_scalar.py::test_one_args[fn5]

test_scalar.py::test_one_args[fn5]
fn = ('explog', explog, explog)

    @given(small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:75: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:81: in test_one_args
    assert_close(scalar_fn(t1).data, base_fn(t1.data))
minitorch/testing.py:185: in explog
    return (a + 100000).log() + (a - 200).exp()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 100000

    def __add__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_args(
E           t1=Scalar(0.000000), fn=('explog', explog, explog),
E       )

minitorch/scalar.py:96: NotImplementedError

test_scalar.py::test_one_args[fn6]

test_scalar.py::test_one_args[fn6]
fn = ('inv', inv, inv)

    @given(small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:75: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:81: in test_one_args
    assert_close(scalar_fn(t1).data, base_fn(t1.data))
minitorch/testing.py:165: in inv
    return 1.0 / (a + 3.5)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 3.5

    def __add__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_args(
E           t1=Scalar(0.000000), fn=('inv', inv, inv),
E       )

minitorch/scalar.py:96: NotImplementedError

test_scalar.py::test_one_args[fn7]

test_scalar.py::test_one_args[fn7]
fn = ('log', log, log)

    @given(small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:75: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:81: in test_one_args
    assert_close(scalar_fn(t1).data, base_fn(t1.data))
minitorch/testing.py:173: in log
    return (x + 100000).log()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 100000

    def __add__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_args(
E           t1=Scalar(0.000000), fn=('log', log, log),
E       )

minitorch/scalar.py:96: NotImplementedError

test_scalar.py::test_one_args[fn8]

test_scalar.py::test_one_args[fn8]
fn = ('multConstant', multConstant, multConstant)

    @given(small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:75: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:81: in test_one_args
    assert_close(scalar_fn(t1).data, base_fn(t1.data))
minitorch/testing.py:39: in multConstant
    return 5 * a
minitorch/scalar.py:125: in __rmul__
    return self * b
minitorch/scalar.py:86: in __mul__
    return Mul.apply(self, b)
minitorch/scalar_functions.py:63: in apply
    c = cls._forward(ctx, *raw_vals)
minitorch/scalar_functions.py:45: in _forward
    return cls.forward(ctx, *inps)  # type: ignore
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ctx = Context(no_grad=False, saved_values=()), a = 0.0, b = 5

    @staticmethod
    def forward(ctx: Context, a: float, b: float) -> float:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_args(
E           t1=Scalar(0.000000), fn=('multConstant', multConstant, multConstant),
E       )

minitorch/scalar_functions.py:107: NotImplementedError

test_scalar.py::test_one_args[fn9]

test_scalar.py::test_one_args[fn9]
fn = ('neg', neg, neg)

    @given(small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:75: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:81: in test_one_args
    assert_close(scalar_fn(t1).data, base_fn(t1.data))
minitorch/testing.py:14: in neg
    return -a
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000)

    def __neg__(self) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_args(
E           t1=Scalar(0.000000), fn=('neg', neg, neg),
E       )

minitorch/scalar.py:119: NotImplementedError
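
__neg__ fails on the same TODO pattern at minitorch/scalar.py:119. The derivative side is equally small, so a sketch of both directions:

    # Hedged sketch of negation and its backward pass on plain floats.
    def neg_forward(a: float) -> float:
        return -a


    def neg_backward(d_output: float) -> float:
        # d(-a)/da = -1, so the incoming derivative simply flips sign.
        return -d_output


    print(neg_forward(3.0))   # -3.0
    print(neg_backward(1.0))  # -1.0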

test_scalar.py::test_one_args[fn10]

test_scalar.py::test_one_args[fn10]
fn = ('relu', relu, relu)

    @given(small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:75: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:81: in test_one_args
    assert_close(scalar_fn(t1).data, base_fn(t1.data))
minitorch/testing.py:177: in relu
    return (x + 5.5).relu()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 5.5

    def __add__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_args(
E           t1=Scalar(0.000000), fn=('relu', relu, relu),
E       )

minitorch/scalar.py:96: NotImplementedError

test_scalar.py::test_one_args[fn11]

test_scalar.py::test_one_args[fn11]
fn = ('sig', sig, sig)

    @given(small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:75: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:81: in test_one_args
    assert_close(scalar_fn(t1).data, base_fn(t1.data))
minitorch/testing.py:169: in sig
    return x.sigmoid()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000)

    def sigmoid(self) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_args(
E           t1=Scalar(0.000000), fn=('sig', sig, sig),
E       )

minitorch/scalar.py:137: NotImplementedError
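
Scalar.sigmoid (minitorch/scalar.py:137) would normally route through a Sigmoid scalar function; the value it has to produce is shown below with the usual numerically stable split, so that exp is only ever called on a non-positive argument.

    import math


    # Hedged sketch of a numerically stable sigmoid on plain floats.
    def sigmoid(x: float) -> float:
        if x >= 0:
            return 1.0 / (1.0 + math.exp(-x))
        # For very negative x, exp(-x) would overflow; use the equivalent form.
        z = math.exp(x)
        return z / (1.0 + z)


    print(sigmoid(0.0))     # 0.5
    print(sigmoid(-800.0))  # ~0.0, with no overflow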

test_scalar.py::test_one_args[fn12]

test_scalar.py::test_one_args[fn12]
fn = ('square', square, square)

    @given(small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:75: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:81: in test_one_args
    assert_close(scalar_fn(t1).data, base_fn(t1.data))
minitorch/testing.py:24: in square
    return a * a
minitorch/scalar.py:86: in __mul__
    return Mul.apply(self, b)
minitorch/scalar_functions.py:63: in apply
    c = cls._forward(ctx, *raw_vals)
minitorch/scalar_functions.py:45: in _forward
    return cls.forward(ctx, *inps)  # type: ignore
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ctx = Context(no_grad=False, saved_values=()), a = 0.0, b = 0.0

    @staticmethod
    def forward(ctx: Context, a: float, b: float) -> float:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_args(
E           t1=Scalar(0.000000), fn=('square', square, square),
E       )

minitorch/scalar_functions.py:107: NotImplementedError

test_scalar.py::test_one_args[fn13]

test_scalar.py::test_one_args[fn13]
fn = ('subConstant', subConstant, subConstant)

    @given(small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:75: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:81: in test_one_args
    assert_close(scalar_fn(t1).data, base_fn(t1.data))
minitorch/testing.py:34: in subConstant
    return a - 5
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 5

    def __sub__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_args(
E           t1=Scalar(0.000000), fn=('subConstant', subConstant, subConstant),
E       )

minitorch/scalar.py:115: NotImplementedError

test_scalar.py::test_two_args[fn0]

test_scalar.py::test_two_args[fn0]
fn = ('add2', add2, add2)

    @given(small_scalars, small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:85: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:93: in test_two_args
    assert_close(scalar_fn(t1, t2).data, base_fn(t1.data, t2.data))
minitorch/testing.py:78: in add2
    return a + b
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = Scalar(0.000000)

    def __add__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_two_args(
E           t2=Scalar(0.000000), t1=Scalar(0.000000), fn=('add2', add2, add2),
E       )

minitorch/scalar.py:96: NotImplementedError

test_scalar.py::test_two_args[fn1]

test_scalar.py::test_two_args[fn1]
fn = ('div2', div2, div2)

    @given(small_scalars, small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:85: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:93: in test_two_args
    assert_close(scalar_fn(t1, t2).data, base_fn(t1.data, t2.data))
minitorch/testing.py:88: in div2
    return a / (b + 5.5)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 5.5

    def __add__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_two_args(
E           t2=Scalar(0.000000), t1=Scalar(0.000000), fn=('div2', div2, div2),
E       )

minitorch/scalar.py:96: NotImplementedError

test_scalar.py::test_two_args[fn2]

test_scalar.py::test_two_args[fn2]
fn = ('eq2', eq2, eq2)

    @given(small_scalars, small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:85: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:93: in test_two_args
    assert_close(scalar_fn(t1, t2).data, base_fn(t1.data, t2.data))
minitorch/testing.py:201: in eq2
    return a == (b + 5.5)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 5.5

    def __add__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_two_args(
E           t2=Scalar(0.000000), t1=Scalar(0.000000), fn=('eq2', eq2, eq2),
E       )

minitorch/scalar.py:96: NotImplementedError

test_scalar.py::test_two_args[fn3]

test_scalar.py::test_two_args[fn3]
fn = ('gt2', gt2, gt2)

    @given(small_scalars, small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:85: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:93: in test_two_args
    assert_close(scalar_fn(t1, t2).data, base_fn(t1.data, t2.data))
minitorch/testing.py:205: in gt2
    return a + 1.2 > b
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 1.2

    def __add__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_two_args(
E           t2=Scalar(0.000000), t1=Scalar(0.000000), fn=('gt2', gt2, gt2),
E       )

minitorch/scalar.py:96: NotImplementedError

test_scalar.py::test_two_args[fn4]

test_scalar.py::test_two_args[fn4]
fn = ('lt2', lt2, lt2)

    @given(small_scalars, small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:85: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:93: in test_two_args
    assert_close(scalar_fn(t1, t2).data, base_fn(t1.data, t2.data))
minitorch/testing.py:209: in lt2
    return a + 1.2 < b
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 1.2

    def __add__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_two_args(
E           t2=Scalar(0.000000), t1=Scalar(0.000000), fn=('lt2', lt2, lt2),
E       )

minitorch/scalar.py:96: NotImplementedError

test_scalar.py::test_two_args[fn5]

test_scalar.py::test_two_args[fn5]
fn = ('mul2', mul2, mul2)

    @given(small_scalars, small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:85: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:93: in test_two_args
    assert_close(scalar_fn(t1, t2).data, base_fn(t1.data, t2.data))
minitorch/testing.py:83: in mul2
    return a * b
minitorch/scalar.py:86: in __mul__
    return Mul.apply(self, b)
minitorch/scalar_functions.py:63: in apply
    c = cls._forward(ctx, *raw_vals)
minitorch/scalar_functions.py:45: in _forward
    return cls.forward(ctx, *inps)  # type: ignore
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ctx = Context(no_grad=False, saved_values=()), a = 0.0, b = 0.0

    @staticmethod
    def forward(ctx: Context, a: float, b: float) -> float:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_two_args(
E           t2=Scalar(0.000000), t1=Scalar(0.000000), fn=('mul2', mul2, mul2),
E       )

minitorch/scalar_functions.py:107: NotImplementedError

test_scalar.py::test_one_derivative[fn0]

test_scalar.py::test_one_derivative[fn0]
fn = ('addConstant', addConstant, addConstant)

    @given(small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:108: in test_one_derivative
    derivative_check(scalar_fn, t1)
minitorch/scalar.py:201: in derivative_check
    out = f(*scalars)
minitorch/testing.py:19: in addConstant
    return 5 + a
minitorch/scalar.py:122: in __radd__
    return self + b
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 5

    def __add__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_derivative(
E           t1=Scalar(0.000000), fn=('addConstant', addConstant, addConstant),
E       )

minitorch/scalar.py:96: NotImplementedError
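
All of the task1_4 entries below enter through derivative_check at minitorch/scalar.py:201 and then die on the same Task 1.2 TODOs as above, so none of them exercise backpropagation yet. For reference, the kind of check that helper performs is a comparison of a backpropagated derivative against a central-difference estimate; here is a self-contained sketch of just the numerical side (the function names are illustrative, not the reference code).

    from typing import Callable


    # Hedged sketch of a central-difference derivative estimate.
    def central_difference(f: Callable[[float], float], x: float,
                           eps: float = 1e-6) -> float:
        return (f(x + eps) - f(x - eps)) / (2.0 * eps)


    def square(x: float) -> float:
        return x * x


    # d(x**2)/dx at x = 3 is 6; the numeric estimate should agree to ~1e-4.
    analytic = 2.0 * 3.0
    numeric = central_difference(square, 3.0)
    assert abs(analytic - numeric) < 1e-4
    print(analytic, numeric)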

test_scalar.py::test_one_derivative[fn1]

test_scalar.py::test_one_derivative[fn1]
fn = ('complex', complex, complex)

    @given(small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:108: in test_one_derivative
    derivative_check(scalar_fn, t1)
minitorch/scalar.py:201: in derivative_check
    out = f(*scalars)
minitorch/testing.py:213: in complex
    return (((a * 10 + 7).relu() * 6 + 5).relu() * 10).sigmoid().log() / 50
minitorch/scalar.py:86: in __mul__
    return Mul.apply(self, b)
minitorch/scalar_functions.py:63: in apply
    c = cls._forward(ctx, *raw_vals)
minitorch/scalar_functions.py:45: in _forward
    return cls.forward(ctx, *inps)  # type: ignore
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ctx = Context(no_grad=False, saved_values=()), a = 0.0, b = 10

    @staticmethod
    def forward(ctx: Context, a: float, b: float) -> float:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_derivative(
E           t1=Scalar(0.000000), fn=('complex', complex, complex),
E       )

minitorch/scalar_functions.py:107: NotImplementedError

test_scalar.py::test_one_derivative[fn2]

test_scalar.py::test_one_derivative[fn2]
fn = ('cube', cube, cube)

    @given(small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:108: in test_one_derivative
    derivative_check(scalar_fn, t1)
minitorch/scalar.py:201: in derivative_check
    out = f(*scalars)
minitorch/testing.py:29: in cube
    return a * a * a
minitorch/scalar.py:86: in __mul__
    return Mul.apply(self, b)
minitorch/scalar_functions.py:63: in apply
    c = cls._forward(ctx, *raw_vals)
minitorch/scalar_functions.py:45: in _forward
    return cls.forward(ctx, *inps)  # type: ignore
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ctx = Context(no_grad=False, saved_values=()), a = 0.0, b = 0.0

    @staticmethod
    def forward(ctx: Context, a: float, b: float) -> float:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_derivative(
E           t1=Scalar(0.000000), fn=('cube', cube, cube),
E       )

minitorch/scalar_functions.py:107: NotImplementedError

test_scalar.py::test_one_derivative[fn3]

test_scalar.py::test_one_derivative[fn3]
fn = ('div', div, div)

    @given(small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:108: in test_one_derivative
    derivative_check(scalar_fn, t1)
minitorch/scalar.py:201: in derivative_check
    out = f(*scalars)
minitorch/testing.py:44: in div
    return a / 5
minitorch/scalar.py:89: in __truediv__
    return Mul.apply(self, Inv.apply(b))
minitorch/scalar_functions.py:63: in apply
    c = cls._forward(ctx, *raw_vals)
minitorch/scalar_functions.py:45: in _forward
    return cls.forward(ctx, *inps)  # type: ignore
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ctx = Context(no_grad=False, saved_values=()), a = 5

    @staticmethod
    def forward(ctx: Context, a: float) -> float:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_derivative(
E           t1=Scalar(0.000000), fn=('div', div, div),
E       )

minitorch/scalar_functions.py:121: NotImplementedError

test_scalar.py::test_one_derivative[fn4]

test_scalar.py::test_one_derivative[fn4]
fn = ('exp', exp, exp)

    @given(small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:108: in test_one_derivative
    derivative_check(scalar_fn, t1)
minitorch/scalar.py:201: in derivative_check
    out = f(*scalars)
minitorch/testing.py:181: in exp
    return (a - 200).exp()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 200

    def __sub__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_derivative(
E           t1=Scalar(0.000000), fn=('exp', exp, exp),
E       )

minitorch/scalar.py:115: NotImplementedError

test_scalar.py::test_one_derivative[fn5]

test_scalar.py::test_one_derivative[fn5]
fn = ('explog', explog, explog)

    @given(small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:108: in test_one_derivative
    derivative_check(scalar_fn, t1)
minitorch/scalar.py:201: in derivative_check
    out = f(*scalars)
minitorch/testing.py:185: in explog
    return (a + 100000).log() + (a - 200).exp()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 100000

    def __add__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_derivative(
E           t1=Scalar(0.000000), fn=('explog', explog, explog),
E       )

minitorch/scalar.py:96: NotImplementedError

test_scalar.py::test_one_derivative[fn6]

test_scalar.py::test_one_derivative[fn6]
fn = ('inv', inv, inv)

    @given(small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:108: in test_one_derivative
    derivative_check(scalar_fn, t1)
minitorch/scalar.py:201: in derivative_check
    out = f(*scalars)
minitorch/testing.py:165: in inv
    return 1.0 / (a + 3.5)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 3.5

    def __add__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_derivative(
E           t1=Scalar(0.000000), fn=('inv', inv, inv),
E       )

minitorch/scalar.py:96: NotImplementedError

test_scalar.py::test_one_derivative[fn7]

test_scalar.py::test_one_derivative[fn7]
fn = ('log', log, log)

    @given(small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:108: in test_one_derivative
    derivative_check(scalar_fn, t1)
minitorch/scalar.py:201: in derivative_check
    out = f(*scalars)
minitorch/testing.py:173: in log
    return (x + 100000).log()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 100000

    def __add__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_derivative(
E           t1=Scalar(0.000000), fn=('log', log, log),
E       )

minitorch/scalar.py:96: NotImplementedError

test_scalar.py::test_one_derivative[fn8]

test_scalar.py::test_one_derivative[fn8]
fn = ('multConstant', multConstant, multConstant)

    @given(small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:108: in test_one_derivative
    derivative_check(scalar_fn, t1)
minitorch/scalar.py:201: in derivative_check
    out = f(*scalars)
minitorch/testing.py:39: in multConstant
    return 5 * a
minitorch/scalar.py:125: in __rmul__
    return self * b
minitorch/scalar.py:86: in __mul__
    return Mul.apply(self, b)
minitorch/scalar_functions.py:63: in apply
    c = cls._forward(ctx, *raw_vals)
minitorch/scalar_functions.py:45: in _forward
    return cls.forward(ctx, *inps)  # type: ignore
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ctx = Context(no_grad=False, saved_values=()), a = 0.0, b = 5

    @staticmethod
    def forward(ctx: Context, a: float, b: float) -> float:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_derivative(
E           t1=Scalar(0.000000), fn=('multConstant', multConstant, multConstant),
E       )

minitorch/scalar_functions.py:107: NotImplementedError

test_scalar.py::test_one_derivative[fn9]

test_scalar.py::test_one_derivative[fn9]
fn = ('neg', neg, neg)

    @given(small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:108: in test_one_derivative
    derivative_check(scalar_fn, t1)
minitorch/scalar.py:201: in derivative_check
    out = f(*scalars)
minitorch/testing.py:14: in neg
    return -a
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000)

    def __neg__(self) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_derivative(
E           t1=Scalar(0.000000), fn=('neg', neg, neg),
E       )

minitorch/scalar.py:119: NotImplementedError

test_scalar.py::test_one_derivative[fn10]

test_scalar.py::test_one_derivative[fn10]
fn = ('relu', relu, relu)

    @given(small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:108: in test_one_derivative
    derivative_check(scalar_fn, t1)
minitorch/scalar.py:201: in derivative_check
    out = f(*scalars)
minitorch/testing.py:177: in relu
    return (x + 5.5).relu()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 5.5

    def __add__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_derivative(
E           t1=Scalar(0.000000), fn=('relu', relu, relu),
E       )

minitorch/scalar.py:96: NotImplementedError

test_scalar.py::test_one_derivative[fn11]

test_scalar.py::test_one_derivative[fn11]
fn = ('sig', sig, sig)

    @given(small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:108: in test_one_derivative
    derivative_check(scalar_fn, t1)
minitorch/scalar.py:201: in derivative_check
    out = f(*scalars)
minitorch/testing.py:169: in sig
    return x.sigmoid()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000)

    def sigmoid(self) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_derivative(
E           t1=Scalar(0.000000), fn=('sig', sig, sig),
E       )

minitorch/scalar.py:137: NotImplementedError

test_scalar.py::test_one_derivative[fn12]

test_scalar.py::test_one_derivative[fn12]
fn = ('square', square, square)

    @given(small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:108: in test_one_derivative
    derivative_check(scalar_fn, t1)
minitorch/scalar.py:201: in derivative_check
    out = f(*scalars)
minitorch/testing.py:24: in square
    return a * a
minitorch/scalar.py:86: in __mul__
    return Mul.apply(self, b)
minitorch/scalar_functions.py:63: in apply
    c = cls._forward(ctx, *raw_vals)
minitorch/scalar_functions.py:45: in _forward
    return cls.forward(ctx, *inps)  # type: ignore
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ctx = Context(no_grad=False, saved_values=()), a = 0.0, b = 0.0

    @staticmethod
    def forward(ctx: Context, a: float, b: float) -> float:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_derivative(
E           t1=Scalar(0.000000), fn=('square', square, square),
E       )

minitorch/scalar_functions.py:107: NotImplementedError

test_scalar.py::test_one_derivative[fn13]

test_scalar.py::test_one_derivative[fn13]
fn = ('subConstant', subConstant, subConstant)

    @given(small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:108: in test_one_derivative
    derivative_check(scalar_fn, t1)
minitorch/scalar.py:201: in derivative_check
    out = f(*scalars)
minitorch/testing.py:34: in subConstant
    return a - 5
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 5

    def __sub__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_one_derivative(
E           t1=Scalar(0.000000), fn=('subConstant', subConstant, subConstant),
E       )

minitorch/scalar.py:115: NotImplementedError

test_scalar.py::test_two_derivative[fn0]

test_scalar.py::test_two_derivative[fn0]
fn = ('add2', add2, add2)

    @given(small_scalars, small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:112: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:120: in test_two_derivative
    derivative_check(scalar_fn, t1, t2)
minitorch/scalar.py:201: in derivative_check
    out = f(*scalars)
minitorch/testing.py:78: in add2
    return a + b
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = Scalar(0.000000)

    def __add__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_two_derivative(
E           t2=Scalar(0.000000), t1=Scalar(0.000000), fn=('add2', add2, add2),
E       )

minitorch/scalar.py:96: NotImplementedError

test_scalar.py::test_two_derivative[fn1]

test_scalar.py::test_two_derivative[fn1]
fn = ('div2', div2, div2)

    @given(small_scalars, small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:112: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:120: in test_two_derivative
    derivative_check(scalar_fn, t1, t2)
minitorch/scalar.py:201: in derivative_check
    out = f(*scalars)
minitorch/testing.py:88: in div2
    return a / (b + 5.5)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 5.5

    def __add__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_two_derivative(
E           t2=Scalar(0.000000), t1=Scalar(0.000000), fn=('div2', div2, div2),
E       )

minitorch/scalar.py:96: NotImplementedError

test_scalar.py::test_two_derivative[fn2]

test_scalar.py::test_two_derivative[fn2]
fn = ('eq2', eq2, eq2)

    @given(small_scalars, small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:112: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:120: in test_two_derivative
    derivative_check(scalar_fn, t1, t2)
minitorch/scalar.py:201: in derivative_check
    out = f(*scalars)
minitorch/testing.py:201: in eq2
    return a == (b + 5.5)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 5.5

    def __add__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_two_derivative(
E           t2=Scalar(0.000000), t1=Scalar(0.000000), fn=('eq2', eq2, eq2),
E       )

minitorch/scalar.py:96: NotImplementedError

test_scalar.py::test_two_derivative[fn3]

test_scalar.py::test_two_derivative[fn3]
fn = ('gt2', gt2, gt2)

    @given(small_scalars, small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:112: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:120: in test_two_derivative
    derivative_check(scalar_fn, t1, t2)
minitorch/scalar.py:201: in derivative_check
    out = f(*scalars)
minitorch/testing.py:205: in gt2
    return a + 1.2 > b
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 1.2

    def __add__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_two_derivative(
E           t2=Scalar(0.000000), t1=Scalar(0.000000), fn=('gt2', gt2, gt2),
E       )

minitorch/scalar.py:96: NotImplementedError

test_scalar.py::test_two_derivative[fn4]

test_scalar.py::test_two_derivative[fn4]
fn = ('lt2', lt2, lt2)

    @given(small_scalars, small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:112: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:120: in test_two_derivative
    derivative_check(scalar_fn, t1, t2)
minitorch/scalar.py:201: in derivative_check
    out = f(*scalars)
minitorch/testing.py:209: in lt2
    return a + 1.2 < b
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Scalar(0.000000), b = 1.2

    def __add__(self, b: ScalarLike) -> Scalar:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_two_derivative(
E           t2=Scalar(0.000000), t1=Scalar(0.000000), fn=('lt2', lt2, lt2),
E       )

minitorch/scalar.py:96: NotImplementedError

test_scalar.py::test_two_derivative[fn5]

test_scalar.py::test_two_derivative[fn5]
fn = ('mul2', mul2, mul2)

    @given(small_scalars, small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:112: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:120: in test_two_derivative
    derivative_check(scalar_fn, t1, t2)
minitorch/scalar.py:201: in derivative_check
    out = f(*scalars)
minitorch/testing.py:83: in mul2
    return a * b
minitorch/scalar.py:86: in __mul__
    return Mul.apply(self, b)
minitorch/scalar_functions.py:63: in apply
    c = cls._forward(ctx, *raw_vals)
minitorch/scalar_functions.py:45: in _forward
    return cls.forward(ctx, *inps)  # type: ignore
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ctx = Context(no_grad=False, saved_values=()), a = 0.0, b = 0.0

    @staticmethod
    def forward(ctx: Context, a: float, b: float) -> float:
        # TODO: Implement for Task 1.2.
>       raise NotImplementedError('Need to implement for Task 1.2')
E       NotImplementedError: Need to implement for Task 1.2
E       Falsifying example: test_two_derivative(
E           t2=Scalar(0.000000), t1=Scalar(0.000000), fn=('mul2', mul2, mul2),
E       )

minitorch/scalar_functions.py:107: NotImplementedError

test_tensor.py::test_create

test_tensor.py::test_create
@given(lists(small_floats, min_size=1))
>   def test_create(t1: List[float]) -> None:

tests/test_tensor.py:16: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor.py:18: in test_create
    t2 = tensor(t1)
minitorch/tensor_functions.py:366: in tensor
    return _tensor(cur, tuple(shape2), backend=backend, requires_grad=requires_grad)
minitorch/tensor_functions.py:332: in _tensor
    tensor = minitorch.Tensor.make(ls, shape, backend=backend)
minitorch/tensor.py:264: in make
    return Tensor(TensorData(storage, shape, strides), backend=backend)
minitorch/tensor_data.py:147: in __init__
    self.size = int(prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_create(
E           t1=[0.0],
E       )

minitorch/operators.py:206: NotImplementedError
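
From here on, every tensor test fails before any tensor code runs: both tensor(...) and the Hypothesis tensor_data strategy call minitorch.prod to turn a shape into a storage size, and that operator is still the Task 0.3 TODO at minitorch/operators.py:206. Its docstring already names the intended recipe (reduce and mul); a matching sketch:

    from functools import reduce
    from operator import mul
    from typing import Iterable


    # Hedged sketch matching the docstring "Product of a list using `reduce` and `mul`."
    def prod(ls: Iterable[float]) -> float:
        # Start from 1.0 so an empty shape still yields a size of 1.
        return reduce(mul, ls, 1.0)


    print(int(prod((1,))))       # 1, the failing example above
    print(int(prod((2, 3, 4))))  # 24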

test_tensor.py::test_one_args[fn0]

test_tensor.py::test_one_args[fn0]
fn = ('addConstant', addConstant, addConstant)

    @given(tensors())
>   @pytest.mark.task2_3

tests/test_tensor.py:24: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_args[fn1]

test_tensor.py::test_one_args[fn1]
fn = ('complex', complex, complex)

    @given(tensors())
>   @pytest.mark.task2_3

tests/test_tensor.py:24: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_args[fn2]

test_tensor.py::test_one_args[fn2]
fn = ('cube', cube, cube)

    @given(tensors())
>   @pytest.mark.task2_3

tests/test_tensor.py:24: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_args[fn3]

test_tensor.py::test_one_args[fn3]
fn = ('div', div, div)

    @given(tensors())
>   @pytest.mark.task2_3

tests/test_tensor.py:24: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_args[fn4]

test_tensor.py::test_one_args[fn4]
fn = ('exp', exp, exp)

    @given(tensors())
>   @pytest.mark.task2_3

tests/test_tensor.py:24: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_args[fn5]

test_tensor.py::test_one_args[fn5]
fn = ('explog', explog, explog)

    @given(tensors())
>   @pytest.mark.task2_3

tests/test_tensor.py:24: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_args[fn6]

test_tensor.py::test_one_args[fn6]
fn = ('inv', inv, inv)

    @given(tensors())
>   @pytest.mark.task2_3

tests/test_tensor.py:24: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_args[fn7]

test_tensor.py::test_one_args[fn7]
fn = ('log', log, log)

    @given(tensors())
>   @pytest.mark.task2_3

tests/test_tensor.py:24: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_args[fn8]

test_tensor.py::test_one_args[fn8]
fn = ('multConstant', multConstant, multConstant)

    @given(tensors())
>   @pytest.mark.task2_3

tests/test_tensor.py:24: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_args[fn9]

test_tensor.py::test_one_args[fn9]
fn = ('neg', neg, neg)

    @given(tensors())
>   @pytest.mark.task2_3

tests/test_tensor.py:24: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_args[fn10]

test_tensor.py::test_one_args[fn10]
fn = ('relu', relu, relu)

    @given(tensors())
>   @pytest.mark.task2_3

tests/test_tensor.py:24: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_args[fn11]

test_tensor.py::test_one_args[fn11]
fn = ('sig', sig, sig)

    @given(tensors())
>   @pytest.mark.task2_3

tests/test_tensor.py:24: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_args[fn12]

test_tensor.py::test_one_args[fn12]
fn = ('square', square, square)

    @given(tensors())
>   @pytest.mark.task2_3

tests/test_tensor.py:24: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_args[fn13]

test_tensor.py::test_one_args[fn13]
fn = ('subConstant', ..., ...)

    @given(tensors())
>   @pytest.mark.task2_3

tests/test_tensor.py:24: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_two_args[fn0]

test_tensor.py::test_two_args[fn0]
fn = ('add2', ..., ...)

    @given(shaped_tensors(2))
>   @pytest.mark.task2_3

tests/test_tensor.py:37: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_two_args[fn1]

test_tensor.py::test_two_args[fn1]
fn = ('div2', ..., ...)

    @given(shaped_tensors(2))
>   @pytest.mark.task2_3

tests/test_tensor.py:37: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_two_args[fn2]

test_tensor.py::test_two_args[fn2]
fn = ('eq2', ..., ...)

    @given(shaped_tensors(2))
>   @pytest.mark.task2_3

tests/test_tensor.py:37: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_two_args[fn3]

test_tensor.py::test_two_args[fn3]
fn = ('gt2', ..., ...)

    @given(shaped_tensors(2))
>   @pytest.mark.task2_3

tests/test_tensor.py:37: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_two_args[fn4]

test_tensor.py::test_two_args[fn4]
fn = ('lt2', ..., ...)

    @given(shaped_tensors(2))
>   @pytest.mark.task2_3

tests/test_tensor.py:37: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_two_args[fn5]

test_tensor.py::test_two_args[fn5]
fn = ('mul2', ..., ...)

    @given(shaped_tensors(2))
>   @pytest.mark.task2_3

tests/test_tensor.py:37: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_derivative[fn0]

test_tensor.py::test_one_derivative[fn0]
fn = ('addConstant', ..., ...)

    @given(tensors())
>   @pytest.mark.task2_4

tests/test_tensor.py:51: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_derivative[fn1]

test_tensor.py::test_one_derivative[fn1]
fn = ('complex', ..., ...)

    @given(tensors())
>   @pytest.mark.task2_4

tests/test_tensor.py:51: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_derivative[fn2]

test_tensor.py::test_one_derivative[fn2]
fn = ('cube', ..., ...)

    @given(tensors())
>   @pytest.mark.task2_4

tests/test_tensor.py:51: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_derivative[fn3]

test_tensor.py::test_one_derivative[fn3]
fn = ('div', ..., ...)

    @given(tensors())
>   @pytest.mark.task2_4

tests/test_tensor.py:51: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_derivative[fn4]

test_tensor.py::test_one_derivative[fn4]
fn = ('exp', ..., ...)

    @given(tensors())
>   @pytest.mark.task2_4

tests/test_tensor.py:51: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_derivative[fn5]

test_tensor.py::test_one_derivative[fn5]
fn = ('explog', ..., ...)

    @given(tensors())
>   @pytest.mark.task2_4

tests/test_tensor.py:51: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_derivative[fn6]

test_tensor.py::test_one_derivative[fn6]
fn = ('inv', ..., ...)

    @given(tensors())
>   @pytest.mark.task2_4

tests/test_tensor.py:51: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_derivative[fn7]

test_tensor.py::test_one_derivative[fn7]
fn = ('log', ..., ...)

    @given(tensors())
>   @pytest.mark.task2_4

tests/test_tensor.py:51: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_derivative[fn8]

test_tensor.py::test_one_derivative[fn8]
fn = ('multConstant', ..., ...)

    @given(tensors())
>   @pytest.mark.task2_4

tests/test_tensor.py:51: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_derivative[fn9]

test_tensor.py::test_one_derivative[fn9]
fn = ('neg', ..., ...)

    @given(tensors())
>   @pytest.mark.task2_4

tests/test_tensor.py:51: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_derivative[fn10]

test_tensor.py::test_one_derivative[fn10]
fn = ('relu', ..., ...)

    @given(tensors())
>   @pytest.mark.task2_4

tests/test_tensor.py:51: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_derivative[fn11]

test_tensor.py::test_one_derivative[fn11]
fn = ('sig', ..., ...)

    @given(tensors())
>   @pytest.mark.task2_4

tests/test_tensor.py:51: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_derivative[fn12]

test_tensor.py::test_one_derivative[fn12]
fn = ('square', ..., ...)

    @given(tensors())
>   @pytest.mark.task2_4

tests/test_tensor.py:51: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_one_derivative[fn13]

test_tensor.py::test_one_derivative[fn13]
fn = ('subConstant', ..., ...)

    @given(tensors())
>   @pytest.mark.task2_4

tests/test_tensor.py:51: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_permute

test_tensor.py::test_permute
@given(data(), tensors())
>   @pytest.mark.task2_4

tests/test_tensor.py:62: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_grad_size

test_tensor.py::test_grad_size
def test_grad_size() -> None:
        "Test the size of the gradient (from @WannaFy)"
>       a = tensor([1], requires_grad=True)

tests/test_tensor.py:75: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
minitorch/tensor_functions.py:366: in tensor
    return _tensor(cur, tuple(shape2), backend=backend, requires_grad=requires_grad)
minitorch/tensor_functions.py:332: in _tensor
    tensor = minitorch.Tensor.make(ls, shape, backend=backend)
minitorch/tensor.py:264: in make
    return Tensor(TensorData(storage, shape, strides), backend=backend)
minitorch/tensor_data.py:147: in __init__
    self.size = int(prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError
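
test_tensor.py::test_grad_size is the first failure in this block that does not go through a hypothesis strategy: tensor([1], requires_grad=True) reaches TensorData.__init__, which computes self.size = int(prod(shape)) directly. The snippet below illustrates the size and contiguous-stride bookkeeping this constructor path implies; storage_size and contiguous_strides are illustrative assumptions, not the actual helpers in minitorch/tensor_data.py.

    from functools import reduce
    from operator import mul
    from typing import Sequence, Tuple

    def storage_size(shape: Sequence[int]) -> int:
        "Total number of elements, i.e. int(prod(shape))."
        return int(reduce(mul, shape, 1))

    def contiguous_strides(shape: Sequence[int]) -> Tuple[int, ...]:
        "Row-major strides: each dimension steps over the product of the later dims."
        strides = [1]
        for dim in reversed(shape[1:]):
            strides.append(strides[-1] * dim)
        return tuple(reversed(strides))

    assert storage_size((1,)) == 1               # the shape in the trace above
    assert storage_size((2, 3)) == 6
    assert contiguous_strides((2, 3)) == (3, 1)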

test_tensor.py::test_grad_reduce[fn0]

test_tensor.py::test_grad_reduce[fn0]
fn = ('mean_full_red', ..., ...)

    @given(tensors())
>   @pytest.mark.task2_4

tests/test_tensor.py:89: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_grad_reduce[fn1]

test_tensor.py::test_grad_reduce[fn1]
fn = ('mean_red', ..., ...)

    @given(tensors())
>   @pytest.mark.task2_4

tests/test_tensor.py:89: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_grad_reduce[fn2]

test_tensor.py::test_grad_reduce[fn2]
fn = ('sum_red', ..., ...)

    @given(tensors())
>   @pytest.mark.task2_4

tests/test_tensor.py:89: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_two_grad[fn0]

test_tensor.py::test_two_grad[fn0]
fn = ('add2', ..., ...)

    @given(shaped_tensors(2))
>   @pytest.mark.task2_4

tests/test_tensor.py:101: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_two_grad[fn1]

test_tensor.py::test_two_grad[fn1]
fn = ('div2', ..., ...)

    @given(shaped_tensors(2))
>   @pytest.mark.task2_4

tests/test_tensor.py:101: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_two_grad[fn2]

test_tensor.py::test_two_grad[fn2]
fn = ('eq2', ..., ...)

    @given(shaped_tensors(2))
>   @pytest.mark.task2_4

tests/test_tensor.py:101: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_two_grad[fn3]

test_tensor.py::test_two_grad[fn3]
fn = ('gt2', ..., ...)

    @given(shaped_tensors(2))
>   @pytest.mark.task2_4

tests/test_tensor.py:101: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_two_grad[fn4]

test_tensor.py::test_two_grad[fn4]
fn = ('lt2', ..., ...)

    @given(shaped_tensors(2))
>   @pytest.mark.task2_4

tests/test_tensor.py:101: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_two_grad[fn5]

test_tensor.py::test_two_grad[fn5]
fn = ('mul2', ..., ...)

    @given(shaped_tensors(2))
>   @pytest.mark.task2_4

tests/test_tensor.py:101: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_two_grad_broadcast[fn0]

test_tensor.py::test_two_grad_broadcast[fn0]
fn = ('add2', ..., ...)

    @given(shaped_tensors(2))
>   @pytest.mark.task2_4

tests/test_tensor.py:113: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_two_grad_broadcast[fn1]

test_tensor.py::test_two_grad_broadcast[fn1]
fn = ('div2', ..., ...)

    @given(shaped_tensors(2))
>   @pytest.mark.task2_4

tests/test_tensor.py:113: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_two_grad_broadcast[fn2]

test_tensor.py::test_two_grad_broadcast[fn2]
fn = ('eq2', ..., ...)

    @given(shaped_tensors(2))
>   @pytest.mark.task2_4

tests/test_tensor.py:113: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_two_grad_broadcast[fn3]

test_tensor.py::test_two_grad_broadcast[fn3]
fn = ('gt2', ..., ...)

    @given(shaped_tensors(2))
>   @pytest.mark.task2_4

tests/test_tensor.py:113: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_two_grad_broadcast[fn4]

test_tensor.py::test_two_grad_broadcast[fn4]
fn = ('lt2', ..., ...)

    @given(shaped_tensors(2))
>   @pytest.mark.task2_4

tests/test_tensor.py:113: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_two_grad_broadcast[fn5]

test_tensor.py::test_two_grad_broadcast[fn5]
fn = ('mul2', ..., ...)

    @given(shaped_tensors(2))
>   @pytest.mark.task2_4

tests/test_tensor.py:113: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_fromlist

test_tensor.py::test_fromlist
def test_fromlist() -> None:
        "Test longer from list conversion"
>       t = tensor([[2, 3, 4], [4, 5, 7]])

tests/test_tensor.py:131: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
minitorch/tensor_functions.py:366: in tensor
    return _tensor(cur, tuple(shape2), backend=backend, requires_grad=requires_grad)
minitorch/tensor_functions.py:332: in _tensor
    tensor = minitorch.Tensor.make(ls, shape, backend=backend)
minitorch/tensor.py:264: in make
    return Tensor(TensorData(storage, shape, strides), backend=backend)
minitorch/tensor_data.py:147: in __init__
    self.size = int(prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (2, 3)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError
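
From test_fromlist onward the constructor failures start from nested Python lists: tensor([[2, 3, 4], [4, 5, 7]]) first infers the shape (2, 3) (the same (2, 3) that prod receives as ls in the trace above) and only then hits the unimplemented prod. A rough sketch of that shape inference and flattening is given below; infer_shape and flatten are hypothetical names used for illustration, not the helpers tensor_functions.py actually calls.

    from typing import Any, List, Tuple

    def infer_shape(ls: Any) -> Tuple[int, ...]:
        "Recover the shape by walking the first element of each nesting level."
        if isinstance(ls, (list, tuple)):
            return (len(ls),) + infer_shape(ls[0])
        return ()

    def flatten(ls: Any) -> List[float]:
        "Flatten nested lists into row-major storage order."
        if isinstance(ls, (list, tuple)):
            return [x for sub in ls for x in flatten(sub)]
        return [float(ls)]

    data = [[2, 3, 4], [4, 5, 7]]
    assert infer_shape(data) == (2, 3)
    assert len(flatten(data)) == 2 * 3  # storage size must equal prod(shape)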

test_tensor.py::test_view

test_tensor.py::test_view
def test_view() -> None:
        "Test view"
>       t = tensor([[2, 3, 4], [4, 5, 7]])

tests/test_tensor.py:139: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
minitorch/tensor_functions.py:366: in tensor
    return _tensor(cur, tuple(shape2), backend=backend, requires_grad=requires_grad)
minitorch/tensor_functions.py:332: in _tensor
    tensor = minitorch.Tensor.make(ls, shape, backend=backend)
minitorch/tensor.py:264: in make
    return Tensor(TensorData(storage, shape, strides), backend=backend)
minitorch/tensor_data.py:147: in __init__
    self.size = int(prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (2, 3)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_back_view

test_tensor.py::test_back_view
@given(tensors())
>   def test_back_view(t1: Tensor) -> None:

tests/test_tensor.py:152: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_permute_view

test_tensor.py::test_permute_view
@pytest.mark.xfail
    def test_permute_view() -> None:
>       t = tensor([[2, 3, 4], [4, 5, 7]])

tests/test_tensor.py:164: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
minitorch/tensor_functions.py:366: in tensor
    return _tensor(cur, tuple(shape2), backend=backend, requires_grad=requires_grad)
minitorch/tensor_functions.py:332: in _tensor
    tensor = minitorch.Tensor.make(ls, shape, backend=backend)
minitorch/tensor.py:264: in make
    return Tensor(TensorData(storage, shape, strides), backend=backend)
minitorch/tensor_data.py:147: in __init__
    self.size = int(prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (2, 3)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_index

test_tensor.py::test_index
@pytest.mark.xfail
    def test_index() -> None:
>       t = tensor([[2, 3, 4], [4, 5, 7]])

tests/test_tensor.py:172: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
minitorch/tensor_functions.py:366: in tensor
    return _tensor(cur, tuple(shape2), backend=backend, requires_grad=requires_grad)
minitorch/tensor_functions.py:332: in _tensor
    tensor = minitorch.Tensor.make(ls, shape, backend=backend)
minitorch/tensor.py:264: in make
    return Tensor(TensorData(storage, shape, strides), backend=backend)
minitorch/tensor_data.py:147: in __init__
    self.size = int(prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (2, 3)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_fromnumpy

test_tensor.py::test_fromnumpy
def test_fromnumpy() -> None:
>       t = tensor([[2, 3, 4], [4, 5, 7]])

tests/test_tensor.py:178: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
minitorch/tensor_functions.py:366: in tensor
    return _tensor(cur, tuple(shape2), backend=backend, requires_grad=requires_grad)
minitorch/tensor_functions.py:332: in _tensor
    tensor = minitorch.Tensor.make(ls, shape, backend=backend)
minitorch/tensor.py:264: in make
    return Tensor(TensorData(storage, shape, strides), backend=backend)
minitorch/tensor_data.py:147: in __init__
    self.size = int(prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (2, 3)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_reduce_forward_one_dim

test_tensor.py::test_reduce_forward_one_dim
@pytest.mark.task2_3
    def test_reduce_forward_one_dim() -> None:
        # shape (3, 2)
>       t = tensor([[2, 3], [4, 6], [5, 7]])

tests/test_tensor.py:193: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
minitorch/tensor_functions.py:366: in tensor
    return _tensor(cur, tuple(shape2), backend=backend, requires_grad=requires_grad)
minitorch/tensor_functions.py:332: in _tensor
    tensor = minitorch.Tensor.make(ls, shape, backend=backend)
minitorch/tensor.py:264: in make
    return Tensor(TensorData(storage, shape, strides), backend=backend)
minitorch/tensor_data.py:147: in __init__
    self.size = int(prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (3, 2)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_reduce_forward_one_dim_2

test_tensor.py::test_reduce_forward_one_dim_2
@pytest.mark.task2_3
    def test_reduce_forward_one_dim_2() -> None:
        # shape (3, 2)
>       t = tensor([[2, 3], [4, 6], [5, 7]])

tests/test_tensor.py:206: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
minitorch/tensor_functions.py:366: in tensor
    return _tensor(cur, tuple(shape2), backend=backend, requires_grad=requires_grad)
minitorch/tensor_functions.py:332: in _tensor
    tensor = minitorch.Tensor.make(ls, shape, backend=backend)
minitorch/tensor.py:264: in make
    return Tensor(TensorData(storage, shape, strides), backend=backend)
minitorch/tensor_data.py:147: in __init__
    self.size = int(prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (3, 2)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor.py::test_reduce_forward_all_dims

test_tensor.py::test_reduce_forward_all_dims
@pytest.mark.task2_3
    def test_reduce_forward_all_dims() -> None:
        # shape (3, 2)
>       t = tensor([[2, 3], [4, 6], [5, 7]])

tests/test_tensor.py:219: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
minitorch/tensor_functions.py:366: in tensor
    return _tensor(cur, tuple(shape2), backend=backend, requires_grad=requires_grad)
minitorch/tensor_functions.py:332: in _tensor
    tensor = minitorch.Tensor.make(ls, shape, backend=backend)
minitorch/tensor.py:264: in make
    return Tensor(TensorData(storage, shape, strides), backend=backend)
minitorch/tensor_data.py:147: in __init__
    self.size = int(prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (3, 2)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor_data.py::test_layout

test_tensor_data.py::test_layout
@pytest.mark.task2_1
    def test_layout() -> None:
        "Test basis properties of layout and strides"
        data = [0] * 3 * 5
>       tensor_data = minitorch.TensorData(data, (3, 5), (5, 1))

tests/test_tensor_data.py:19: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
minitorch/tensor_data.py:147: in __init__
    self.size = int(prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (3, 5)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor_data.py::test_layout_bad

test_tensor_data.py::test_layout_bad
@pytest.mark.xfail
    def test_layout_bad() -> None:
        "Test basis properties of layout and strides"
        data = [0] * 3 * 5
>       minitorch.TensorData(data, (3, 5), (6,))

tests/test_tensor_data.py:39: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <minitorch.tensor_data.TensorData object at ...>
storage = [0, 0, 0, 0, 0, 0, ...], shape = (3, 5), strides = (6,)

    def __init__(
        self,
        storage: Union[Sequence[float], Storage],
        shape: UserShape,
        strides: Optional[UserStrides] = None,
    ):
        if isinstance(storage, np.ndarray):
            self._storage = storage
        else:
            self._storage = array(storage, dtype=float64)

        if strides is None:
            strides = strides_from_shape(shape)

        assert isinstance(strides, tuple), "Strides must be tuple"
        assert isinstance(shape, tuple), "Shape must be tuple"
        if len(strides) != len(shape):
>           raise IndexingError(f"Len of strides {strides} must match {shape}.")
E           minitorch.tensor_data.IndexingError: Len of strides (6,) must match (3, 5).

minitorch/tensor_data.py:142: IndexingError
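
For reference, the default strides that strides_from_shape supplies for a contiguous (row-major) layout follow directly from the shape: the last axis has stride 1 and each earlier axis strides over the product of all later axes. A sketch under that assumption (not necessarily the repo's exact implementation):

    from typing import Tuple

    def strides_from_shape(shape: Tuple[int, ...]) -> Tuple[int, ...]:
        # Row-major strides: the last axis moves by 1 element; each earlier
        # axis moves by the product of the extents of all later axes.
        strides = [1] * len(shape)
        for i in range(len(shape) - 2, -1, -1):
            strides[i] = strides[i + 1] * shape[i + 1]
        return tuple(strides)

    # strides_from_shape((3, 5)) -> (5, 1), the strides test_layout passes explicitly.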

test_tensor_data.py::test_enumeration

test_tensor_data.py::test_enumeration
@pytest.mark.task2_1
>   @given(tensor_data())

tests/test_tensor_data.py:43: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor_data.py::test_index

test_tensor_data.py::test_index
@pytest.mark.task2_1
>   @given(tensor_data())

tests/test_tensor_data.py:61: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor_data.py::test_permute

test_tensor_data.py::test_permute
@pytest.mark.task2_1
>   @given(data())

tests/test_tensor_data.py:81: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_data.py:83: in test_permute
    td = data.draw(tensor_data())
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_permute(
E           data=data(...),
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_data.py::test_shape_broadcast

test_tensor_data.py::test_shape_broadcast
@pytest.mark.task2_2
    def test_shape_broadcast() -> None:
>       c = minitorch.shape_broadcast((1,), (5, 5))

tests/test_tensor_data.py:99: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

shape1 = (1,), shape2 = (5, 5)

    def shape_broadcast(shape1: UserShape, shape2: UserShape) -> UserShape:
        """
        Broadcast two shapes to create a new union shape.

        Args:
            shape1 : first shape
            shape2 : second shape

        Returns:
            broadcasted shape

        Raises:
            IndexingError : if cannot broadcast
        """
        # TODO: Implement for Task 2.2.
>       raise NotImplementedError('Need to implement for Task 2.2')
E       NotImplementedError: Need to implement for Task 2.2

minitorch/tensor_data.py:105: NotImplementedError
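
The failing call broadcasts (1,) against (5, 5), which under numpy-style rules yields (5, 5). A hedged sketch of the rule the docstring describes, aligning shapes from the right and reusing the IndexingError class from minitorch/tensor_data.py shown above (an illustration only, not the graded solution):

    from typing import Tuple
    from minitorch.tensor_data import IndexingError

    def shape_broadcast(shape1: Tuple[int, ...], shape2: Tuple[int, ...]) -> Tuple[int, ...]:
        # Pad the shorter shape with leading 1s, then take the larger extent
        # per axis; mismatched extents are only allowed when one of them is 1.
        n = max(len(shape1), len(shape2))
        a = (1,) * (n - len(shape1)) + tuple(shape1)
        b = (1,) * (n - len(shape2)) + tuple(shape2)
        out = []
        for d1, d2 in zip(a, b):
            if d1 != d2 and d1 != 1 and d2 != 1:
                raise IndexingError(f"Cannot broadcast {shape1} with {shape2}.")
            out.append(max(d1, d2))
        return tuple(out)

    # shape_broadcast((1,), (5, 5)) -> (5, 5)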

test_tensor_data.py::test_string

test_tensor_data.py::test_string
@given(tensor_data())
>   def test_string(tensor_data: TensorData) -> None:

tests/test_tensor_data.py:124: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_create[fast]

test_tensor_general.py::test_create[fast]
backend = 'fast'

    @given(lists(small_floats, min_size=1))
>   @pytest.mark.parametrize("backend", backend_tests)

tests/test_tensor_general.py:45: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:48: in test_create
    t2 = minitorch.tensor(t1, backend=shared[backend])
minitorch/tensor_functions.py:366: in tensor
    return _tensor(cur, tuple(shape2), backend=backend, requires_grad=requires_grad)
minitorch/tensor_functions.py:332: in _tensor
    tensor = minitorch.Tensor.make(ls, shape, backend=backend)
minitorch/tensor.py:264: in make
    return Tensor(TensorData(storage, shape, strides), backend=backend)
minitorch/tensor_data.py:147: in __init__
    self.size = int(prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_create(
E           t1=[0.0], backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_args[fast-fn0]

test_tensor_general.py::test_one_args[fast-fn0]
fn = ('addConstant', <function addConstant at ...>, <function addConstant at ...>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:54: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:63: in test_one_args
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_args(
E           data=data(...),
E           fn=('addConstant', addConstant, addConstant),
E           backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_args[fast-fn1]

test_tensor_general.py::test_one_args[fast-fn1]
fn = ('complex', <function complex at ...>, <function complex at ...>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:54: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:63: in test_one_args
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_args(
E           data=data(...), fn=('complex', complex, complex), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_args[fast-fn2]

test_tensor_general.py::test_one_args[fast-fn2]
fn = ('cube', <function cube at ...>, <function cube at ...>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:54: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:63: in test_one_args
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_args(
E           data=data(...), fn=('cube', cube, cube), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_args[fast-fn3]

test_tensor_general.py::test_one_args[fast-fn3]
fn = ('div', <function div at ...>, <function div at ...>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:54: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:63: in test_one_args
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_args(
E           data=data(...), fn=('div', div, div), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_args[fast-fn4]

test_tensor_general.py::test_one_args[fast-fn4]
fn = ('exp', <function exp at ...>, <function exp at ...>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:54: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:63: in test_one_args
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_args(
E           data=data(...), fn=('exp', exp, exp), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_args[fast-fn5]

test_tensor_general.py::test_one_args[fast-fn5]
fn = ('explog', <function explog at ...>, <function explog at ...>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:54: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:63: in test_one_args
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_args(
E           data=data(...), fn=('explog', explog, explog), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_args[fast-fn6]

test_tensor_general.py::test_one_args[fast-fn6]
fn = ('inv', <function inv at ...>, <function inv at ...>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:54: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:63: in test_one_args
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_args(
E           data=data(...), fn=('inv', inv, inv), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_args[fast-fn7]

test_tensor_general.py::test_one_args[fast-fn7]
fn = ('log', <function log at ...>, <function log at ...>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:54: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:63: in test_one_args
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_args(
E           data=data(...), fn=('log', log, log), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_args[fast-fn8]

test_tensor_general.py::test_one_args[fast-fn8]
fn = ('multConstant', <function multConstant at ...>, <function multConstant at ...>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:54: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:63: in test_one_args
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_args(
E           data=data(...),
E           fn=('multConstant', multConstant, multConstant),
E           backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_args[fast-fn9]

test_tensor_general.py::test_one_args[fast-fn9]
fn = ('neg', <function neg at ...>, <function neg at ...>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:54: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:63: in test_one_args
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_args(
E           data=data(...), fn=('neg', neg, neg), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_args[fast-fn10]

test_tensor_general.py::test_one_args[fast-fn10]
fn = ('relu', <function relu at ...>, <function relu at ...>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:54: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:63: in test_one_args
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_args(
E           data=data(...), fn=('relu', relu, relu), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_args[fast-fn11]

test_tensor_general.py::test_one_args[fast-fn11]
fn = ('sig', <function sig at ...>, <function sig at ...>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:54: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:63: in test_one_args
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_args(
E           data=data(...), fn=('sig', sig, sig), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_args[fast-fn12]

test_tensor_general.py::test_one_args[fast-fn12]
fn = ('square', <function square at ...>, <function square at ...>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:54: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:63: in test_one_args
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_args(
E           data=data(...), fn=('square', square, square), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_args[fast-fn13]

test_tensor_general.py::test_one_args[fast-fn13]
fn = ('subConstant', <function subConstant at ...>, <function subConstant at ...>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:54: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:63: in test_one_args
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_args(
E           data=data(...),
E           fn=('subConstant', subConstant, subConstant),
E           backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_two_args[fast-fn0]

test_tensor_general.py::test_two_args[fast-fn0]
fn = ('add2', <function add2 at ...>, <function add2 at ...>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:71: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:80: in test_two_args
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_two_args(
E           data=data(...), fn=('add2', add2, add2), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_two_args[fast-fn1]

test_tensor_general.py::test_two_args[fast-fn1]
fn = ('div2', <function div2 at ...>, <function div2 at ...>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:71: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:80: in test_two_args
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_two_args(
E           data=data(...), fn=('div2', div2, div2), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_two_args[fast-fn2]

test_tensor_general.py::test_two_args[fast-fn2]
fn = ('eq2', <function eq2 at ...>, <function eq2 at ...>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:71: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:80: in test_two_args
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_two_args(
E           data=data(...), fn=('eq2', eq2, eq2), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_two_args[fast-fn3]

test_tensor_general.py::test_two_args[fast-fn3]
fn = ('gt2', <function gt2 at ...>, <function gt2 at ...>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:71: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:80: in test_two_args
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_two_args(
E           data=data(...), fn=('gt2', gt2, gt2), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_two_args[fast-fn4]

test_tensor_general.py::test_two_args[fast-fn4]
fn = ('lt2', <function lt2 at ...>, <function lt2 at ...>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:71: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:80: in test_two_args
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_two_args(
E           data=data(...), fn=('lt2', lt2, lt2), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_two_args[fast-fn5]

test_tensor_general.py::test_two_args[fast-fn5]
fn = ('mul2', mul2, mul2)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:71: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:80: in test_two_args
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_two_args(
E           data=data(...), fn=('mul2', mul2, mul2), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_derivative[fast-fn0]

test_tensor_general.py::test_one_derivative[fast-fn0]
fn = ('addConstant', addConstant, addConstant)
backend = 'fast'

    @given(data())
>   @pytest.mark.parametrize("fn", one_arg)

tests/test_tensor_general.py:88: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:96: in test_one_derivative
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_derivative(
E           data=data(...),
E           fn=('addConstant', addConstant, addConstant),
E           backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError
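
Once `prod` is implemented, the failing line in tests/tensor_strategies.py,
`size = int(minitorch.prod(shape))`, reduces to a plain size computation over
the drawn shape. A quick illustration with hand-picked shapes (these are not
values drawn by Hypothesis):

    # assumes the prod sketch above
    for shape in [(1,), (2, 3), (4, 1, 5)]:
        print(shape, int(prod(shape)))  # 1, 6, 20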

test_tensor_general.py::test_one_derivative[fast-fn1]

test_tensor_general.py::test_one_derivative[fast-fn1]
fn = ('complex', complex, complex)
backend = 'fast'

    @given(data())
>   @pytest.mark.parametrize("fn", one_arg)

tests/test_tensor_general.py:88: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:96: in test_one_derivative
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_derivative(
E           data=data(...), fn=('complex', complex, complex), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_derivative[fast-fn2]

test_tensor_general.py::test_one_derivative[fast-fn2]
fn = ('cube', cube, cube)
backend = 'fast'

    @given(data())
>   @pytest.mark.parametrize("fn", one_arg)

tests/test_tensor_general.py:88: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:96: in test_one_derivative
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_derivative(
E           data=data(...), fn=('cube', cube, cube), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_derivative[fast-fn3]

test_tensor_general.py::test_one_derivative[fast-fn3]
fn = ('div', div, div)
backend = 'fast'

    @given(data())
>   @pytest.mark.parametrize("fn", one_arg)

tests/test_tensor_general.py:88: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:96: in test_one_derivative
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_derivative(
E           data=data(...), fn=('div', div, div), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_derivative[fast-fn4]

test_tensor_general.py::test_one_derivative[fast-fn4]
fn = ('exp', exp, exp)
backend = 'fast'

    @given(data())
>   @pytest.mark.parametrize("fn", one_arg)

tests/test_tensor_general.py:88: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:96: in test_one_derivative
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_derivative(
E           data=data(...), fn=('exp', exp, exp), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_derivative[fast-fn5]

test_tensor_general.py::test_one_derivative[fast-fn5]
fn = ('explog', explog, explog)
backend = 'fast'

    @given(data())
>   @pytest.mark.parametrize("fn", one_arg)

tests/test_tensor_general.py:88: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:96: in test_one_derivative
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_derivative(
E           data=data(...), fn=('explog', explog, explog), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_derivative[fast-fn6]

test_tensor_general.py::test_one_derivative[fast-fn6]
fn = ('inv', inv, inv)
backend = 'fast'

    @given(data())
>   @pytest.mark.parametrize("fn", one_arg)

tests/test_tensor_general.py:88: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:96: in test_one_derivative
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_derivative(
E           data=data(...), fn=('inv', inv, inv), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_derivative[fast-fn7]

test_tensor_general.py::test_one_derivative[fast-fn7]
fn = ('log', log, log)
backend = 'fast'

    @given(data())
>   @pytest.mark.parametrize("fn", one_arg)

tests/test_tensor_general.py:88: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:96: in test_one_derivative
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_derivative(
E           data=data(...), fn=('log', log, log), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_derivative[fast-fn8]

test_tensor_general.py::test_one_derivative[fast-fn8]
fn = ('multConstant', multConstant, multConstant)
backend = 'fast'

    @given(data())
>   @pytest.mark.parametrize("fn", one_arg)

tests/test_tensor_general.py:88: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:96: in test_one_derivative
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_derivative(
E           data=data(...),
E           fn=('multConstant', multConstant, multConstant),
E           backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_derivative[fast-fn9]

test_tensor_general.py::test_one_derivative[fast-fn9]
fn = ('neg', neg, neg)
backend = 'fast'

    @given(data())
>   @pytest.mark.parametrize("fn", one_arg)

tests/test_tensor_general.py:88: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:96: in test_one_derivative
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_derivative(
E           data=data(...), fn=('neg', neg, neg), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_derivative[fast-fn10]

test_tensor_general.py::test_one_derivative[fast-fn10]
fn = ('relu', relu, relu)
backend = 'fast'

    @given(data())
>   @pytest.mark.parametrize("fn", one_arg)

tests/test_tensor_general.py:88: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:96: in test_one_derivative
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_derivative(
E           data=data(...), fn=('relu', relu, relu), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_derivative[fast-fn11]

test_tensor_general.py::test_one_derivative[fast-fn11]
fn = ('sig', sig, sig)
backend = 'fast'

    @given(data())
>   @pytest.mark.parametrize("fn", one_arg)

tests/test_tensor_general.py:88: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:96: in test_one_derivative
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_derivative(
E           data=data(...), fn=('sig', sig, sig), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_derivative[fast-fn12]

test_tensor_general.py::test_one_derivative[fast-fn12]
fn = ('square', square, square)
backend = 'fast'

    @given(data())
>   @pytest.mark.parametrize("fn", one_arg)

tests/test_tensor_general.py:88: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:96: in test_one_derivative
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_derivative(
E           data=data(...), fn=('square', square, square), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_one_derivative[fast-fn13]

test_tensor_general.py::test_one_derivative[fast-fn13]
fn = ('subConstant', subConstant, subConstant)
backend = 'fast'

    @given(data())
>   @pytest.mark.parametrize("fn", one_arg)

tests/test_tensor_general.py:88: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:96: in test_one_derivative
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_one_derivative(
E           data=data(...),
E           fn=('subConstant', subConstant, subConstant),
E           backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_two_grad[fast-fn0]

test_tensor_general.py::test_two_grad[fast-fn0]
fn = ('add2', add2, add2)
backend = 'fast'

    @given(data())
>   @settings(max_examples=50)

tests/test_tensor_general.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:111: in test_two_grad
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_two_grad(
E           data=data(...), fn=('add2', add2, add2), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError
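
The long chain of draw/do_draw frames in each traceback is Hypothesis
delegating from one strategy to the next: `shaped_tensors` draws from
`tensor_data`, which is a composite strategy, so the error surfaces inside
core.py's `do_draw`. A heavily simplified, hypothetical version of such a
composite strategy (the real one lives in tests/tensor_strategies.py and
differs in detail; it also relies on the `prod` sketch above):

    from hypothesis import strategies as st

    @st.composite
    def tensor_data(draw, numbers=st.floats(-10.0, 10.0)):
        # draw a small shape, then enough scalars to fill it
        shape = tuple(draw(st.lists(st.integers(1, 4), min_size=1, max_size=3)))
        size = int(prod(shape))          # the call that raises in the logs above
        storage = draw(st.lists(numbers, min_size=size, max_size=size))
        return shape, storage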

test_tensor_general.py::test_two_grad[fast-fn1]

test_tensor_general.py::test_two_grad[fast-fn1]
fn = ('div2', div2, div2)
backend = 'fast'

    @given(data())
>   @settings(max_examples=50)

tests/test_tensor_general.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:111: in test_two_grad
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_two_grad(
E           data=data(...), fn=('div2', div2, div2), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_two_grad[fast-fn2]

test_tensor_general.py::test_two_grad[fast-fn2]
fn = ('eq2', eq2, eq2)
backend = 'fast'

    @given(data())
>   @settings(max_examples=50)

tests/test_tensor_general.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:111: in test_two_grad
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_two_grad(
E           data=data(...), fn=('eq2', eq2, eq2), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_two_grad[fast-fn3]

test_tensor_general.py::test_two_grad[fast-fn3]
fn = ('gt2', gt2, gt2)
backend = 'fast'

    @given(data())
>   @settings(max_examples=50)

tests/test_tensor_general.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:111: in test_two_grad
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_two_grad(
E           data=data(...), fn=('gt2', gt2, gt2), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_two_grad[fast-fn4]

test_tensor_general.py::test_two_grad[fast-fn4]
fn = ('lt2', lt2, lt2)
backend = 'fast'

    @given(data())
>   @settings(max_examples=50)

tests/test_tensor_general.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:111: in test_two_grad
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_two_grad(
E           data=data(...), fn=('lt2', lt2, lt2), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_two_grad[fast-fn5]

test_tensor_general.py::test_two_grad[fast-fn5]
fn = ('mul2', mul2, mul2)
backend = 'fast'

    @given(data())
>   @settings(max_examples=50)

tests/test_tensor_general.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:111: in test_two_grad
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_two_grad(
E           data=data(...), fn=('mul2', mul2, mul2), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_reduce[fast-fn0]

test_tensor_general.py::test_reduce[fast-fn0]
fn = ('mean_full_red', mean_full_red, mean_full_red)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:117: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:126: in test_reduce
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_reduce(
E           data=data(...),
E           fn=('mean_full_red', mean_full_red, mean_full_red),
E           backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_reduce[fast-fn1]

test_tensor_general.py::test_reduce[fast-fn1]
fn = ('mean_red', mean_red, mean_red)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:117: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:126: in test_reduce
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_reduce(
E           data=data(...), fn=('mean_red', mean_red, mean_red), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_reduce[fast-fn2]

test_tensor_general.py::test_reduce[fast-fn2]
fn = ('sum_red', sum_red, sum_red)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:117: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:126: in test_reduce
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_reduce(
E           data=data(...), fn=('sum_red', sum_red, sum_red), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_two_grad_broadcast[fast-fn0]

test_tensor_general.py::test_two_grad_broadcast[fast-fn0]
fn = ('add2', add2, add2)
backend = 'fast'

    @given(data())
>   @settings(max_examples=25)

tests/test_tensor_general.py:308: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:317: in test_two_grad_broadcast
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_two_grad_broadcast(
E           data=data(...), fn=('add2', add2, add2), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_two_grad_broadcast[fast-fn1]

test_tensor_general.py::test_two_grad_broadcast[fast-fn1]
fn = ('div2', div2, div2)
backend = 'fast'

    @given(data())
>   @settings(max_examples=25)

tests/test_tensor_general.py:308: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:317: in test_two_grad_broadcast
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_two_grad_broadcast(
E           data=data(...), fn=('div2', div2, div2), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_two_grad_broadcast[fast-fn2]

test_tensor_general.py::test_two_grad_broadcast[fast-fn2]
fn = ('eq2', eq2, eq2)
backend = 'fast'

    @given(data())
>   @settings(max_examples=25)

tests/test_tensor_general.py:308: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:317: in test_two_grad_broadcast
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_two_grad_broadcast(
E           data=data(...), fn=('eq2', eq2, eq2), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_two_grad_broadcast[fast-fn3]

test_tensor_general.py::test_two_grad_broadcast[fast-fn3]
fn = ('gt2', gt2, gt2)
backend = 'fast'

    @given(data())
>   @settings(max_examples=25)

tests/test_tensor_general.py:308: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:317: in test_two_grad_broadcast
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_two_grad_broadcast(
E           data=data(...), fn=('gt2', gt2, gt2), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_two_grad_broadcast[fast-fn4]

test_tensor_general.py::test_two_grad_broadcast[fast-fn4]
fn = ('lt2', lt2, lt2)
backend = 'fast'

    @given(data())
>   @settings(max_examples=25)

tests/test_tensor_general.py:308: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:317: in test_two_grad_broadcast
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_two_grad_broadcast(
E           data=data(...), fn=('lt2', lt2, lt2), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_two_grad_broadcast[fast-fn5]

test_tensor_general.py::test_two_grad_broadcast[fast-fn5]
fn = ('mul2', mul2, mul2)
backend = 'fast'

    @given(data())
>   @settings(max_examples=25)

tests/test_tensor_general.py:308: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:317: in test_two_grad_broadcast
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_two_grad_broadcast(
E           data=data(...), fn=('mul2', mul2, mul2), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_permute[fast]

test_tensor_general.py::test_permute[fast]
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:328: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:332: in test_permute
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (1,)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_permute(
E           data=data(...), backend='fast',
E       )

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_mm2

test_tensor_general.py::test_mm2
@pytest.mark.task3_2
    def test_mm2() -> None:
>       a = minitorch.rand((2, 3), backend=FastTensorBackend)

tests/test_tensor_general.py:343: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
minitorch/tensor_functions.py:308: in rand
    vals = [random.random() for _ in range(int(operators.prod(shape)))]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (2, 3)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3

minitorch/operators.py:206: NotImplementedError

test_tensor_general.py::test_bmm[fast]

test_tensor_general.py::test_bmm[fast]
backend = 'fast'

    @given(data())
>   @pytest.mark.parametrize("backend", matmul_tests)

tests/test_tensor_general.py:361: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:370: in test_bmm
    a = data.draw(tensors(backend=shared[backend], shape=(D, A, B)))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:49: in tensor_data
    size = int(minitorch.prod(shape))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = (2, 2, 2)

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_bmm(
E           data=data(...), backend='fast',
E       )
E       Draw 1: 2
E       Draw 2: 2
E       Draw 3: 2
E       Draw 4: 2

minitorch/operators.py:206: NotImplementedError
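
Every failure above bottoms out in the same unimplemented helper, `minitorch.operators.prod`, which the tensor strategies call to size their storage. For orientation only, a minimal sketch of the Task 0.3 helper is shown below; it uses `functools.reduce` with a multiply lambda rather than the module's own `reduce`/`mul` combinators, so it illustrates the required behaviour rather than the graded solution.

    from functools import reduce
    from typing import Iterable

    def prod(ls: Iterable[float]) -> float:
        "Product of a list using `reduce` and `mul`."
        # Fold multiplication over the iterable, starting from 1.0 so that
        # an empty shape () still yields a storage size of 1.
        return reduce(lambda x, y: x * y, ls, 1.0)

With such a helper in place, `prod((2, 3))` evaluates to 6.0, which is exactly the storage size `minitorch.rand((2, 3))` requests in `test_mm2`.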

Patch diff

diff --git a/minitorch/autodiff.py b/minitorch/autodiff.py
index 39acbe8..06496e2 100644
--- a/minitorch/autodiff.py
+++ b/minitorch/autodiff.py
@@ -1,35 +1,57 @@
 from dataclasses import dataclass
 from typing import Any, Iterable, List, Tuple
+
 from typing_extensions import Protocol

+# ## Task 1.1
+# Central Difference calculation

-def central_difference(f: Any, *vals: Any, arg: int=0, epsilon: float=1e-06
-    ) ->Any:
-    """
+
+def central_difference(f: Any, *vals: Any, arg: int = 0, epsilon: float = 1e-6) -> Any:
+    r"""
     Computes an approximation to the derivative of `f` with respect to one arg.

     See :doc:`derivative` or https://en.wikipedia.org/wiki/Finite_difference for more details.

     Args:
         f : arbitrary function from n-scalar args to one value
-        *vals : n-float values $x_0 \\ldots x_{n-1}$
+        *vals : n-float values $x_0 \ldots x_{n-1}$
         arg : the number $i$ of the arg to compute the derivative
         epsilon : a small constant

     Returns:
-        An approximation of $f'_i(x_0, \\ldots, x_{n-1})$
+        An approximation of $f'_i(x_0, \ldots, x_{n-1})$
     """
-    pass
+    # TODO: Implement for Task 1.1.
+    raise NotImplementedError('Need to implement for Task 1.1')


 variable_count = 1


 class Variable(Protocol):
-    pass
+    def accumulate_derivative(self, x: Any) -> None:
+        pass

+    @property
+    def unique_id(self) -> int:
+        pass
+
+    def is_leaf(self) -> bool:
+        pass
+
+    def is_constant(self) -> bool:
+        pass

-def topological_sort(variable: Variable) ->Iterable[Variable]:
+    @property
+    def parents(self) -> Iterable["Variable"]:
+        pass
+
+    def chain_rule(self, d_output: Any) -> Iterable[Tuple["Variable", Any]]:
+        pass
+
+
+def topological_sort(variable: Variable) -> Iterable[Variable]:
     """
     Computes the topological order of the computation graph.

@@ -39,10 +61,11 @@ def topological_sort(variable: Variable) ->Iterable[Variable]:
     Returns:
         Non-constant Variables in topological order starting from the right.
     """
-    pass
+    # TODO: Implement for Task 1.4.
+    raise NotImplementedError('Need to implement for Task 1.4')


-def backpropagate(variable: Variable, deriv: Any) ->None:
+def backpropagate(variable: Variable, deriv: Any) -> None:
     """
     Runs backpropagation on the computation graph in order to
     compute derivatives for the leave nodes.
@@ -53,7 +76,8 @@ def backpropagate(variable: Variable, deriv: Any) ->None:

     No return. Should write to its results to the derivative values of each leaf through `accumulate_derivative`.
     """
-    pass
+    # TODO: Implement for Task 1.4.
+    raise NotImplementedError('Need to implement for Task 1.4')


 @dataclass
@@ -61,9 +85,16 @@ class Context:
     """
     Context class is used by `Function` to store information during the forward pass.
     """
+
     no_grad: bool = False
     saved_values: Tuple[Any, ...] = ()

-    def save_for_backward(self, *values: Any) ->None:
-        """Store the given `values` if they need to be used during backpropagation."""
-        pass
+    def save_for_backward(self, *values: Any) -> None:
+        "Store the given `values` if they need to be used during backpropagation."
+        if self.no_grad:
+            return
+        self.saved_values = values
+
+    @property
+    def saved_tensors(self) -> Tuple[Any, ...]:
+        return self.saved_values
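
The `central_difference` stub above is left open for Task 1.1, but its docstring fully specifies the behaviour. A hedged sketch of the symmetric finite-difference form it describes (an illustration, not the reference implementation) follows.

    from typing import Any

    def central_difference(f: Any, *vals: Any, arg: int = 0, epsilon: float = 1e-6) -> Any:
        # Perturb only the `arg`-th input by +/- epsilon and take the slope.
        upper = [v + (epsilon if i == arg else 0.0) for i, v in enumerate(vals)]
        lower = [v - (epsilon if i == arg else 0.0) for i, v in enumerate(vals)]
        return (f(*upper) - f(*lower)) / (2.0 * epsilon)

The symmetric form halves the truncation error of a one-sided difference, which is why it is the usual choice for numerically checking analytic derivatives.
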
diff --git a/minitorch/cuda_ops.py b/minitorch/cuda_ops.py
index c1d7c5e..ac4cbae 100644
--- a/minitorch/cuda_ops.py
+++ b/minitorch/cuda_ops.py
@@ -1,12 +1,30 @@
 from typing import Callable, Optional
+
 import numba
 from numba import cuda
+
 from .tensor import Tensor
-from .tensor_data import MAX_DIMS, Shape, Storage, Strides, TensorData, broadcast_index, index_to_position, shape_broadcast, to_index
+from .tensor_data import (
+    MAX_DIMS,
+    Shape,
+    Storage,
+    Strides,
+    TensorData,
+    broadcast_index,
+    index_to_position,
+    shape_broadcast,
+    to_index,
+)
 from .tensor_ops import MapProto, TensorOps
+
+# This code will CUDA compile fast versions your tensor_data functions.
+# If you get an error, read the docs for NUMBA as to what is allowed
+# in these functions.
+
 to_index = cuda.jit(device=True)(to_index)
 index_to_position = cuda.jit(device=True)(index_to_position)
 broadcast_index = cuda.jit(device=True)(broadcast_index)
+
 THREADS_PER_BLOCK = 32


@@ -14,13 +32,101 @@ class CudaOps(TensorOps):
     cuda = True

     @staticmethod
-    def map(fn: Callable[[float], float]) ->MapProto:
-        """See `tensor_ops.py`"""
-        pass
+    def map(fn: Callable[[float], float]) -> MapProto:
+        "See `tensor_ops.py`"
+        f = tensor_map(cuda.jit(device=True)(fn))
+
+        def ret(a: Tensor, out: Optional[Tensor] = None) -> Tensor:
+            if out is None:
+                out = a.zeros(a.shape)
+
+            # Instantiate and run the cuda kernel.
+            threadsperblock = THREADS_PER_BLOCK
+            blockspergrid = (out.size + THREADS_PER_BLOCK - 1) // THREADS_PER_BLOCK
+            f[blockspergrid, threadsperblock](*out.tuple(), out.size, *a.tuple())  # type: ignore
+            return out
+
+        return ret
+
+    @staticmethod
+    def zip(fn: Callable[[float, float], float]) -> Callable[[Tensor, Tensor], Tensor]:
+        f = tensor_zip(cuda.jit(device=True)(fn))
+
+        def ret(a: Tensor, b: Tensor) -> Tensor:
+            c_shape = shape_broadcast(a.shape, b.shape)
+            out = a.zeros(c_shape)
+            threadsperblock = THREADS_PER_BLOCK
+            blockspergrid = (out.size + (threadsperblock - 1)) // threadsperblock
+            f[blockspergrid, threadsperblock](  # type: ignore
+                *out.tuple(), out.size, *a.tuple(), *b.tuple()
+            )
+            return out
+
+        return ret
+
+    @staticmethod
+    def reduce(
+        fn: Callable[[float, float], float], start: float = 0.0
+    ) -> Callable[[Tensor, int], Tensor]:
+        f = tensor_reduce(cuda.jit(device=True)(fn))

+        def ret(a: Tensor, dim: int) -> Tensor:
+            out_shape = list(a.shape)
+            out_shape[dim] = (a.shape[dim] - 1) // 1024 + 1
+            out_a = a.zeros(tuple(out_shape))

-def tensor_map(fn: Callable[[float], float]) ->Callable[[Storage, Shape,
-    Strides, Storage, Shape, Strides], None]:
+            threadsperblock = 1024
+            blockspergrid = out_a.size
+            f[blockspergrid, threadsperblock](  # type: ignore
+                *out_a.tuple(), out_a.size, *a.tuple(), dim, start
+            )
+
+            return out_a
+
+        return ret
+
+    @staticmethod
+    def matrix_multiply(a: Tensor, b: Tensor) -> Tensor:
+        # Make these always be a 3 dimensional multiply
+        both_2d = 0
+        if len(a.shape) == 2:
+            a = a.contiguous().view(1, a.shape[0], a.shape[1])
+            both_2d += 1
+        if len(b.shape) == 2:
+            b = b.contiguous().view(1, b.shape[0], b.shape[1])
+            both_2d += 1
+        both_2d = both_2d == 2
+
+        ls = list(shape_broadcast(a.shape[:-2], b.shape[:-2]))
+        ls.append(a.shape[-2])
+        ls.append(b.shape[-1])
+        assert a.shape[-1] == b.shape[-2]
+        out = a.zeros(tuple(ls))
+
+        # One block per batch, extra rows, extra col
+        blockspergrid = (
+            (out.shape[1] + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK,
+            (out.shape[2] + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK,
+            out.shape[0],
+        )
+        threadsperblock = (THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1)
+
+        tensor_matrix_multiply[blockspergrid, threadsperblock](
+            *out.tuple(), out.size, *a.tuple(), *b.tuple()
+        )
+
+        # Undo 3d if we added it.
+        if both_2d:
+            out = out.view(out.shape[1], out.shape[2])
+        return out
+
+
+# Implement
+
+
+def tensor_map(
+    fn: Callable[[float], float]
+) -> Callable[[Storage, Shape, Strides, Storage, Shape, Strides], None]:
     """
     CUDA higher-order tensor map function. ::

@@ -33,11 +139,31 @@ def tensor_map(fn: Callable[[float], float]) ->Callable[[Storage, Shape,
     Returns:
         Tensor map function.
     """
-    pass

-
-def tensor_zip(fn: Callable[[float, float], float]) ->Callable[[Storage,
-    Shape, Strides, Storage, Shape, Strides, Storage, Shape, Strides], None]:
+    def _map(
+        out: Storage,
+        out_shape: Shape,
+        out_strides: Strides,
+        out_size: int,
+        in_storage: Storage,
+        in_shape: Shape,
+        in_strides: Strides,
+    ) -> None:
+
+        out_index = cuda.local.array(MAX_DIMS, numba.int32)
+        in_index = cuda.local.array(MAX_DIMS, numba.int32)
+        i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
+        # TODO: Implement for Task 3.3.
+        raise NotImplementedError('Need to implement for Task 3.3')
+
+    return cuda.jit()(_map)  # type: ignore
+
+
+def tensor_zip(
+    fn: Callable[[float, float], float]
+) -> Callable[
+    [Storage, Shape, Strides, Storage, Shape, Strides, Storage, Shape, Strides], None
+]:
     """
     CUDA higher-order tensor zipWith (or map2) function ::

@@ -50,14 +176,36 @@ def tensor_zip(fn: Callable[[float, float], float]) ->Callable[[Storage,
     Returns:
         Tensor zip function.
     """
-    pass

+    def _zip(
+        out: Storage,
+        out_shape: Shape,
+        out_strides: Strides,
+        out_size: int,
+        a_storage: Storage,
+        a_shape: Shape,
+        a_strides: Strides,
+        b_storage: Storage,
+        b_shape: Shape,
+        b_strides: Strides,
+    ) -> None:
+
+        out_index = cuda.local.array(MAX_DIMS, numba.int32)
+        a_index = cuda.local.array(MAX_DIMS, numba.int32)
+        b_index = cuda.local.array(MAX_DIMS, numba.int32)
+        i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
+
+        # TODO: Implement for Task 3.3.
+        raise NotImplementedError('Need to implement for Task 3.3')

-def _sum_practice(out: Storage, a: Storage, size: int) ->None:
+    return cuda.jit()(_zip)  # type: ignore
+
+
+def _sum_practice(out: Storage, a: Storage, size: int) -> None:
     """
     This is a practice sum kernel to prepare for reduce.

-    Given an array of length $n$ and out of size $n //     ext{blockDIM}$
+    Given an array of length $n$ and out of size $n // \text{blockDIM}$
     it should sum up each blockDim values into an out cell.

     $[a_1, a_2, ..., a_{100}]$
@@ -74,14 +222,34 @@ def _sum_practice(out: Storage, a: Storage, size: int) ->None:
         size (int):  length of a.

     """
-    pass
+    BLOCK_DIM = 32
+
+    cache = cuda.shared.array(BLOCK_DIM, numba.float64)
+    i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
+    pos = cuda.threadIdx.x
+
+    # TODO: Implement for Task 3.3.
+    raise NotImplementedError('Need to implement for Task 3.3')


 jit_sum_practice = cuda.jit()(_sum_practice)


-def tensor_reduce(fn: Callable[[float, float], float]) ->Callable[[Storage,
-    Shape, Strides, Storage, Shape, Strides, int], None]:
+def sum_practice(a: Tensor) -> TensorData:
+    (size,) = a.shape
+    threadsperblock = THREADS_PER_BLOCK
+    blockspergrid = (size // THREADS_PER_BLOCK) + 1
+    out = TensorData([0.0 for i in range(2)], (2,))
+    out.to_cuda_()
+    jit_sum_practice[blockspergrid, threadsperblock](
+        out.tuple()[0], a._tensor._storage, size
+    )
+    return out
+
+
+def tensor_reduce(
+    fn: Callable[[float, float], float]
+) -> Callable[[Storage, Shape, Strides, Storage, Shape, Strides, int], None]:
     """
     CUDA higher-order tensor reduce function.

@@ -92,10 +260,31 @@ def tensor_reduce(fn: Callable[[float, float], float]) ->Callable[[Storage,
         Tensor reduce function.

     """
-    pass
-

-def _mm_practice(out: Storage, a: Storage, b: Storage, size: int) ->None:
+    def _reduce(
+        out: Storage,
+        out_shape: Shape,
+        out_strides: Strides,
+        out_size: int,
+        a_storage: Storage,
+        a_shape: Shape,
+        a_strides: Strides,
+        reduce_dim: int,
+        reduce_value: float,
+    ) -> None:
+        BLOCK_DIM = 1024
+        cache = cuda.shared.array(BLOCK_DIM, numba.float64)
+        out_index = cuda.local.array(MAX_DIMS, numba.int32)
+        out_pos = cuda.blockIdx.x
+        pos = cuda.threadIdx.x
+
+        # TODO: Implement for Task 3.3.
+        raise NotImplementedError('Need to implement for Task 3.3')
+
+    return cuda.jit()(_reduce)  # type: ignore
+
+
+def _mm_practice(out: Storage, a: Storage, b: Storage, size: int) -> None:
     """
     This is a practice square MM kernel to prepare for matmul.

@@ -125,15 +314,38 @@ def _mm_practice(out: Storage, a: Storage, b: Storage, size: int) ->None:
         b (Storage): storage for `b` tensor.
         size (int): size of the square
     """
-    pass
+    BLOCK_DIM = 32
+    # TODO: Implement for Task 3.3.
+    raise NotImplementedError('Need to implement for Task 3.3')


 jit_mm_practice = cuda.jit()(_mm_practice)


-def _tensor_matrix_multiply(out: Storage, out_shape: Shape, out_strides:
-    Strides, out_size: int, a_storage: Storage, a_shape: Shape, a_strides:
-    Strides, b_storage: Storage, b_shape: Shape, b_strides: Strides) ->None:
+def mm_practice(a: Tensor, b: Tensor) -> TensorData:
+    (size, _) = a.shape
+    threadsperblock = (THREADS_PER_BLOCK, THREADS_PER_BLOCK)
+    blockspergrid = 1
+    out = TensorData([0.0 for i in range(size * size)], (size, size))
+    out.to_cuda_()
+    jit_mm_practice[blockspergrid, threadsperblock](
+        out.tuple()[0], a._tensor._storage, b._tensor._storage, size
+    )
+    return out
+
+
+def _tensor_matrix_multiply(
+    out: Storage,
+    out_shape: Shape,
+    out_strides: Strides,
+    out_size: int,
+    a_storage: Storage,
+    a_shape: Shape,
+    a_strides: Strides,
+    b_storage: Storage,
+    b_shape: Shape,
+    b_strides: Strides,
+) -> None:
     """
     CUDA tensor matrix multiply function.

@@ -151,7 +363,30 @@ def _tensor_matrix_multiply(out: Storage, out_shape: Shape, out_strides:
     Returns:
         None : Fills in `out`
     """
-    pass
+    a_batch_stride = a_strides[0] if a_shape[0] > 1 else 0
+    b_batch_stride = b_strides[0] if b_shape[0] > 1 else 0
+    # Batch dimension - fixed
+    batch = cuda.blockIdx.z
+
+    BLOCK_DIM = 32
+    a_shared = cuda.shared.array((BLOCK_DIM, BLOCK_DIM), numba.float64)
+    b_shared = cuda.shared.array((BLOCK_DIM, BLOCK_DIM), numba.float64)
+
+    # The final position c[i, j]
+    i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
+    j = cuda.blockIdx.y * cuda.blockDim.y + cuda.threadIdx.y
+
+    # The local position in the block.
+    pi = cuda.threadIdx.x
+    pj = cuda.threadIdx.y
+
+    # Code Plan:
+    # 1) Move across shared dimension by block dim.
+    #    a) Copy into shared memory for a matrix.
+    #    b) Copy into shared memory for b matrix
+    #    c) Compute the dot produce for position c[i, j]
+    # TODO: Implement for Task 3.4.
+    raise NotImplementedError('Need to implement for Task 3.4')


 tensor_matrix_multiply = cuda.jit(_tensor_matrix_multiply)
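
All of the CUDA kernel bodies above are deliberately left as Task 3.3/3.4 stubs. As a sketch only, one way the `tensor_map` body could be completed, assuming `to_index`, `broadcast_index`, and `index_to_position` behave as documented in `tensor_data.py`, is:

    def _map(out, out_shape, out_strides, out_size, in_storage, in_shape, in_strides):
        out_index = cuda.local.array(MAX_DIMS, numba.int32)
        in_index = cuda.local.array(MAX_DIMS, numba.int32)
        i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
        if i < out_size:
            # Flat thread id -> output multi-index, broadcast back into the
            # (possibly smaller) input shape, then apply fn positionally.
            to_index(i, out_shape, out_index)
            broadcast_index(out_index, out_shape, in_shape, in_index)
            o = index_to_position(out_index, out_strides)
            j = index_to_position(in_index, in_strides)
            out[o] = fn(in_storage[j])

The `i < out_size` guard matters because the launch configuration rounds the grid up to a whole number of blocks, so some threads have no element to write.
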
diff --git a/minitorch/datasets.py b/minitorch/datasets.py
index 0547421..410dfa8 100644
--- a/minitorch/datasets.py
+++ b/minitorch/datasets.py
@@ -4,6 +4,15 @@ from dataclasses import dataclass
 from typing import List, Tuple


+def make_pts(N: int) -> List[Tuple[float, float]]:
+    X = []
+    for i in range(N):
+        x_1 = random.random()
+        x_2 = random.random()
+        X.append((x_1, x_2))
+    return X
+
+
 @dataclass
 class Graph:
     N: int
@@ -11,5 +20,76 @@ class Graph:
     y: List[int]


-datasets = {'Simple': simple, 'Diag': diag, 'Split': split, 'Xor': xor,
-    'Circle': circle, 'Spiral': spiral}
+def simple(N: int) -> Graph:
+    X = make_pts(N)
+    y = []
+    for x_1, x_2 in X:
+        y1 = 1 if x_1 < 0.5 else 0
+        y.append(y1)
+    return Graph(N, X, y)
+
+
+def diag(N: int) -> Graph:
+    X = make_pts(N)
+    y = []
+    for x_1, x_2 in X:
+        y1 = 1 if x_1 + x_2 < 0.5 else 0
+        y.append(y1)
+    return Graph(N, X, y)
+
+
+def split(N: int) -> Graph:
+    X = make_pts(N)
+    y = []
+    for x_1, x_2 in X:
+        y1 = 1 if x_1 < 0.2 or x_1 > 0.8 else 0
+        y.append(y1)
+    return Graph(N, X, y)
+
+
+def xor(N: int) -> Graph:
+    X = make_pts(N)
+    y = []
+    for x_1, x_2 in X:
+        y1 = 1 if ((x_1 < 0.5 and x_2 > 0.5) or (x_1 > 0.5 and x_2 < 0.5)) else 0
+        y.append(y1)
+    return Graph(N, X, y)
+
+
+def circle(N: int) -> Graph:
+    X = make_pts(N)
+    y = []
+    for x_1, x_2 in X:
+        x1, x2 = (x_1 - 0.5, x_2 - 0.5)
+        y1 = 1 if x1 * x1 + x2 * x2 > 0.1 else 0
+        y.append(y1)
+    return Graph(N, X, y)
+
+
+def spiral(N: int) -> Graph:
+    def x(t: float) -> float:
+        return t * math.cos(t) / 20.0
+
+    def y(t: float) -> float:
+        return t * math.sin(t) / 20.0
+
+    X = [
+        (x(10.0 * (float(i) / (N // 2))) + 0.5, y(10.0 * (float(i) / (N // 2))) + 0.5)
+        for i in range(5 + 0, 5 + N // 2)
+    ]
+    X = X + [
+        (y(-10.0 * (float(i) / (N // 2))) + 0.5, x(-10.0 * (float(i) / (N // 2))) + 0.5)
+        for i in range(5 + 0, 5 + N // 2)
+    ]
+    y2 = [0] * (N // 2) + [1] * (N // 2)
+    return Graph(N, X, y2)
+
+
+datasets = {
+    "Simple": simple,
+    "Diag": diag,
+    "Split": split,
+    "Xor": xor,
+    "Circle": circle,
+    "Spiral": spiral,
+}
diff --git a/minitorch/fast_conv.py b/minitorch/fast_conv.py
index 14f13fc..ce4244c 100644
--- a/minitorch/fast_conv.py
+++ b/minitorch/fast_conv.py
@@ -1,19 +1,42 @@
 from typing import Tuple
+
 import numpy as np
 from numba import njit, prange
+
 from .autodiff import Context
 from .tensor import Tensor
-from .tensor_data import MAX_DIMS, Index, Shape, Strides, broadcast_index, index_to_position, to_index
+from .tensor_data import (
+    MAX_DIMS,
+    Index,
+    Shape,
+    Strides,
+    broadcast_index,
+    index_to_position,
+    to_index,
+)
 from .tensor_functions import Function
-to_index = njit(inline='always')(to_index)
-index_to_position = njit(inline='always')(index_to_position)
-broadcast_index = njit(inline='always')(broadcast_index)

-
-def _tensor_conv1d(out: Tensor, out_shape: Shape, out_strides: Strides,
-    out_size: int, input: Tensor, input_shape: Shape, input_strides:
-    Strides, weight: Tensor, weight_shape: Shape, weight_strides: Strides,
-    reverse: bool) ->None:
+# This code will JIT compile fast versions your tensor_data functions.
+# If you get an error, read the docs for NUMBA as to what is allowed
+# in these functions.
+to_index = njit(inline="always")(to_index)
+index_to_position = njit(inline="always")(index_to_position)
+broadcast_index = njit(inline="always")(broadcast_index)
+
+
+def _tensor_conv1d(
+    out: Tensor,
+    out_shape: Shape,
+    out_strides: Strides,
+    out_size: int,
+    input: Tensor,
+    input_shape: Shape,
+    input_strides: Strides,
+    weight: Tensor,
+    weight_shape: Shape,
+    weight_strides: Strides,
+    reverse: bool,
+) -> None:
     """
     1D Convolution implementation.

@@ -45,16 +68,28 @@ def _tensor_conv1d(out: Tensor, out_shape: Shape, out_strides: Strides,
         weight_strides (Strides): strides for `input` tensor.
         reverse (bool): anchor weight at left or right
     """
-    pass
+    batch_, out_channels, out_width = out_shape
+    batch, in_channels, width = input_shape
+    out_channels_, in_channels_, kw = weight_shape
+
+    assert (
+        batch == batch_
+        and in_channels == in_channels_
+        and out_channels == out_channels_
+    )
+    s1 = input_strides
+    s2 = weight_strides
+
+    # TODO: Implement for Task 4.1.
+    raise NotImplementedError('Need to implement for Task 4.1')


 tensor_conv1d = njit(parallel=True)(_tensor_conv1d)


 class Conv1dFun(Function):
-
     @staticmethod
-    def forward(ctx: Context, input: Tensor, weight: Tensor) ->Tensor:
+    def forward(ctx: Context, input: Tensor, weight: Tensor) -> Tensor:
         """
         Compute a 1D Convolution

@@ -66,16 +101,63 @@ class Conv1dFun(Function):
         Returns:
             batch x out_channel x h x w
         """
-        pass
+        ctx.save_for_backward(input, weight)
+        batch, in_channels, w = input.shape
+        out_channels, in_channels2, kw = weight.shape
+        assert in_channels == in_channels2
+
+        # Run convolution
+        output = input.zeros((batch, out_channels, w))
+        tensor_conv1d(
+            *output.tuple(), output.size, *input.tuple(), *weight.tuple(), False
+        )
+        return output
+
+    @staticmethod
+    def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, Tensor]:
+        input, weight = ctx.saved_values
+        batch, in_channels, w = input.shape
+        out_channels, in_channels, kw = weight.shape
+        grad_weight = grad_output.zeros((in_channels, out_channels, kw))
+        new_input = input.permute(1, 0, 2)
+        new_grad_output = grad_output.permute(1, 0, 2)
+        tensor_conv1d(
+            *grad_weight.tuple(),
+            grad_weight.size,
+            *new_input.tuple(),
+            *new_grad_output.tuple(),
+            False,
+        )
+        grad_weight = grad_weight.permute(1, 0, 2)
+
+        grad_input = input.zeros((batch, in_channels, w))
+        new_weight = weight.permute(1, 0, 2)
+        tensor_conv1d(
+            *grad_input.tuple(),
+            grad_input.size,
+            *grad_output.tuple(),
+            *new_weight.tuple(),
+            True,
+        )
+        return grad_input, grad_weight


 conv1d = Conv1dFun.apply


-def _tensor_conv2d(out: Tensor, out_shape: Shape, out_strides: Strides,
-    out_size: int, input: Tensor, input_shape: Shape, input_strides:
-    Strides, weight: Tensor, weight_shape: Shape, weight_strides: Strides,
-    reverse: bool) ->None:
+def _tensor_conv2d(
+    out: Tensor,
+    out_shape: Shape,
+    out_strides: Strides,
+    out_size: int,
+    input: Tensor,
+    input_shape: Shape,
+    input_strides: Strides,
+    weight: Tensor,
+    weight_shape: Shape,
+    weight_strides: Strides,
+    reverse: bool,
+) -> None:
     """
     2D Convolution implementation.

@@ -108,16 +190,32 @@ def _tensor_conv2d(out: Tensor, out_shape: Shape, out_strides: Strides,
         weight_strides (Strides): strides for `input` tensor.
         reverse (bool): anchor weight at top-left or bottom-right
     """
-    pass
+    batch_, out_channels, _, _ = out_shape
+    batch, in_channels, height, width = input_shape
+    out_channels_, in_channels_, kh, kw = weight_shape
+
+    assert (
+        batch == batch_
+        and in_channels == in_channels_
+        and out_channels == out_channels_
+    )
+
+    s1 = input_strides
+    s2 = weight_strides
+    # inners
+    s10, s11, s12, s13 = s1[0], s1[1], s1[2], s1[3]
+    s20, s21, s22, s23 = s2[0], s2[1], s2[2], s2[3]
+
+    # TODO: Implement for Task 4.2.
+    raise NotImplementedError('Need to implement for Task 4.2')


 tensor_conv2d = njit(parallel=True, fastmath=True)(_tensor_conv2d)


 class Conv2dFun(Function):
-
     @staticmethod
-    def forward(ctx: Context, input: Tensor, weight: Tensor) ->Tensor:
+    def forward(ctx: Context, input: Tensor, weight: Tensor) -> Tensor:
         """
         Compute a 2D Convolution

@@ -129,7 +227,44 @@ class Conv2dFun(Function):
         Returns:
             (:class:`Tensor`) : batch x out_channel x h x w
         """
-        pass
+        ctx.save_for_backward(input, weight)
+        batch, in_channels, h, w = input.shape
+        out_channels, in_channels2, kh, kw = weight.shape
+        assert in_channels == in_channels2
+        output = input.zeros((batch, out_channels, h, w))
+        tensor_conv2d(
+            *output.tuple(), output.size, *input.tuple(), *weight.tuple(), False
+        )
+        return output
+
+    @staticmethod
+    def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, Tensor]:
+        input, weight = ctx.saved_values
+        batch, in_channels, h, w = input.shape
+        out_channels, in_channels, kh, kw = weight.shape
+
+        grad_weight = grad_output.zeros((in_channels, out_channels, kh, kw))
+        new_input = input.permute(1, 0, 2, 3)
+        new_grad_output = grad_output.permute(1, 0, 2, 3)
+        tensor_conv2d(
+            *grad_weight.tuple(),
+            grad_weight.size,
+            *new_input.tuple(),
+            *new_grad_output.tuple(),
+            False,
+        )
+        grad_weight = grad_weight.permute(1, 0, 2, 3)
+
+        grad_input = input.zeros((batch, in_channels, h, w))
+        new_weight = weight.permute(1, 0, 2, 3)
+        tensor_conv2d(
+            *grad_input.tuple(),
+            grad_input.size,
+            *grad_output.tuple(),
+            *new_weight.tuple(),
+            True,
+        )
+        return grad_input, grad_weight


 conv2d = Conv2dFun.apply
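
The `_tensor_conv1d` stub earlier in this file's diff stops after unpacking shapes and strides. A hedged sketch of the remaining inner loops, assuming the convention that `reverse` flips the weight anchor from `x + k` to `x - k` and that out-of-range positions contribute zero, could look like:

    def _tensor_conv1d(out, out_shape, out_strides, out_size,
                       input, input_shape, input_strides,
                       weight, weight_shape, weight_strides, reverse):
        batch, in_channels, width = input_shape
        _, _, kw = weight_shape
        s1, s2 = input_strides, weight_strides
        for i in prange(out_size):
            out_index = np.empty(3, np.int32)
            to_index(i, out_shape, out_index)
            b, oc, x = out_index[0], out_index[1], out_index[2]
            acc = 0.0
            for ic in range(in_channels):
                for k in range(kw):
                    ix = x - k if reverse else x + k
                    if 0 <= ix < width:
                        acc += (
                            input[b * s1[0] + ic * s1[1] + ix * s1[2]]
                            * weight[oc * s2[0] + ic * s2[1] + k * s2[2]]
                        )
            out[index_to_position(out_index, out_strides)] = acc

The 2D variant follows the same pattern with two spatial loops and four-element strides.
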
diff --git a/minitorch/fast_ops.py b/minitorch/fast_ops.py
index 71f96b7..dc73b86 100644
--- a/minitorch/fast_ops.py
+++ b/minitorch/fast_ops.py
@@ -1,39 +1,87 @@
 from __future__ import annotations
+
 from typing import TYPE_CHECKING
+
 import numpy as np
 from numba import njit, prange
-from .tensor_data import MAX_DIMS, broadcast_index, index_to_position, shape_broadcast, to_index
+
+from .tensor_data import (
+    MAX_DIMS,
+    broadcast_index,
+    index_to_position,
+    shape_broadcast,
+    to_index,
+)
 from .tensor_ops import MapProto, TensorOps
+
 if TYPE_CHECKING:
     from typing import Callable, Optional
+
     from .tensor import Tensor
     from .tensor_data import Index, Shape, Storage, Strides
-to_index = njit(inline='always')(to_index)
-index_to_position = njit(inline='always')(index_to_position)
-broadcast_index = njit(inline='always')(broadcast_index)

+# TIP: Use `NUMBA_DISABLE_JIT=1 pytest tests/ -m task3_1` to run these tests without JIT.
+
+# This code will JIT compile fast versions your tensor_data functions.
+# If you get an error, read the docs for NUMBA as to what is allowed
+# in these functions.
+to_index = njit(inline="always")(to_index)
+index_to_position = njit(inline="always")(index_to_position)
+broadcast_index = njit(inline="always")(broadcast_index)

-class FastOps(TensorOps):

+class FastOps(TensorOps):
     @staticmethod
-    def map(fn: Callable[[float], float]) ->MapProto:
-        """See `tensor_ops.py`"""
-        pass
+    def map(fn: Callable[[float], float]) -> MapProto:
+        "See `tensor_ops.py`"
+
+        # This line JIT compiles your tensor_map
+        f = tensor_map(njit()(fn))
+
+        def ret(a: Tensor, out: Optional[Tensor] = None) -> Tensor:
+            if out is None:
+                out = a.zeros(a.shape)
+            f(*out.tuple(), *a.tuple())
+            return out
+
+        return ret

     @staticmethod
-    def zip(fn: Callable[[float, float], float]) ->Callable[[Tensor, Tensor
-        ], Tensor]:
-        """See `tensor_ops.py`"""
-        pass
+    def zip(fn: Callable[[float, float], float]) -> Callable[[Tensor, Tensor], Tensor]:
+        "See `tensor_ops.py`"
+
+        f = tensor_zip(njit()(fn))
+
+        def ret(a: Tensor, b: Tensor) -> Tensor:
+            c_shape = shape_broadcast(a.shape, b.shape)
+            out = a.zeros(c_shape)
+            f(*out.tuple(), *a.tuple(), *b.tuple())
+            return out
+
+        return ret

     @staticmethod
-    def reduce(fn: Callable[[float, float], float], start: float=0.0
-        ) ->Callable[[Tensor, int], Tensor]:
-        """See `tensor_ops.py`"""
-        pass
+    def reduce(
+        fn: Callable[[float, float], float], start: float = 0.0
+    ) -> Callable[[Tensor, int], Tensor]:
+        "See `tensor_ops.py`"
+        f = tensor_reduce(njit()(fn))
+
+        def ret(a: Tensor, dim: int) -> Tensor:
+            out_shape = list(a.shape)
+            out_shape[dim] = 1
+
+            # Other values when not sum.
+            out = a.zeros(tuple(out_shape))
+            out._tensor._storage[:] = start
+
+            f(*out.tuple(), *a.tuple(), dim)
+            return out
+
+        return ret

     @staticmethod
-    def matrix_multiply(a: Tensor, b: Tensor) ->Tensor:
+    def matrix_multiply(a: Tensor, b: Tensor) -> Tensor:
         """
         Batched tensor matrix multiply ::

@@ -56,11 +104,37 @@ class FastOps(TensorOps):
         Returns:
             New tensor data
         """
-        pass

+        # Make these always be a 3 dimensional multiply
+        both_2d = 0
+        if len(a.shape) == 2:
+            a = a.contiguous().view(1, a.shape[0], a.shape[1])
+            both_2d += 1
+        if len(b.shape) == 2:
+            b = b.contiguous().view(1, b.shape[0], b.shape[1])
+            both_2d += 1
+        both_2d = both_2d == 2
+
+        ls = list(shape_broadcast(a.shape[:-2], b.shape[:-2]))
+        ls.append(a.shape[-2])
+        ls.append(b.shape[-1])
+        assert a.shape[-1] == b.shape[-2]
+        out = a.zeros(tuple(ls))
+
+        tensor_matrix_multiply(*out.tuple(), *a.tuple(), *b.tuple())
+
+        # Undo 3d if we added it.
+        if both_2d:
+            out = out.view(out.shape[1], out.shape[2])
+        return out

-def tensor_map(fn: Callable[[float], float]) ->Callable[[Storage, Shape,
-    Strides, Storage, Shape, Strides], None]:
+
+# Implementations
+
+
+def tensor_map(
+    fn: Callable[[float], float]
+) -> Callable[[Storage, Shape, Strides, Storage, Shape, Strides], None]:
     """
     NUMBA low_level tensor_map function. See `tensor_ops.py` for description.

@@ -76,11 +150,26 @@ def tensor_map(fn: Callable[[float], float]) ->Callable[[Storage, Shape,
     Returns:
         Tensor map function.
     """
-    pass
-

-def tensor_zip(fn: Callable[[float, float], float]) ->Callable[[Storage,
-    Shape, Strides, Storage, Shape, Strides, Storage, Shape, Strides], None]:
+    def _map(
+        out: Storage,
+        out_shape: Shape,
+        out_strides: Strides,
+        in_storage: Storage,
+        in_shape: Shape,
+        in_strides: Strides,
+    ) -> None:
+        # TODO: Implement for Task 3.1.
+        raise NotImplementedError('Need to implement for Task 3.1')
+
+    return njit(parallel=True)(_map)  # type: ignore
+
+
+def tensor_zip(
+    fn: Callable[[float, float], float]
+) -> Callable[
+    [Storage, Shape, Strides, Storage, Shape, Strides, Storage, Shape, Strides], None
+]:
     """
     NUMBA higher-order tensor zip function. See `tensor_ops.py` for description.

@@ -97,11 +186,27 @@ def tensor_zip(fn: Callable[[float, float], float]) ->Callable[[Storage,
     Returns:
         Tensor zip function.
     """
-    pass

-
-def tensor_reduce(fn: Callable[[float, float], float]) ->Callable[[Storage,
-    Shape, Strides, Storage, Shape, Strides, int], None]:
+    def _zip(
+        out: Storage,
+        out_shape: Shape,
+        out_strides: Strides,
+        a_storage: Storage,
+        a_shape: Shape,
+        a_strides: Strides,
+        b_storage: Storage,
+        b_shape: Shape,
+        b_strides: Strides,
+    ) -> None:
+        # TODO: Implement for Task 3.1.
+        raise NotImplementedError('Need to implement for Task 3.1')
+
+    return njit(parallel=True)(_zip)  # type: ignore
+
+
+def tensor_reduce(
+    fn: Callable[[float, float], float]
+) -> Callable[[Storage, Shape, Strides, Storage, Shape, Strides, int], None]:
     """
     NUMBA higher-order tensor reduce function. See `tensor_ops.py` for description.

@@ -117,12 +222,33 @@ def tensor_reduce(fn: Callable[[float, float], float]) ->Callable[[Storage,
     Returns:
         Tensor reduce function
     """
-    pass
-

-def _tensor_matrix_multiply(out: Storage, out_shape: Shape, out_strides:
-    Strides, a_storage: Storage, a_shape: Shape, a_strides: Strides,
-    b_storage: Storage, b_shape: Shape, b_strides: Strides) ->None:
+    def _reduce(
+        out: Storage,
+        out_shape: Shape,
+        out_strides: Strides,
+        a_storage: Storage,
+        a_shape: Shape,
+        a_strides: Strides,
+        reduce_dim: int,
+    ) -> None:
+        # TODO: Implement for Task 3.1.
+        raise NotImplementedError('Need to implement for Task 3.1')
+
+    return njit(parallel=True)(_reduce)  # type: ignore
+
+
+def _tensor_matrix_multiply(
+    out: Storage,
+    out_shape: Shape,
+    out_strides: Strides,
+    a_storage: Storage,
+    a_shape: Shape,
+    a_strides: Strides,
+    b_storage: Storage,
+    b_shape: Shape,
+    b_strides: Strides,
+) -> None:
     """
     NUMBA tensor matrix multiply function.

@@ -153,8 +279,11 @@ def _tensor_matrix_multiply(out: Storage, out_shape: Shape, out_strides:
     Returns:
         None : Fills in `out`
     """
-    pass
+    a_batch_stride = a_strides[0] if a_shape[0] > 1 else 0
+    b_batch_stride = b_strides[0] if b_shape[0] > 1 else 0
+
+    # TODO: Implement for Task 3.2.
+    raise NotImplementedError('Need to implement for Task 3.2')


-tensor_matrix_multiply = njit(parallel=True, fastmath=True)(
-    _tensor_matrix_multiply)
+tensor_matrix_multiply = njit(parallel=True, fastmath=True)(_tensor_matrix_multiply)
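
The NUMBA `tensor_map` stub in `fast_ops.py` is the CPU analogue of the CUDA kernel sketched above. One hedged sketch of its body, using `prange` for the outer parallel loop and the same indexing helpers, is:

    def _map(out, out_shape, out_strides, in_storage, in_shape, in_strides):
        for i in prange(len(out)):
            # Per-iteration index buffers keep the loop free of write races
            # under parallel=True.
            out_index = np.empty(MAX_DIMS, np.int32)
            in_index = np.empty(MAX_DIMS, np.int32)
            to_index(i, out_shape, out_index)
            broadcast_index(out_index, out_shape, in_shape, in_index)
            o = index_to_position(out_index, out_strides)
            j = index_to_position(in_index, in_strides)
            out[o] = fn(in_storage[j])

`tensor_zip` and `tensor_reduce` follow the same outline, with two input index buffers and an inner loop over the reduced dimension respectively.
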
diff --git a/minitorch/module.py b/minitorch/module.py
index 91f0dc2..d32c609 100644
--- a/minitorch/module.py
+++ b/minitorch/module.py
@@ -1,4 +1,5 @@
 from __future__ import annotations
+
 from typing import Any, Dict, Optional, Sequence, Tuple


@@ -13,28 +14,32 @@ class Module:
         training : Whether the module is in training mode or evaluation mode

     """
+
     _modules: Dict[str, Module]
     _parameters: Dict[str, Parameter]
     training: bool

-    def __init__(self) ->None:
+    def __init__(self) -> None:
         self._modules = {}
         self._parameters = {}
         self.training = True

-    def modules(self) ->Sequence[Module]:
-        """Return the direct child modules of this module."""
-        pass
+    def modules(self) -> Sequence[Module]:
+        "Return the direct child modules of this module."
+        m: Dict[str, Module] = self.__dict__["_modules"]
+        return list(m.values())

-    def train(self) ->None:
-        """Set the mode of this module and all descendent modules to `train`."""
-        pass
+    def train(self) -> None:
+        "Set the mode of this module and all descendent modules to `train`."
+        # TODO: Implement for Task 0.4.
+        raise NotImplementedError('Need to implement for Task 0.4')

-    def eval(self) ->None:
-        """Set the mode of this module and all descendent modules to `eval`."""
-        pass
+    def eval(self) -> None:
+        "Set the mode of this module and all descendent modules to `eval`."
+        # TODO: Implement for Task 0.4.
+        raise NotImplementedError('Need to implement for Task 0.4')

-    def named_parameters(self) ->Sequence[Tuple[str, Parameter]]:
+    def named_parameters(self) -> Sequence[Tuple[str, Parameter]]:
         """
         Collect all the parameters of this module and its descendents.

@@ -42,13 +47,15 @@ class Module:
         Returns:
             The name and `Parameter` of each ancestor parameter.
         """
-        pass
+        # TODO: Implement for Task 0.4.
+        raise NotImplementedError('Need to implement for Task 0.4')

-    def parameters(self) ->Sequence[Parameter]:
-        """Enumerate over all the parameters of this module and its descendents."""
-        pass
+    def parameters(self) -> Sequence[Parameter]:
+        "Enumerate over all the parameters of this module and its descendents."
+        # TODO: Implement for Task 0.4.
+        raise NotImplementedError('Need to implement for Task 0.4')

-    def add_parameter(self, k: str, v: Any) ->Parameter:
+    def add_parameter(self, k: str, v: Any) -> Parameter:
         """
         Manually add a parameter. Useful helper for scalar parameters.

@@ -59,47 +66,54 @@ class Module:
         Returns:
             Newly created parameter.
         """
-        pass
+        val = Parameter(v, k)
+        self.__dict__["_parameters"][k] = val
+        return val

-    def __setattr__(self, key: str, val: Parameter) ->None:
+    def __setattr__(self, key: str, val: Parameter) -> None:
         if isinstance(val, Parameter):
-            self.__dict__['_parameters'][key] = val
+            self.__dict__["_parameters"][key] = val
         elif isinstance(val, Module):
-            self.__dict__['_modules'][key] = val
+            self.__dict__["_modules"][key] = val
         else:
             super().__setattr__(key, val)

-    def __getattr__(self, key: str) ->Any:
-        if key in self.__dict__['_parameters']:
-            return self.__dict__['_parameters'][key]
-        if key in self.__dict__['_modules']:
-            return self.__dict__['_modules'][key]
+    def __getattr__(self, key: str) -> Any:
+        if key in self.__dict__["_parameters"]:
+            return self.__dict__["_parameters"][key]
+
+        if key in self.__dict__["_modules"]:
+            return self.__dict__["_modules"][key]
         return None

-    def __call__(self, *args: Any, **kwargs: Any) ->Any:
+    def __call__(self, *args: Any, **kwargs: Any) -> Any:
         return self.forward(*args, **kwargs)

-    def __repr__(self) ->str:
-
-        def _addindent(s_: str, numSpaces: int) ->str:
-            s2 = s_.split('\n')
+    def __repr__(self) -> str:
+        def _addindent(s_: str, numSpaces: int) -> str:
+            s2 = s_.split("\n")
             if len(s2) == 1:
                 return s_
             first = s2.pop(0)
-            s2 = [(numSpaces * ' ' + line) for line in s2]
-            s = '\n'.join(s2)
-            s = first + '\n' + s
+            s2 = [(numSpaces * " ") + line for line in s2]
+            s = "\n".join(s2)
+            s = first + "\n" + s
             return s
+
         child_lines = []
+
         for key, module in self._modules.items():
             mod_str = repr(module)
             mod_str = _addindent(mod_str, 2)
-            child_lines.append('(' + key + '): ' + mod_str)
+            child_lines.append("(" + key + "): " + mod_str)
         lines = child_lines
-        main_str = self.__class__.__name__ + '('
+
+        main_str = self.__class__.__name__ + "("
         if lines:
-            main_str += '\n  ' + '\n  '.join(lines) + '\n'
-        main_str += ')'
+            # indent each child module's repr on its own line
+            main_str += "\n  " + "\n  ".join(lines) + "\n"
+
+        main_str += ")"
         return main_str


@@ -111,20 +125,24 @@ class Parameter:
     any value for testing.
     """

-    def __init__(self, x: Any, name: Optional[str]=None) ->None:
+    def __init__(self, x: Any, name: Optional[str] = None) -> None:
         self.value = x
         self.name = name
-        if hasattr(x, 'requires_grad_'):
+        if hasattr(x, "requires_grad_"):
             self.value.requires_grad_(True)
             if self.name:
                 self.value.name = self.name

-    def update(self, x: Any) ->None:
-        """Update the parameter value."""
-        pass
+    def update(self, x: Any) -> None:
+        "Update the parameter value."
+        self.value = x
+        if hasattr(x, "requires_grad_"):
+            self.value.requires_grad_(True)
+            if self.name:
+                self.value.name = self.name

-    def __repr__(self) ->str:
+    def __repr__(self) -> str:
         return repr(self.value)

-    def __str__(self) ->str:
+    def __str__(self) -> str:
         return str(self.value)
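
The `train`, `eval`, `named_parameters`, and `parameters` stubs above all reduce to a walk over the `_modules` tree. A self-contained sketch of that traversal on a toy class (the dotted-name convention and `ToyModule` itself are illustrative assumptions, not the reference `Module`):

class ToyModule:
    def __init__(self):
        self._modules = {}
        self._parameters = {}
        self.training = True

    def set_mode(self, training):
        # Flip the flag on this module, then recurse into every child.
        self.training = training
        for child in self._modules.values():
            child.set_mode(training)

    def named_parameters(self, prefix=""):
        # Yield ("dotted.name", parameter) pairs for this module and its descendants.
        for name, p in self._parameters.items():
            yield prefix + name, p
        for name, child in self._modules.items():
            yield from child.named_parameters(prefix + name + ".")

root = ToyModule()
child = ToyModule()
root._modules["layer1"] = child
root._parameters["bias"] = 0.5
child._parameters["weight"] = 1.0
root.set_mode(False)
print(child.training)                 # False
print(dict(root.named_parameters()))  # {'bias': 0.5, 'layer1.weight': 1.0}
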
diff --git a/minitorch/modules.py b/minitorch/modules.py
index e69de29..de7519e 100644
--- a/minitorch/modules.py
+++ b/minitorch/modules.py
@@ -0,0 +1,60 @@
+# from .tensor import rand
+# from .functions import matmul, conv2d
+# from .module import Module, Parameter
+
+
+# class tLinear(Module):
+#     def __init__(self, in_size, out_size):
+#         super().__init__()
+#         self.weights = Parameter(rand((in_size, out_size)))
+#         self.bias = Parameter(rand((out_size,)))
+#         self.out_size = out_size
+
+#     def forward(self, x):
+#         batch, in_size = x.shape
+#         return (
+#             self.weights.value.view(1, in_size, self.out_size)
+#             * x.view(batch, in_size, 1)
+#         ).sum(1).view(batch, self.out_size) + self.bias.value.view(1, self.out_size)
+
+
+# class tLinear2(Module):
+#     def __init__(self, in_size, out_size):
+#         super().__init__()
+#         self.weights = Parameter(rand((in_size, out_size)))
+#         self.bias = Parameter(rand((out_size,)))
+#         self.out_size = out_size
+
+#     def forward(self, x):
+#         batch, in_size = x.shape
+#         return matmul(x, self.weights.value) + self.bias.value.view(1, self.out_size)
+
+
+# class Dropout(Module):
+#     def __init__(self, rate):
+#         super().__init__()
+#         self.rate = rate
+
+#     def forward(self, x):
+#         return (rand(x.shape) / 2 + 0.5 < self.rate) * x
+
+
+# class Conv2d(Module):
+#     def __init__(self, in_features, out_features, size):
+#         super().__init__()
+#         size1 = [size[0], size[1], in_features, out_features]
+#         size2 = [size[0], size[1], out_features]
+#         self.weights = Parameter(rand(size1))
+#         self.bias = Parameter(rand(size2))
+
+#     def forward(self, x):
+#         return conv2d(x, self.weights.value, self.bias.value)
+
+
+# # class MaxPool2d(Module):
+# #     def __init__(self, in_features, out_features, size):
+# #         super().__init__()
+
+
+# #     def forward(self, x):
+# #         return conv2d(x, self.weights.value, self.bias.value)
diff --git a/minitorch/nn.py b/minitorch/nn.py
index 577a3ff..92c0c8f 100644
--- a/minitorch/nn.py
+++ b/minitorch/nn.py
@@ -1,4 +1,5 @@
 from typing import Tuple
+
 from . import operators
 from .autodiff import Context
 from .fast_ops import FastOps
@@ -6,7 +7,7 @@ from .tensor import Tensor
 from .tensor_functions import Function, rand, tensor


-def tile(input: Tensor, kernel: Tuple[int, int]) ->Tuple[Tensor, int, int]:
+def tile(input: Tensor, kernel: Tuple[int, int]) -> Tuple[Tensor, int, int]:
     """
     Reshape an image tensor for 2D pooling

@@ -17,10 +18,16 @@ def tile(input: Tensor, kernel: Tuple[int, int]) ->Tuple[Tensor, int, int]:
     Returns:
         Tensor of size batch x channel x new_height x new_width x (kernel_height * kernel_width) as well as the new_height and new_width value.
     """
-    pass
+
+    batch, channel, height, width = input.shape
+    kh, kw = kernel
+    assert height % kh == 0
+    assert width % kw == 0
+    # TODO: Implement for Task 4.3.
+    raise NotImplementedError('Need to implement for Task 4.3')


-def avgpool2d(input: Tensor, kernel: Tuple[int, int]) ->Tensor:
+def avgpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
     """
     Tiled average pooling 2D

@@ -31,13 +38,15 @@ def avgpool2d(input: Tensor, kernel: Tuple[int, int]) ->Tensor:
     Returns:
         Pooled tensor
     """
-    pass
+    batch, channel, height, width = input.shape
+    # TODO: Implement for Task 4.3.
+    raise NotImplementedError('Need to implement for Task 4.3')


-max_reduce = FastOps.reduce(operators.max, -1000000000.0)
+max_reduce = FastOps.reduce(operators.max, -1e9)


-def argmax(input: Tensor, dim: int) ->Tensor:
+def argmax(input: Tensor, dim: int) -> Tensor:
     """
     Compute the argmax as a 1-hot tensor.

@@ -50,29 +59,35 @@ def argmax(input: Tensor, dim: int) ->Tensor:
         :class:`Tensor` : tensor with 1 on highest cell in dim, 0 otherwise

     """
-    pass
+    out = max_reduce(input, dim)
+    return out == input


 class Max(Function):
-
     @staticmethod
-    def forward(ctx: Context, input: Tensor, dim: Tensor) ->Tensor:
-        """Forward of max should be max reduction"""
-        pass
+    def forward(ctx: Context, input: Tensor, dim: Tensor) -> Tensor:
+        "Forward of max should be max reduction"
+        # TODO: Implement for Task 4.4.
+        raise NotImplementedError('Need to implement for Task 4.4')

     @staticmethod
-    def backward(ctx: Context, grad_output: Tensor) ->Tuple[Tensor, float]:
-        """Backward of max should be argmax (see above)"""
-        pass
+    def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, float]:
+        "Backward of max should be argmax (see above)"
+        # TODO: Implement for Task 4.4.
+        raise NotImplementedError('Need to implement for Task 4.4')


-def softmax(input: Tensor, dim: int) ->Tensor:
-    """
+def max(input: Tensor, dim: int) -> Tensor:
+    return Max.apply(input, input._ensure_tensor(dim))
+
+
+def softmax(input: Tensor, dim: int) -> Tensor:
+    r"""
     Compute the softmax as a tensor.



-    $z_i = \\frac{e^{x_i}}{\\sum_i e^{x_i}}$
+    $z_i = \frac{e^{x_i}}{\sum_i e^{x_i}}$

     Args:
         input : input tensor
@@ -81,14 +96,15 @@ def softmax(input: Tensor, dim: int) ->Tensor:
     Returns:
         softmax tensor
     """
-    pass
+    # TODO: Implement for Task 4.4.
+    raise NotImplementedError('Need to implement for Task 4.4')


-def logsoftmax(input: Tensor, dim: int) ->Tensor:
-    """
+def logsoftmax(input: Tensor, dim: int) -> Tensor:
+    r"""
     Compute the log of the softmax as a tensor.

-    $z_i = x_i - \\log \\sum_i e^{x_i}$
+    $z_i = x_i - \log \sum_i e^{x_i}$

     See https://en.wikipedia.org/wiki/LogSumExp#log-sum-exp_trick_for_log-domain_calculations

@@ -99,10 +115,11 @@ def logsoftmax(input: Tensor, dim: int) ->Tensor:
     Returns:
          log of softmax tensor
     """
-    pass
+    # TODO: Implement for Task 4.4.
+    raise NotImplementedError('Need to implement for Task 4.4')


-def maxpool2d(input: Tensor, kernel: Tuple[int, int]) ->Tensor:
+def maxpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor:
     """
     Tiled max pooling 2D

@@ -113,10 +130,12 @@ def maxpool2d(input: Tensor, kernel: Tuple[int, int]) ->Tensor:
     Returns:
         Tensor : pooled tensor
     """
-    pass
+    batch, channel, height, width = input.shape
+    # TODO: Implement for Task 4.4.
+    raise NotImplementedError('Need to implement for Task 4.4')


-def dropout(input: Tensor, rate: float, ignore: bool=False) ->Tensor:
+def dropout(input: Tensor, rate: float, ignore: bool = False) -> Tensor:
     """
     Dropout positions based on random noise.

@@ -128,4 +147,5 @@ def dropout(input: Tensor, rate: float, ignore: bool=False) ->Tensor:
     Returns:
         tensor with random positions dropped out
     """
-    pass
+    # TODO: Implement for Task 4.4.
+    raise NotImplementedError('Need to implement for Task 4.4')
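
A hedged NumPy sketch of the reshape-and-average that `tile` and `avgpool2d` are typically built from (it assumes, as the asserts above do, that height and width divide evenly by the kernel):

import numpy as np

def tile_np(x, kernel):
    # Split height and width into (new_h, kh) and (new_w, kw) blocks and
    # move the kernel axes to the end.
    batch, channel, height, width = x.shape
    kh, kw = kernel
    assert height % kh == 0 and width % kw == 0
    new_h, new_w = height // kh, width // kw
    t = x.reshape(batch, channel, new_h, kh, new_w, kw)
    t = t.transpose(0, 1, 2, 4, 3, 5).reshape(batch, channel, new_h, new_w, kh * kw)
    return t, new_h, new_w

def avgpool2d_np(x, kernel):
    t, new_h, new_w = tile_np(x, kernel)
    return t.mean(axis=-1)   # max over the same axis gives maxpool2d

x = np.arange(16.0).reshape(1, 1, 4, 4)
print(avgpool2d_np(x, (2, 2)))  # 2x2 result: [[2.5, 4.5], [10.5, 12.5]]
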
diff --git a/minitorch/operators.py b/minitorch/operators.py
index 8740347..3334e64 100644
--- a/minitorch/operators.py
+++ b/minitorch/operators.py
@@ -1,109 +1,132 @@
 """
 Collection of the core mathematical operators used throughout the code base.
 """
+
 import math
 from typing import Callable, Iterable

+# ## Task 0.1
+#
+# Implementation of a prelude of elementary functions.

-def mul(x: float, y: float) ->float:
-    """$f(x, y) = x * y$"""
-    pass

+def mul(x: float, y: float) -> float:
+    "$f(x, y) = x * y$"
+    # TODO: Implement for Task 0.1.
+    raise NotImplementedError('Need to implement for Task 0.1')

-def id(x: float) ->float:
-    """$f(x) = x$"""
-    pass

+def id(x: float) -> float:
+    "$f(x) = x$"
+    # TODO: Implement for Task 0.1.
+    raise NotImplementedError('Need to implement for Task 0.1')

-def add(x: float, y: float) ->float:
-    """$f(x, y) = x + y$"""
-    pass

+def add(x: float, y: float) -> float:
+    "$f(x, y) = x + y$"
+    # TODO: Implement for Task 0.1.
+    raise NotImplementedError('Need to implement for Task 0.1')

-def neg(x: float) ->float:
-    """$f(x) = -x$"""
-    pass

+def neg(x: float) -> float:
+    "$f(x) = -x$"
+    # TODO: Implement for Task 0.1.
+    raise NotImplementedError('Need to implement for Task 0.1')

-def lt(x: float, y: float) ->float:
-    """$f(x) =$ 1.0 if x is less than y else 0.0"""
-    pass

+def lt(x: float, y: float) -> float:
+    "$f(x) =$ 1.0 if x is less than y else 0.0"
+    # TODO: Implement for Task 0.1.
+    raise NotImplementedError('Need to implement for Task 0.1')

-def eq(x: float, y: float) ->float:
-    """$f(x) =$ 1.0 if x is equal to y else 0.0"""
-    pass

+def eq(x: float, y: float) -> float:
+    "$f(x) =$ 1.0 if x is equal to y else 0.0"
+    # TODO: Implement for Task 0.1.
+    raise NotImplementedError('Need to implement for Task 0.1')

-def max(x: float, y: float) ->float:
-    """$f(x) =$ x if x is greater than y else y"""
-    pass

+def max(x: float, y: float) -> float:
+    "$f(x) =$ x if x is greater than y else y"
+    # TODO: Implement for Task 0.1.
+    raise NotImplementedError('Need to implement for Task 0.1')

-def is_close(x: float, y: float) ->float:
-    """$f(x) = |x - y| < 1e-2$"""
-    pass

+def is_close(x: float, y: float) -> float:
+    "$f(x) = |x - y| < 1e-2$"
+    # TODO: Implement for Task 0.1.
+    raise NotImplementedError('Need to implement for Task 0.1')

-def sigmoid(x: float) ->float:
-    """
-    $f(x) =  \\frac{1.0}{(1.0 + e^{-x})}$
+
+def sigmoid(x: float) -> float:
+    r"""
+    $f(x) =  \frac{1.0}{(1.0 + e^{-x})}$

     (See https://en.wikipedia.org/wiki/Sigmoid_function )

     Calculate as

-    $f(x) =  \\frac{1.0}{(1.0 + e^{-x})}$ if x >=0 else $\\frac{e^x}{(1.0 + e^{x})}$
+    $f(x) =  \frac{1.0}{(1.0 + e^{-x})}$ if x >=0 else $\frac{e^x}{(1.0 + e^{x})}$

     for stability.
     """
-    pass
+    # TODO: Implement for Task 0.1.
+    raise NotImplementedError('Need to implement for Task 0.1')


-def relu(x: float) ->float:
+def relu(x: float) -> float:
     """
     $f(x) =$ x if x is greater than 0, else 0

     (See https://en.wikipedia.org/wiki/Rectifier_(neural_networks) .)
     """
-    pass
+    # TODO: Implement for Task 0.1.
+    raise NotImplementedError('Need to implement for Task 0.1')
+

+EPS = 1e-6

-EPS = 1e-06

+def log(x: float) -> float:
+    "$f(x) = log(x)$"
+    return math.log(x + EPS)

-def log(x: float) ->float:
-    """$f(x) = log(x)$"""
-    pass

+def exp(x: float) -> float:
+    "$f(x) = e^{x}$"
+    return math.exp(x)

-def exp(x: float) ->float:
-    """$f(x) = e^{x}$"""
-    pass

+def log_back(x: float, d: float) -> float:
+    r"If $f = log$ as above, compute $d \times f'(x)$"
+    # TODO: Implement for Task 0.1.
+    raise NotImplementedError('Need to implement for Task 0.1')

-def log_back(x: float, d: float) ->float:
-    """If $f = log$ as above, compute $d \\times f'(x)$"""
-    pass

+def inv(x: float) -> float:
+    "$f(x) = 1/x$"
+    # TODO: Implement for Task 0.1.
+    raise NotImplementedError('Need to implement for Task 0.1')

-def inv(x: float) ->float:
-    """$f(x) = 1/x$"""
-    pass

+def inv_back(x: float, d: float) -> float:
+    r"If $f(x) = 1/x$ compute $d \times f'(x)$"
+    # TODO: Implement for Task 0.1.
+    raise NotImplementedError('Need to implement for Task 0.1')

-def inv_back(x: float, d: float) ->float:
-    """If $f(x) = 1/x$ compute $d \\times f'(x)$"""
-    pass

+def relu_back(x: float, d: float) -> float:
+    r"If $f = relu$ compute $d \times f'(x)$"
+    # TODO: Implement for Task 0.1.
+    raise NotImplementedError('Need to implement for Task 0.1')

-def relu_back(x: float, d: float) ->float:
-    """If $f = relu$ compute $d \\times f'(x)$"""
-    pass

+# ## Task 0.3

-def map(fn: Callable[[float], float]) ->Callable[[Iterable[float]],
-    Iterable[float]]:
+# Small practice library of elementary higher-order functions.
+
+
+def map(fn: Callable[[float], float]) -> Callable[[Iterable[float]], Iterable[float]]:
     """
     Higher-order map.

@@ -116,16 +139,19 @@ def map(fn: Callable[[float], float]) ->Callable[[Iterable[float]],
          A function that takes a list, applies `fn` to each element, and returns a
          new list
     """
-    pass
+    # TODO: Implement for Task 0.3.
+    raise NotImplementedError('Need to implement for Task 0.3')


-def negList(ls: Iterable[float]) ->Iterable[float]:
-    """Use `map` and `neg` to negate each element in `ls`"""
-    pass
+def negList(ls: Iterable[float]) -> Iterable[float]:
+    "Use `map` and `neg` to negate each element in `ls`"
+    # TODO: Implement for Task 0.3.
+    raise NotImplementedError('Need to implement for Task 0.3')


-def zipWith(fn: Callable[[float, float], float]) ->Callable[[Iterable[float
-    ], Iterable[float]], Iterable[float]]:
+def zipWith(
+    fn: Callable[[float, float], float]
+) -> Callable[[Iterable[float], Iterable[float]], Iterable[float]]:
     """
     Higher-order zipwith (or map2).

@@ -139,17 +165,20 @@ def zipWith(fn: Callable[[float, float], float]) ->Callable[[Iterable[float
          applying fn(x, y) on each pair of elements.

     """
-    pass
+    # TODO: Implement for Task 0.3.
+    raise NotImplementedError('Need to implement for Task 0.3')


-def addLists(ls1: Iterable[float], ls2: Iterable[float]) ->Iterable[float]:
-    """Add the elements of `ls1` and `ls2` using `zipWith` and `add`"""
-    pass
+def addLists(ls1: Iterable[float], ls2: Iterable[float]) -> Iterable[float]:
+    "Add the elements of `ls1` and `ls2` using `zipWith` and `add`"
+    # TODO: Implement for Task 0.3.
+    raise NotImplementedError('Need to implement for Task 0.3')


-def reduce(fn: Callable[[float, float], float], start: float) ->Callable[[
-    Iterable[float]], float]:
-    """
+def reduce(
+    fn: Callable[[float, float], float], start: float
+) -> Callable[[Iterable[float]], float]:
+    r"""
     Higher-order reduce.

     Args:
@@ -158,17 +187,20 @@ def reduce(fn: Callable[[float, float], float], start: float) ->Callable[[

     Returns:
          Function that takes a list `ls` of elements
-         $x_1 \\ldots x_n$ and computes the reduction :math:`fn(x_3, fn(x_2,
+         $x_1 \ldots x_n$ and computes the reduction :math:`fn(x_3, fn(x_2,
          fn(x_1, x_0)))`
     """
-    pass
+    # TODO: Implement for Task 0.3.
+    raise NotImplementedError('Need to implement for Task 0.3')


-def sum(ls: Iterable[float]) ->float:
-    """Sum up a list using `reduce` and `add`."""
-    pass
+def sum(ls: Iterable[float]) -> float:
+    "Sum up a list using `reduce` and `add`."
+    # TODO: Implement for Task 0.3.
+    raise NotImplementedError('Need to implement for Task 0.3')


-def prod(ls: Iterable[float]) ->float:
-    """Product of a list using `reduce` and `mul`."""
-    pass
+def prod(ls: Iterable[float]) -> float:
+    "Product of a list using `reduce` and `mul`."
+    # TODO: Implement for Task 0.3.
+    raise NotImplementedError('Need to implement for Task 0.3')
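
The Task 0.3 helpers above are ordinary higher-order functions. A plain-Python sketch of the closures the docstrings describe (names are deliberately different from the stubs so this stays a standalone illustration):

from typing import Callable, Iterable, List

def map_fn(fn: Callable[[float], float]) -> Callable[[Iterable[float]], List[float]]:
    def apply(ls: Iterable[float]) -> List[float]:
        return [fn(x) for x in ls]
    return apply

def zip_with(fn: Callable[[float, float], float]):
    def apply(ls1: Iterable[float], ls2: Iterable[float]) -> List[float]:
        return [fn(x, y) for x, y in zip(ls1, ls2)]
    return apply

def reduce_fn(fn: Callable[[float, float], float], start: float):
    def apply(ls: Iterable[float]) -> float:
        acc = start
        for x in ls:
            acc = fn(acc, x)
        return acc
    return apply

print(map_fn(lambda x: -x)([1.0, 2.0]))                # [-1.0, -2.0]
print(zip_with(lambda x, y: x + y)([1.0], [2.0]))      # [3.0]
print(reduce_fn(lambda a, b: a * b, 1.0)([2.0, 3.0]))  # 6.0
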
diff --git a/minitorch/optim.py b/minitorch/optim.py
index b6358e6..21c9dde 100644
--- a/minitorch/optim.py
+++ b/minitorch/optim.py
@@ -1,16 +1,37 @@
 from typing import Sequence
+
 from .module import Parameter
 from .scalar import Scalar


 class Optimizer:
-
     def __init__(self, parameters: Sequence[Parameter]):
         self.parameters = parameters


 class SGD(Optimizer):
-
-    def __init__(self, parameters: Sequence[Parameter], lr: float=1.0):
+    def __init__(self, parameters: Sequence[Parameter], lr: float = 1.0):
         super().__init__(parameters)
         self.lr = lr
+
+    def zero_grad(self) -> None:
+        for p in self.parameters:
+            if p.value is None:
+                continue
+            if hasattr(p.value, "derivative"):
+                if p.value.derivative is not None:
+                    p.value.derivative = None
+            if hasattr(p.value, "grad"):
+                if p.value.grad is not None:
+                    p.value.grad = None
+
+    def step(self) -> None:
+        for p in self.parameters:
+            if p.value is None:
+                continue
+            if hasattr(p.value, "derivative"):
+                if p.value.derivative is not None:
+                    p.update(Scalar(p.value.data - self.lr * p.value.derivative))
+            elif hasattr(p.value, "grad"):
+                if p.value.grad is not None:
+                    p.update(p.value - self.lr * p.value.grad)
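
The update that `SGD.step` applies is the standard gradient step; a tiny self-contained illustration on plain floats (the real version goes through `Parameter.update` so that `requires_grad_` is re-applied):

def sgd_step(values, derivatives, lr=0.1):
    # Each parameter moves against its derivative, scaled by the learning rate.
    return [v - lr * d for v, d in zip(values, derivatives)]

print(sgd_step([1.0, -2.0], [0.5, -1.0]))  # [0.95, -1.9]
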
diff --git a/minitorch/scalar.py b/minitorch/scalar.py
index a8a0420..942079d 100644
--- a/minitorch/scalar.py
+++ b/minitorch/scalar.py
@@ -1,10 +1,26 @@
 from __future__ import annotations
+
 from dataclasses import dataclass
 from typing import Any, Iterable, Optional, Sequence, Tuple, Type, Union
+
 import numpy as np
+
 from .autodiff import Context, Variable, backpropagate, central_difference
-from .scalar_functions import EQ, LT, Add, Exp, Inv, Log, Mul, Neg, ReLU, ScalarFunction, Sigmoid
-ScalarLike = Union[float, int, 'Scalar']
+from .scalar_functions import (
+    EQ,
+    LT,
+    Add,
+    Exp,
+    Inv,
+    Log,
+    Mul,
+    Neg,
+    ReLU,
+    ScalarFunction,
+    Sigmoid,
+)
+
+ScalarLike = Union[float, int, "Scalar"]


 @dataclass
@@ -19,11 +35,15 @@ class ScalarHistory:
         inputs : The inputs that were given when `last_fn.forward` was called.

     """
+
     last_fn: Optional[Type[ScalarFunction]] = None
     ctx: Optional[Context] = None
     inputs: Sequence[Scalar] = ()


+# ## Task 1.2 and 1.4
+# Scalar Forward and Backward
+
 _var_count = 0


@@ -35,14 +55,19 @@ class Scalar:
     number's creation. They can only be manipulated by
     `ScalarFunction`.
     """
+
     history: Optional[ScalarHistory]
     derivative: Optional[float]
     data: float
     unique_id: int
     name: str

-    def __init__(self, v: float, back: ScalarHistory=ScalarHistory(), name:
-        Optional[str]=None):
+    def __init__(
+        self,
+        v: float,
+        back: ScalarHistory = ScalarHistory(),
+        name: Optional[str] = None,
+    ):
         global _var_count
         _var_count += 1
         self.unique_id = _var_count
@@ -54,46 +79,70 @@ class Scalar:
         else:
             self.name = str(self.unique_id)

-    def __repr__(self) ->str:
-        return 'Scalar(%f)' % self.data
+    def __repr__(self) -> str:
+        return "Scalar(%f)" % self.data

-    def __mul__(self, b: ScalarLike) ->Scalar:
+    def __mul__(self, b: ScalarLike) -> Scalar:
         return Mul.apply(self, b)

-    def __truediv__(self, b: ScalarLike) ->Scalar:
+    def __truediv__(self, b: ScalarLike) -> Scalar:
         return Mul.apply(self, Inv.apply(b))

-    def __rtruediv__(self, b: ScalarLike) ->Scalar:
+    def __rtruediv__(self, b: ScalarLike) -> Scalar:
         return Mul.apply(b, Inv.apply(self))

-    def __add__(self, b: ScalarLike) ->Scalar:
+    def __add__(self, b: ScalarLike) -> Scalar:
+        # TODO: Implement for Task 1.2.
         raise NotImplementedError('Need to implement for Task 1.2')

-    def __bool__(self) ->bool:
+    def __bool__(self) -> bool:
         return bool(self.data)

-    def __lt__(self, b: ScalarLike) ->Scalar:
+    def __lt__(self, b: ScalarLike) -> Scalar:
+        # TODO: Implement for Task 1.2.
         raise NotImplementedError('Need to implement for Task 1.2')

-    def __gt__(self, b: ScalarLike) ->Scalar:
+    def __gt__(self, b: ScalarLike) -> Scalar:
+        # TODO: Implement for Task 1.2.
         raise NotImplementedError('Need to implement for Task 1.2')

-    def __eq__(self, b: ScalarLike) ->Scalar:
+    def __eq__(self, b: ScalarLike) -> Scalar:  # type: ignore[override]
+        # TODO: Implement for Task 1.2.
         raise NotImplementedError('Need to implement for Task 1.2')

-    def __sub__(self, b: ScalarLike) ->Scalar:
+    def __sub__(self, b: ScalarLike) -> Scalar:
+        # TODO: Implement for Task 1.2.
         raise NotImplementedError('Need to implement for Task 1.2')

-    def __neg__(self) ->Scalar:
+    def __neg__(self) -> Scalar:
+        # TODO: Implement for Task 1.2.
         raise NotImplementedError('Need to implement for Task 1.2')

-    def __radd__(self, b: ScalarLike) ->Scalar:
+    def __radd__(self, b: ScalarLike) -> Scalar:
         return self + b

-    def __rmul__(self, b: ScalarLike) ->Scalar:
+    def __rmul__(self, b: ScalarLike) -> Scalar:
         return self * b

-    def accumulate_derivative(self, x: Any) ->None:
+    def log(self) -> Scalar:
+        # TODO: Implement for Task 1.2.
+        raise NotImplementedError('Need to implement for Task 1.2')
+
+    def exp(self) -> Scalar:
+        # TODO: Implement for Task 1.2.
+        raise NotImplementedError('Need to implement for Task 1.2')
+
+    def sigmoid(self) -> Scalar:
+        # TODO: Implement for Task 1.2.
+        raise NotImplementedError('Need to implement for Task 1.2')
+
+    def relu(self) -> Scalar:
+        # TODO: Implement for Task 1.2.
+        raise NotImplementedError('Need to implement for Task 1.2')
+
+    # Variable elements for backprop
+
+    def accumulate_derivative(self, x: Any) -> None:
         """
         Add `x` to the derivative accumulated on this variable.
         Should only be called during autodifferentiation on leaf variables.
@@ -101,13 +150,33 @@ class Scalar:
         Args:
             x: value to be accumulated
         """
-        pass
+        assert self.is_leaf(), "Only leaf variables can have derivatives."
+        if self.derivative is None:
+            self.derivative = 0.0
+        self.derivative += x
+
+    def is_leaf(self) -> bool:
+        "True if this variable was created by the user (no `last_fn`)"
+        return self.history is not None and self.history.last_fn is None
+
+    def is_constant(self) -> bool:
+        return self.history is None
+
+    @property
+    def parents(self) -> Iterable[Variable]:
+        assert self.history is not None
+        return self.history.inputs
+
+    def chain_rule(self, d_output: Any) -> Iterable[Tuple[Variable, Any]]:
+        h = self.history
+        assert h is not None
+        assert h.last_fn is not None
+        assert h.ctx is not None

-    def is_leaf(self) ->bool:
-        """True if this variable created by the user (no `last_fn`)"""
-        pass
+        # TODO: Implement for Task 1.3.
+        raise NotImplementedError('Need to implement for Task 1.3')

-    def backward(self, d_output: Optional[float]=None) ->None:
+    def backward(self, d_output: Optional[float] = None) -> None:
         """
         Calls autodiff to fill in the derivatives for the history of this object.

@@ -115,10 +184,12 @@ class Scalar:
             d_output (number, opt): starting derivative to backpropagate through the model
                                    (typically left out, and assumed to be 1.0).
         """
-        pass
+        if d_output is None:
+            d_output = 1.0
+        backpropagate(self, d_output)


-def derivative_check(f: Any, *scalars: Scalar) ->None:
+def derivative_check(f: Any, *scalars: Scalar) -> None:
     """
     Checks that autodiff works on a python function.
     Asserts False if derivative is incorrect.
@@ -127,4 +198,21 @@ def derivative_check(f: Any, *scalars: Scalar) ->None:
         f : function from n-scalars to 1-scalar.
         *scalars  : n input scalar values.
     """
-    pass
+    out = f(*scalars)
+    out.backward()
+
+    err_msg = """
+Derivative check at arguments f(%s) and received derivative f'=%f for argument %d,
+but was expecting derivative f'=%f from central difference."""
+    for i, x in enumerate(scalars):
+        check = central_difference(f, *scalars, arg=i)
+        print(str([x.data for x in scalars]), x.derivative, i, check)
+        assert x.derivative is not None
+        np.testing.assert_allclose(
+            x.derivative,
+            check.data,
+            1e-2,
+            1e-2,
+            err_msg=err_msg
+            % (str([x.data for x in scalars]), x.derivative, i, check.data),
+        )
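
`Scalar.chain_rule` is left for Task 1.3; the `Tensor.chain_rule` further down in this diff shows the intended shape of the answer (call `h.last_fn._backward(h.ctx, d_output)`, then pair each local derivative with its input). A self-contained sketch of that bookkeeping on plain floats, where `mul_backward` and the `is_constant` predicate stand in for the real function classes:

def mul_backward(a, b, d_output):
    # Local derivatives of f(a, b) = a * b, each scaled by d_output.
    return d_output * b, d_output * a

def chain_rule(inputs, backward, d_output, is_constant):
    local = backward(*inputs, d_output)
    # Constants get no derivative; variables are paired with theirs.
    return [(x, d) for x, d in zip(inputs, local) if not is_constant(x)]

# y = 3 * x at x = 4, with the 3 treated as a constant and d_output = 5:
pairs = chain_rule((3.0, 4.0), mul_backward, d_output=5.0,
                   is_constant=lambda v: v == 3.0)
print(pairs)  # [(4.0, 15.0)]
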
diff --git a/minitorch/scalar_functions.py b/minitorch/scalar_functions.py
index c55ef86..b5deab0 100644
--- a/minitorch/scalar_functions.py
+++ b/minitorch/scalar_functions.py
@@ -1,21 +1,30 @@
 from __future__ import annotations
+
 from typing import TYPE_CHECKING
+
 import minitorch
+
 from . import operators
 from .autodiff import Context
+
 if TYPE_CHECKING:
     from typing import Tuple
+
     from .scalar import Scalar, ScalarLike


-def wrap_tuple(x):
-    """Turn a possible value into a tuple"""
-    pass
+def wrap_tuple(x):  # type: ignore
+    "Turn a possible value into a tuple"
+    if isinstance(x, tuple):
+        return x
+    return (x,)


-def unwrap_tuple(x):
-    """Turn a singleton tuple into a value"""
-    pass
+def unwrap_tuple(x):  # type: ignore
+    "Turn a singleton tuple into a value"
+    if len(x) == 1:
+        return x[0]
+    return x


 class ScalarFunction:
@@ -27,42 +36,175 @@ class ScalarFunction:
     here to group together the `forward` and `backward` code.
     """

+    @classmethod
+    def _backward(cls, ctx: Context, d_out: float) -> Tuple[float, ...]:
+        return wrap_tuple(cls.backward(ctx, d_out))  # type: ignore
+
+    @classmethod
+    def _forward(cls, ctx: Context, *inps: float) -> float:
+        return cls.forward(ctx, *inps)  # type: ignore
+
+    @classmethod
+    def apply(cls, *vals: "ScalarLike") -> Scalar:
+        raw_vals = []
+        scalars = []
+        for v in vals:
+            if isinstance(v, minitorch.scalar.Scalar):
+                scalars.append(v)
+                raw_vals.append(v.data)
+            else:
+                scalars.append(minitorch.scalar.Scalar(v))
+                raw_vals.append(v)
+
+        # Create the context.
+        ctx = Context(False)
+
+        # Call forward with the variables.
+        c = cls._forward(ctx, *raw_vals)
+        assert isinstance(c, float), "Expected return type float got %s" % (type(c))

+        # Create a new variable from the result with a new history.
+        back = minitorch.scalar.ScalarHistory(cls, ctx, scalars)
+        return minitorch.scalar.Scalar(c, back)
+
+
+# Examples
 class Add(ScalarFunction):
-    """Addition function $f(x, y) = x + y$"""
+    "Addition function $f(x, y) = x + y$"
+
+    @staticmethod
+    def forward(ctx: Context, a: float, b: float) -> float:
+        return a + b
+
+    @staticmethod
+    def backward(ctx: Context, d_output: float) -> Tuple[float, ...]:
+        return d_output, d_output


 class Log(ScalarFunction):
-    """Log function $f(x) = log(x)$"""
+    "Log function $f(x) = log(x)$"
+
+    @staticmethod
+    def forward(ctx: Context, a: float) -> float:
+        ctx.save_for_backward(a)
+        return operators.log(a)
+
+    @staticmethod
+    def backward(ctx: Context, d_output: float) -> float:
+        (a,) = ctx.saved_values
+        return operators.log_back(a, d_output)
+
+
+# To implement.


 class Mul(ScalarFunction):
-    """Multiplication function"""
+    "Multiplication function"
+
+    @staticmethod
+    def forward(ctx: Context, a: float, b: float) -> float:
+        # TODO: Implement for Task 1.2.
+        raise NotImplementedError('Need to implement for Task 1.2')
+
+    @staticmethod
+    def backward(ctx: Context, d_output: float) -> Tuple[float, float]:
+        # TODO: Implement for Task 1.4.
+        raise NotImplementedError('Need to implement for Task 1.4')


 class Inv(ScalarFunction):
-    """Inverse function"""
+    "Inverse function"
+
+    @staticmethod
+    def forward(ctx: Context, a: float) -> float:
+        # TODO: Implement for Task 1.2.
+        raise NotImplementedError('Need to implement for Task 1.2')
+
+    @staticmethod
+    def backward(ctx: Context, d_output: float) -> float:
+        # TODO: Implement for Task 1.4.
+        raise NotImplementedError('Need to implement for Task 1.4')


 class Neg(ScalarFunction):
-    """Negation function"""
+    "Negation function"
+
+    @staticmethod
+    def forward(ctx: Context, a: float) -> float:
+        # TODO: Implement for Task 1.2.
+        raise NotImplementedError('Need to implement for Task 1.2')
+
+    @staticmethod
+    def backward(ctx: Context, d_output: float) -> float:
+        # TODO: Implement for Task 1.4.
+        raise NotImplementedError('Need to implement for Task 1.4')


 class Sigmoid(ScalarFunction):
-    """Sigmoid function"""
+    "Sigmoid function"
+
+    @staticmethod
+    def forward(ctx: Context, a: float) -> float:
+        # TODO: Implement for Task 1.2.
+        raise NotImplementedError('Need to implement for Task 1.2')
+
+    @staticmethod
+    def backward(ctx: Context, d_output: float) -> float:
+        # TODO: Implement for Task 1.4.
+        raise NotImplementedError('Need to implement for Task 1.4')


 class ReLU(ScalarFunction):
-    """ReLU function"""
+    "ReLU function"
+
+    @staticmethod
+    def forward(ctx: Context, a: float) -> float:
+        # TODO: Implement for Task 1.2.
+        raise NotImplementedError('Need to implement for Task 1.2')
+
+    @staticmethod
+    def backward(ctx: Context, d_output: float) -> float:
+        # TODO: Implement for Task 1.4.
+        raise NotImplementedError('Need to implement for Task 1.4')


 class Exp(ScalarFunction):
-    """Exp function"""
+    "Exp function"
+
+    @staticmethod
+    def forward(ctx: Context, a: float) -> float:
+        # TODO: Implement for Task 1.2.
+        raise NotImplementedError('Need to implement for Task 1.2')
+
+    @staticmethod
+    def backward(ctx: Context, d_output: float) -> float:
+        # TODO: Implement for Task 1.4.
+        raise NotImplementedError('Need to implement for Task 1.4')


 class LT(ScalarFunction):
-    """Less-than function $f(x) =$ 1.0 if x is less than y else 0.0"""
+    "Less-than function $f(x) =$ 1.0 if x is less than y else 0.0"
+
+    @staticmethod
+    def forward(ctx: Context, a: float, b: float) -> float:
+        # TODO: Implement for Task 1.2.
+        raise NotImplementedError('Need to implement for Task 1.2')
+
+    @staticmethod
+    def backward(ctx: Context, d_output: float) -> Tuple[float, float]:
+        # TODO: Implement for Task 1.4.
+        raise NotImplementedError('Need to implement for Task 1.4')


 class EQ(ScalarFunction):
-    """Equal function $f(x) =$ 1.0 if x is equal to y else 0.0"""
+    "Equal function $f(x) =$ 1.0 if x is equal to y else 0.0"
+
+    @staticmethod
+    def forward(ctx: Context, a: float, b: float) -> float:
+        # TODO: Implement for Task 1.2.
+        raise NotImplementedError('Need to implement for Task 1.2')
+
+    @staticmethod
+    def backward(ctx: Context, d_output: float) -> Tuple[float, float]:
+        # TODO: Implement for Task 1.4.
+        raise NotImplementedError('Need to implement for Task 1.4')
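
The stubbed function classes above all follow the same pattern as `Add` and `Log`: forward computes the value (saving anything backward will need with `ctx.save_for_backward`), and backward returns d_output times the local derivative. A hedged summary of the standard rules, with a runnable stable sigmoid matching the `operators.sigmoid` docstring:

import math

def sigmoid(x):
    # Numerically stable piecewise form.
    return 1.0 / (1.0 + math.exp(-x)) if x >= 0 else math.exp(x) / (1.0 + math.exp(x))

# forward value              -> backward, given upstream d:
# Mul:     f(a, b) = a * b   -> (d * b, d * a)
# Inv:     f(a) = 1 / a      -> -d / a**2
# Neg:     f(a) = -a         -> -d
# Sigmoid: f(a) = sigmoid(a) -> d * sigmoid(a) * (1 - sigmoid(a))
# ReLU:    f(a) = max(a, 0)  -> d if a > 0 else 0
# Exp:     f(a) = e**a       -> d * e**a
# LT, EQ:  comparisons       -> (0.0, 0.0), no gradient flows through them

print(sigmoid(0.0))  # 0.5
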
diff --git a/minitorch/tensor.py b/minitorch/tensor.py
index dc62ddc..0ff577b 100644
--- a/minitorch/tensor.py
+++ b/minitorch/tensor.py
@@ -1,21 +1,48 @@
 """
 Implementation of the core Tensor object for autodifferentiation.
 """
+
 from __future__ import annotations
+
 from dataclasses import dataclass
 from typing import TYPE_CHECKING
+
 import numpy as np
+
 from . import operators
 from .autodiff import Context, Variable, backpropagate
 from .tensor_data import TensorData
-from .tensor_functions import EQ, LT, Add, All, Copy, Exp, Inv, IsClose, Log, MatMul, Mul, Neg, Permute, ReLU, Sigmoid, Sum, View, tensor
+from .tensor_functions import (
+    EQ,
+    LT,
+    Add,
+    All,
+    Copy,
+    Exp,
+    Inv,
+    IsClose,
+    Log,
+    MatMul,
+    Mul,
+    Neg,
+    Permute,
+    ReLU,
+    Sigmoid,
+    Sum,
+    View,
+    tensor,
+)
+
 if TYPE_CHECKING:
     from typing import Any, Iterable, List, Optional, Sequence, Tuple, Type, Union
+
     import numpy.typing as npt
+
     from .tensor_data import Shape, Storage, Strides, UserIndex, UserShape, UserStrides
     from .tensor_functions import Function
     from .tensor_ops import TensorBackend
-    TensorLike = Union[float, int, 'Tensor']
+
+    TensorLike = Union[float, int, "Tensor"]


 @dataclass
@@ -24,6 +51,7 @@ class History:
     `History` stores the history of `Function` operations that was
     used to construct the current Variable.
     """
+
     last_fn: Optional[Type[Function]] = None
     ctx: Optional[Context] = None
     inputs: Sequence[Tensor] = ()
@@ -37,6 +65,7 @@ class Tensor:
     Tensor is a generalization of Scalar in that it is a Variable that
     handles multidimensional arrays.
     """
+
     backend: TensorBackend
     history: Optional[History]
     grad: Optional[Tensor]
@@ -44,8 +73,13 @@ class Tensor:
     unique_id: int
     name: str

-    def __init__(self, v: TensorData, back: Optional[History]=None, name:
-        Optional[str]=None, backend: Optional[TensorBackend]=None):
+    def __init__(
+        self,
+        v: TensorData,
+        back: Optional[History] = None,
+        name: Optional[str] = None,
+        backend: Optional[TensorBackend] = None,
+    ):
         global _tensor_count
         _tensor_count += 1
         self.unique_id = _tensor_count
@@ -59,119 +93,177 @@ class Tensor:
             self.name = name
         else:
             self.name = str(self.unique_id)
+
         self.f = backend

-    def to_numpy(self) ->npt.NDArray[np.float64]:
+    def requires_grad_(self, x: bool) -> None:
+        self.history = History()
+
+    def requires_grad(self) -> bool:
+        return self.history is not None
+
+    def to_numpy(self) -> npt.NDArray[np.float64]:
         """
         Returns:
              Converted to numpy array
         """
-        pass
+        return self.contiguous()._tensor._storage.reshape(self.shape)

+    # Properties
     @property
-    def shape(self) ->UserShape:
+    def shape(self) -> UserShape:
         """
         Returns:
              shape of the tensor
         """
-        pass
+        return self._tensor.shape

     @property
-    def size(self) ->int:
+    def size(self) -> int:
         """
         Returns:
              int : size of the tensor
         """
-        pass
+        return self._tensor.size

     @property
-    def dims(self) ->int:
+    def dims(self) -> int:
         """
         Returns:
              int : dimensionality of the tensor
         """
-        pass
+        return self._tensor.dims

-    def _ensure_tensor(self, b: TensorLike) ->Tensor:
-        """Turns a python number into a tensor with the same backend."""
-        pass
+    def _ensure_tensor(self, b: TensorLike) -> Tensor:
+        "Turns a python number into a tensor with the same backend."
+        if isinstance(b, (int, float)):
+            c = Tensor.make([b], (1,), backend=self.backend)
+        else:
+            b._type_(self.backend)
+            c = b
+        return c

-    def __add__(self, b: TensorLike) ->Tensor:
+    # Functions
+    def __add__(self, b: TensorLike) -> Tensor:
         return Add.apply(self, self._ensure_tensor(b))

-    def __sub__(self, b: TensorLike) ->Tensor:
+    def __sub__(self, b: TensorLike) -> Tensor:
         return Add.apply(self, -self._ensure_tensor(b))

-    def __mul__(self, b: TensorLike) ->Tensor:
+    def __mul__(self, b: TensorLike) -> Tensor:
         return Mul.apply(self, self._ensure_tensor(b))

-    def __truediv__(self, b: TensorLike) ->Tensor:
+    def __truediv__(self, b: TensorLike) -> Tensor:
         return Mul.apply(self, Inv.apply(self._ensure_tensor(b)))

-    def __rtruediv__(self, b: TensorLike) ->Tensor:
+    def __rtruediv__(self, b: TensorLike) -> Tensor:
         return Mul.apply(self._ensure_tensor(b), Inv.apply(self))

-    def __matmul__(self, b: Tensor) ->Tensor:
-        """Not used until Module 3"""
+    def __matmul__(self, b: Tensor) -> Tensor:
+        "Not used until Module 3"
         return MatMul.apply(self, b)

-    def __lt__(self, b: TensorLike) ->Tensor:
+    def __lt__(self, b: TensorLike) -> Tensor:
         return LT.apply(self, self._ensure_tensor(b))

-    def __eq__(self, b: TensorLike) ->Tensor:
+    def __eq__(self, b: TensorLike) -> Tensor:  # type: ignore[override]
         return EQ.apply(self, self._ensure_tensor(b))

-    def __gt__(self, b: TensorLike) ->Tensor:
+    def __gt__(self, b: TensorLike) -> Tensor:
         return LT.apply(self._ensure_tensor(b), self)

-    def __neg__(self) ->Tensor:
+    def __neg__(self) -> Tensor:
         return Neg.apply(self)

-    def __radd__(self, b: TensorLike) ->Tensor:
+    def __radd__(self, b: TensorLike) -> Tensor:
         return self + b

-    def __rmul__(self, b: TensorLike) ->Tensor:
+    def __rmul__(self, b: TensorLike) -> Tensor:
         return self * b

-    def sum(self, dim: Optional[int]=None) ->Tensor:
-        """Compute the sum over dimension `dim`"""
-        pass
+    def all(self, dim: Optional[int] = None) -> Tensor:
+        if dim is None:
+            return All.apply(self.view(self.size), self._ensure_tensor(0))
+        else:
+            return All.apply(self, self._ensure_tensor(dim))
+
+    def is_close(self, y: Tensor) -> Tensor:
+        return IsClose.apply(self, y)
+
+    def sigmoid(self) -> Tensor:
+        return Sigmoid.apply(self)
+
+    def relu(self) -> Tensor:
+        return ReLU.apply(self)

-    def mean(self, dim: Optional[int]=None) ->Tensor:
-        """Compute the mean over dimension `dim`"""
-        pass
+    def log(self) -> Tensor:
+        return Log.apply(self)

-    def permute(self, *order: int) ->Tensor:
-        """Permute tensor dimensions to *order"""
-        pass
+    def exp(self) -> Tensor:
+        return Exp.apply(self)

-    def view(self, *shape: int) ->Tensor:
-        """Change the shape of the tensor to a new shape with the same size"""
-        pass
+    def item(self) -> float:
+        assert self.size == 1
+        x: float = self._tensor._storage[0]
+        return x

-    def contiguous(self) ->Tensor:
-        """Return a contiguous tensor with the same data"""
-        pass
+    def sum(self, dim: Optional[int] = None) -> Tensor:
+        "Compute the sum over dimension `dim`"
+        if dim is None:
+            return Sum.apply(self.contiguous().view(self.size), self._ensure_tensor(0))
+        else:
+            return Sum.apply(self, self._ensure_tensor(dim))
+
+    def mean(self, dim: Optional[int] = None) -> Tensor:
+        "Compute the mean over dimension `dim`"
+        if dim is not None:
+            return self.sum(dim) / self.shape[dim]
+        else:
+            return self.sum() / self.size
+
+    def permute(self, *order: int) -> Tensor:
+        "Permute tensor dimensions to *order"
+        return Permute.apply(self, tensor(list(order)))
+
+    def view(self, *shape: int) -> Tensor:
+        "Change the shape of the tensor to a new shape with the same size"
+        return View.apply(self, tensor(list(shape)))

-    def __repr__(self) ->str:
+    def contiguous(self) -> Tensor:
+        "Return a contiguous tensor with the same data"
+        return Copy.apply(self)
+
+    def __repr__(self) -> str:
         return self._tensor.to_string()

-    def __getitem__(self, key: Union[int, UserIndex]) ->float:
+    def __getitem__(self, key: Union[int, UserIndex]) -> float:
         key2 = (key,) if isinstance(key, int) else key
         return self._tensor.get(key2)

-    def __setitem__(self, key: Union[int, UserIndex], val: float) ->None:
+    def __setitem__(self, key: Union[int, UserIndex], val: float) -> None:
         key2 = (key,) if isinstance(key, int) else key
         self._tensor.set(key2, val)

-    @staticmethod
-    def make(storage: Union[Storage, List[float]], shape: UserShape,
-        strides: Optional[UserStrides]=None, backend: Optional[
-        TensorBackend]=None) ->Tensor:
-        """Create a new tensor from data"""
-        pass
+    # Internal methods used for autodiff.
+    def _type_(self, backend: TensorBackend) -> None:
+        self.backend = backend
+        if backend.cuda:  # pragma: no cover
+            self._tensor.to_cuda_()
+
+    def _new(self, tensor_data: TensorData) -> Tensor:
+        return Tensor(tensor_data, backend=self.backend)

-    def expand(self, other: Tensor) ->Tensor:
+    @staticmethod
+    def make(
+        storage: Union[Storage, List[float]],
+        shape: UserShape,
+        strides: Optional[UserStrides] = None,
+        backend: Optional[TensorBackend] = None,
+    ) -> Tensor:
+        "Create a new tensor from data"
+        return Tensor(TensorData(storage, shape, strides), backend=backend)
+
+    def expand(self, other: Tensor) -> Tensor:
         """
         Method used to allow for backprop over broadcasting.
         This method is called when the output of `backward`
@@ -185,9 +277,51 @@ class Tensor:
             Expanded version of `other` with the right derivatives

         """
-        pass

-    def accumulate_derivative(self, x: Any) ->None:
+        # Case 1: Both the same shape.
+        if self.shape == other.shape:
+            return other
+
+        # Case 2: Backward is a smaller than self. Broadcast up.
+        true_shape = TensorData.shape_broadcast(self.shape, other.shape)
+        buf = self.zeros(true_shape)
+        self.backend.id_map(other, buf)
+        if self.shape == true_shape:
+            return buf
+
+        # Case 3: Still different, reduce extra dims.
+        out = buf
+        orig_shape = [1] * (len(out.shape) - len(self.shape)) + list(self.shape)
+        for dim, shape in enumerate(out.shape):
+            if orig_shape[dim] == 1 and shape != 1:
+                out = self.backend.add_reduce(out, dim)
+        assert out.size == self.size, f"{out.shape} {self.shape}"
+        # START CODE CHANGE (2021)
+        return Tensor.make(out._tensor._storage, self.shape, backend=self.backend)
+        # END CODE CHANGE (2021)
+
+    def zeros(self, shape: Optional[UserShape] = None) -> Tensor:
+        def zero(shape: UserShape) -> Tensor:
+            return Tensor.make(
+                [0.0] * int(operators.prod(shape)), shape, backend=self.backend
+            )
+
+        if shape is None:
+            out = zero(self.shape)
+        else:
+            out = zero(shape)
+        out._type_(self.backend)
+        return out
+
+    def tuple(self) -> Tuple[Storage, Shape, Strides]:
+        return self._tensor.tuple()
+
+    def detach(self) -> Tensor:
+        return Tensor(self._tensor, backend=self.backend)
+
+    # Variable elements for backprop
+
+    def accumulate_derivative(self, x: Any) -> None:
         """
         Add `x` to the derivative accumulated on this variable.
         Should only be called during autodifferentiation on leaf variables.
@@ -195,14 +329,46 @@ class Tensor:
         Args:
             x : value to be accumulated
         """
-        pass
+        assert self.is_leaf(), "Only leaf variables can have derivatives."
+        if self.grad is None:
+            self.grad = Tensor.make(
+                [0] * int(operators.prod(self.shape)), self.shape, backend=self.backend
+            )
+        self.grad += x
+
+    def is_leaf(self) -> bool:
+        "True if this variable was created by the user (no `last_fn`)"
+        return self.history is not None and self.history.last_fn is None

-    def is_leaf(self) ->bool:
-        """True if this variable created by the user (no `last_fn`)"""
-        pass
+    def is_constant(self) -> bool:
+        return self.history is None

-    def zero_grad_(self) ->None:
+    @property
+    def parents(self) -> Iterable[Variable]:
+        assert self.history is not None
+        return self.history.inputs
+
+    def chain_rule(self, d_output: Any) -> Iterable[Tuple[Variable, Any]]:
+        h = self.history
+        assert h is not None
+        assert h.last_fn is not None
+        assert h.ctx is not None
+
+        x = h.last_fn._backward(h.ctx, d_output)
+        assert len(x) == len(h.inputs), f"Bug in function {h.last_fn}"
+        return [
+            (inp, inp.expand(self._ensure_tensor(d_in)))
+            for inp, d_in in zip(h.inputs, x)
+        ]
+
+    def backward(self, grad_output: Optional[Tensor] = None) -> None:
+        if grad_output is None:
+            assert self.shape == (1,), "Must provide grad_output if non-scalar"
+            grad_output = Tensor.make([1.0], (1,), backend=self.backend)
+        backpropagate(self, grad_output)
+
+    def zero_grad_(self) -> None:  # pragma: no cover
         """
         Reset the derivative on this variable.
         """
-        pass
+        self.grad = None
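
The Case 3 branch of `expand` above reduces a broadcast gradient back to the original shape by summing over every dimension that was broadcast from size 1. A small NumPy illustration of that reduction (the helper name and shapes are made up for the example):

import numpy as np

def reduce_to_shape(grad, shape):
    # Left-pad the target shape with 1s, then sum out every broadcast axis.
    padded = (1,) * (grad.ndim - len(shape)) + tuple(shape)
    for axis, size in enumerate(padded):
        if size == 1 and grad.shape[axis] != 1:
            grad = grad.sum(axis=axis, keepdims=True)
    return grad.reshape(shape)

g = np.ones((2, 3, 4))                   # gradient of a broadcast result
print(reduce_to_shape(g, (3, 1)).shape)  # (3, 1); every entry is 2 * 4 = 8
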
diff --git a/minitorch/tensor_data.py b/minitorch/tensor_data.py
index a28b7f8..1d4a0c9 100644
--- a/minitorch/tensor_data.py
+++ b/minitorch/tensor_data.py
@@ -1,17 +1,21 @@
 from __future__ import annotations
+
 import random
 from typing import Iterable, Optional, Sequence, Tuple, Union
+
 import numba
 import numpy as np
 import numpy.typing as npt
 from numpy import array, float64
 from typing_extensions import TypeAlias
+
 from .operators import prod
+
 MAX_DIMS = 32


 class IndexingError(RuntimeError):
-    """Exception raised for indexing errors."""
+    "Exception raised for indexing errors."
     pass


@@ -20,12 +24,13 @@ OutIndex: TypeAlias = npt.NDArray[np.int32]
 Index: TypeAlias = npt.NDArray[np.int32]
 Shape: TypeAlias = npt.NDArray[np.int32]
 Strides: TypeAlias = npt.NDArray[np.int32]
+
 UserIndex: TypeAlias = Sequence[int]
 UserShape: TypeAlias = Sequence[int]
 UserStrides: TypeAlias = Sequence[int]


-def index_to_position(index: Index, strides: Strides) ->int:
+def index_to_position(index: Index, strides: Strides) -> int:
     """
     Converts a multidimensional tensor `index` into a single-dimensional position in
     storage based on strides.
@@ -37,10 +42,12 @@ def index_to_position(index: Index, strides: Strides) ->int:
     Returns:
         Position in storage
     """
-    pass

+    # TODO: Implement for Task 2.1.
+    raise NotImplementedError('Need to implement for Task 2.1')

-def to_index(ordinal: int, shape: Shape, out_index: OutIndex) ->None:
+
+def to_index(ordinal: int, shape: Shape, out_index: OutIndex) -> None:
     """
     Convert an `ordinal` to an index in the `shape`.
     Should ensure that enumerating position 0 ... size of a
@@ -53,11 +60,13 @@ def to_index(ordinal: int, shape: Shape, out_index: OutIndex) ->None:
         out_index : return index corresponding to position.

     """
-    pass
+    # TODO: Implement for Task 2.1.
+    raise NotImplementedError('Need to implement for Task 2.1')


-def broadcast_index(big_index: Index, big_shape: Shape, shape: Shape,
-    out_index: OutIndex) ->None:
+def broadcast_index(
+    big_index: Index, big_shape: Shape, shape: Shape, out_index: OutIndex
+) -> None:
     """
     Convert a `big_index` into `big_shape` to a smaller `out_index`
     into `shape` following broadcasting rules. In this case
@@ -74,10 +83,11 @@ def broadcast_index(big_index: Index, big_shape: Shape, shape: Shape,
     Returns:
         None
     """
-    pass
+    # TODO: Implement for Task 2.2.
+    raise NotImplementedError('Need to implement for Task 2.2')


-def shape_broadcast(shape1: UserShape, shape2: UserShape) ->UserShape:
+def shape_broadcast(shape1: UserShape, shape2: UserShape) -> UserShape:
     """
     Broadcast two shapes to create a new union shape.

@@ -91,7 +101,17 @@ def shape_broadcast(shape1: UserShape, shape2: UserShape) ->UserShape:
     Raises:
         IndexingError : if cannot broadcast
     """
-    pass
+    # TODO: Implement for Task 2.2.
+    raise NotImplementedError('Need to implement for Task 2.2')
+
+
+def strides_from_shape(shape: UserShape) -> UserStrides:
+    layout = [1]
+    offset = 1
+    for s in reversed(shape):
+        layout.append(s * offset)
+        offset = s * offset
+    return tuple(reversed(layout[:-1]))


 class TensorData:
@@ -102,19 +122,24 @@ class TensorData:
     shape: UserShape
     dims: int

-    def __init__(self, storage: Union[Sequence[float], Storage], shape:
-        UserShape, strides: Optional[UserStrides]=None):
+    def __init__(
+        self,
+        storage: Union[Sequence[float], Storage],
+        shape: UserShape,
+        strides: Optional[UserStrides] = None,
+    ):
         if isinstance(storage, np.ndarray):
             self._storage = storage
         else:
             self._storage = array(storage, dtype=float64)
+
         if strides is None:
             strides = strides_from_shape(shape)
-        assert isinstance(strides, tuple), 'Strides must be tuple'
-        assert isinstance(shape, tuple), 'Shape must be tuple'
+
+        assert isinstance(strides, tuple), "Strides must be tuple"
+        assert isinstance(shape, tuple), "Shape must be tuple"
         if len(strides) != len(shape):
-            raise IndexingError(f'Len of strides {strides} must match {shape}.'
-                )
+            raise IndexingError(f"Len of strides {strides} must match {shape}.")
         self._strides = array(strides)
         self._shape = array(shape)
         self.strides = strides
@@ -123,16 +148,72 @@ class TensorData:
         self.shape = shape
         assert len(self._storage) == self.size

-    def is_contiguous(self) ->bool:
+    def to_cuda_(self) -> None:  # pragma: no cover
+        if not numba.cuda.is_cuda_array(self._storage):
+            self._storage = numba.cuda.to_device(self._storage)
+
+    def is_contiguous(self) -> bool:
         """
         Check that the layout is contiguous, i.e. outer dimensions have bigger strides than inner dimensions.

         Returns:
             bool : True if contiguous
         """
-        pass
-
-    def permute(self, *order: int) ->TensorData:
+        last = 1e9
+        for stride in self._strides:
+            if stride > last:
+                return False
+            last = stride
+        return True
+
+    @staticmethod
+    def shape_broadcast(shape_a: UserShape, shape_b: UserShape) -> UserShape:
+        return shape_broadcast(shape_a, shape_b)
+
+    def index(self, index: Union[int, UserIndex]) -> int:
+        if isinstance(index, int):
+            aindex: Index = array([index])
+        if isinstance(index, tuple):
+            aindex = array(index)
+
+        # Pretend 0-dim shape is 1-dim shape of singleton
+        shape = self.shape
+        if len(shape) == 0 and len(aindex) != 0:
+            shape = (1,)
+
+        # Check for errors
+        if aindex.shape[0] != len(self.shape):
+            raise IndexingError(f"Index {aindex} must be size of {self.shape}.")
+        for i, ind in enumerate(aindex):
+            if ind >= self.shape[i]:
+                raise IndexingError(f"Index {aindex} out of range {self.shape}.")
+            if ind < 0:
+                raise IndexingError(f"Negative indexing for {aindex} not supported.")
+
+        # Call fast indexing.
+        return index_to_position(array(index), self._strides)
+
+    def indices(self) -> Iterable[UserIndex]:
+        lshape: Shape = array(self.shape)
+        out_index: Index = array(self.shape)
+        for i in range(self.size):
+            to_index(i, lshape, out_index)
+            yield tuple(out_index)
+
+    def sample(self) -> UserIndex:
+        return tuple((random.randint(0, s - 1) for s in self.shape))
+
+    def get(self, key: UserIndex) -> float:
+        x: float = self._storage[self.index(key)]
+        return x
+
+    def set(self, key: UserIndex, val: float) -> None:
+        self._storage[self.index(key)] = val
+
+    def tuple(self) -> Tuple[Storage, Shape, Strides]:
+        return (self._storage, self._shape, self._strides)
+
+    def permute(self, *order: int) -> TensorData:
         """
         Permute the dimensions of the tensor.

@@ -142,4 +223,33 @@ class TensorData:
         Returns:
             New `TensorData` with the same storage and a new dimension order.
         """
-        pass
+        assert list(sorted(order)) == list(
+            range(len(self.shape))
+        ), f"Must give a position to each dimension. Shape: {self.shape} Order: {order}"
+
+        # TODO: Implement for Task 2.1.
+        raise NotImplementedError('Need to implement for Task 2.1')
+
+    def to_string(self) -> str:
+        s = ""
+        for index in self.indices():
+            l = ""
+            for i in range(len(index) - 1, -1, -1):
+                if index[i] == 0:
+                    l = "\n%s[" % ("\t" * i) + l
+                else:
+                    break
+            s += l
+            v = self.get(index)
+            s += f"{v:3.2f}"
+            l = ""
+            for i in range(len(index) - 1, -1, -1):
+                if index[i] == self.shape[i] - 1:
+                    l += "]"
+                else:
+                    break
+            if l:
+                s += l
+            else:
+                s += " "
+        return s
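
The Task 2.1 indexing stubs above are pure stride arithmetic. A self-contained sketch on plain Python sequences (the reference versions write into preallocated NumPy buffers in place, so the signatures differ slightly):

def index_to_position(index, strides):
    # position = sum_i index[i] * strides[i]
    return sum(i * s for i, s in zip(index, strides))

def to_index(ordinal, shape):
    # Peel remainders off from the innermost dimension outward.
    out = [0] * len(shape)
    for dim in range(len(shape) - 1, -1, -1):
        out[dim] = ordinal % shape[dim]
        ordinal //= shape[dim]
    return out

strides = (4, 2, 1)                           # contiguous strides for shape (2, 2, 2)
print(index_to_position((1, 0, 1), strides))  # 5
print(to_index(5, (2, 2, 2)))                 # [1, 0, 1]
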
diff --git a/minitorch/tensor_functions.py b/minitorch/tensor_functions.py
index 7602588..f1c0547 100644
--- a/minitorch/tensor_functions.py
+++ b/minitorch/tensor_functions.py
@@ -1,98 +1,279 @@
 """
 Implementation of the autodifferentiation Functions for Tensor.
 """
+
 from __future__ import annotations
+
 import random
 from typing import TYPE_CHECKING
+
 import numpy as np
+
 import minitorch
+
 from . import operators
 from .autodiff import Context
 from .tensor_ops import SimpleBackend, TensorBackend
+
 if TYPE_CHECKING:
     from typing import Any, List, Tuple
+
     from .tensor import Tensor
     from .tensor_data import UserIndex, UserShape


-def wrap_tuple(x):
-    """Turn a possible value into a tuple"""
-    pass
+def wrap_tuple(x):  # type: ignore
+    "Turn a possible value into a tuple"
+    if isinstance(x, tuple):
+        return x
+    return (x,)


+# Constructors
 class Function:
-    pass
+    @classmethod
+    def _backward(cls, ctx: Context, grad_out: Tensor) -> Tuple[Tensor, ...]:
+        return wrap_tuple(cls.backward(ctx, grad_out))  # type: ignore
+
+    @classmethod
+    def _forward(cls, ctx: Context, *inps: Tensor) -> Tensor:
+        return cls.forward(ctx, *inps)  # type: ignore
+
+    @classmethod
+    def apply(cls, *vals: Tensor) -> Tensor:
+        raw_vals = []
+        need_grad = False
+        for v in vals:
+            if v.requires_grad():
+                need_grad = True
+            raw_vals.append(v.detach())
+
+        # Create the context.
+        ctx = Context(not need_grad)
+
+        # Call forward with the variables.
+        c = cls._forward(ctx, *raw_vals)
+        # assert isinstance(c, Tensor), "Expected return type Tensor got %s" % (
+        #     type(c)
+        # )
+
+        # Create a new variable from the result with a new history.
+        back = None
+        if need_grad:
+            back = minitorch.History(cls, ctx, vals)
+        return minitorch.Tensor(c._tensor, back, backend=c.backend)


 class Neg(Function):
-    pass
+    @staticmethod
+    def forward(ctx: Context, t1: Tensor) -> Tensor:
+        return t1.f.neg_map(t1)
+
+    @staticmethod
+    def backward(ctx: Context, grad_output: Tensor) -> Tensor:
+        return grad_output.f.neg_map(grad_output)


 class Inv(Function):
-    pass
+    @staticmethod
+    def forward(ctx: Context, t1: Tensor) -> Tensor:
+        ctx.save_for_backward(t1)
+        return t1.f.inv_map(t1)
+
+    @staticmethod
+    def backward(ctx: Context, grad_output: Tensor) -> Tensor:
+        (t1,) = ctx.saved_values
+        return grad_output.f.inv_back_zip(t1, grad_output)


 class Add(Function):
-    pass
+    @staticmethod
+    def forward(ctx: Context, t1: Tensor, t2: Tensor) -> Tensor:
+        return t1.f.add_zip(t1, t2)
+
+    @staticmethod
+    def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, Tensor]:
+        return grad_output, grad_output


 class Mul(Function):
-    pass
+    @staticmethod
+    def forward(ctx: Context, a: Tensor, b: Tensor) -> Tensor:
+        # TODO: Implement for Task 2.3.
+        raise NotImplementedError('Need to implement for Task 2.3')
+
+    @staticmethod
+    def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, Tensor]:
+        # TODO: Implement for Task 2.4.
+        raise NotImplementedError('Need to implement for Task 2.4')


 class Sigmoid(Function):
-    pass
+    @staticmethod
+    def forward(ctx: Context, t1: Tensor) -> Tensor:
+        # TODO: Implement for Task 2.3.
+        raise NotImplementedError('Need to implement for Task 2.3')
+
+    @staticmethod
+    def backward(ctx: Context, grad_output: Tensor) -> Tensor:
+        # TODO: Implement for Task 2.4.
+        raise NotImplementedError('Need to implement for Task 2.4')


 class ReLU(Function):
-    pass
+    @staticmethod
+    def forward(ctx: Context, t1: Tensor) -> Tensor:
+        # TODO: Implement for Task 2.3.
+        raise NotImplementedError('Need to implement for Task 2.3')
+
+    @staticmethod
+    def backward(ctx: Context, grad_output: Tensor) -> Tensor:
+        # TODO: Implement for Task 2.4.
+        raise NotImplementedError('Need to implement for Task 2.4')


 class Log(Function):
-    pass
+    @staticmethod
+    def forward(ctx: Context, t1: Tensor) -> Tensor:
+        # TODO: Implement for Task 2.3.
+        raise NotImplementedError('Need to implement for Task 2.3')
+
+    @staticmethod
+    def backward(ctx: Context, grad_output: Tensor) -> Tensor:
+        # TODO: Implement for Task 2.4.
+        raise NotImplementedError('Need to implement for Task 2.4')


 class Exp(Function):
-    pass
+    @staticmethod
+    def forward(ctx: Context, t1: Tensor) -> Tensor:
+        # TODO: Implement for Task 2.3.
+        raise NotImplementedError('Need to implement for Task 2.3')
+
+    @staticmethod
+    def backward(ctx: Context, grad_output: Tensor) -> Tensor:
+        # TODO: Implement for Task 2.4.
+        raise NotImplementedError('Need to implement for Task 2.4')


 class Sum(Function):
-    pass
+    @staticmethod
+    def forward(ctx: Context, a: Tensor, dim: Tensor) -> Tensor:
+        ctx.save_for_backward(a.shape, dim)
+        return a.f.add_reduce(a, int(dim.item()))
+
+    @staticmethod
+    def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, float]:
+        a_shape, dim = ctx.saved_values
+        return grad_output, 0.0


 class All(Function):
-    pass
+    @staticmethod
+    def forward(ctx: Context, a: Tensor, dim: Tensor) -> Tensor:
+        if dim is not None:
+            return a.f.mul_reduce(a, int(dim.item()))
+        else:
+            return a.f.mul_reduce(a.contiguous().view(int(operators.prod(a.shape))), 0)


 class LT(Function):
-    pass
+    @staticmethod
+    def forward(ctx: Context, a: Tensor, b: Tensor) -> Tensor:
+        # TODO: Implement for Task 2.3.
+        raise NotImplementedError('Need to implement for Task 2.3')
+
+    @staticmethod
+    def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, Tensor]:
+        # TODO: Implement for Task 2.4.
+        raise NotImplementedError('Need to implement for Task 2.4')


 class EQ(Function):
-    pass
+    @staticmethod
+    def forward(ctx: Context, a: Tensor, b: Tensor) -> Tensor:
+        # TODO: Implement for Task 2.3.
+        raise NotImplementedError('Need to implement for Task 2.3')
+
+    @staticmethod
+    def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, Tensor]:
+        # TODO: Implement for Task 2.4.
+        raise NotImplementedError('Need to implement for Task 2.4')


 class IsClose(Function):
-    pass
+    @staticmethod
+    def forward(ctx: Context, a: Tensor, b: Tensor) -> Tensor:
+        # TODO: Implement for Task 2.3.
+        raise NotImplementedError('Need to implement for Task 2.3')


 class Permute(Function):
-    pass
+    @staticmethod
+    def forward(ctx: Context, a: Tensor, order: Tensor) -> Tensor:
+        # TODO: Implement for Task 2.3.
+        raise NotImplementedError('Need to implement for Task 2.3')
+
+    @staticmethod
+    def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, float]:
+        # TODO: Implement for Task 2.4.
+        raise NotImplementedError('Need to implement for Task 2.4')


 class View(Function):
-    pass
+    @staticmethod
+    def forward(ctx: Context, a: Tensor, shape: Tensor) -> Tensor:
+        ctx.save_for_backward(a.shape)
+        assert a._tensor.is_contiguous(), "Must be contiguous to view"
+        shape2 = [int(shape[i]) for i in range(shape.size)]
+        return minitorch.Tensor.make(
+            a._tensor._storage, tuple(shape2), backend=a.backend
+        )
+
+    @staticmethod
+    def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, float]:
+        (original,) = ctx.saved_values
+        return (
+            minitorch.Tensor.make(
+                grad_output._tensor._storage, original, backend=grad_output.backend
+            ),
+            0.0,
+        )


 class Copy(Function):
-    pass
+    @staticmethod
+    def forward(ctx: Context, a: Tensor) -> Tensor:
+        return a.f.id_map(a)
+
+    @staticmethod
+    def backward(ctx: Context, grad_output: Tensor) -> Tensor:
+        return grad_output


 class MatMul(Function):
-    pass
+    @staticmethod
+    def forward(ctx: Context, t1: Tensor, t2: Tensor) -> Tensor:
+        ctx.save_for_backward(t1, t2)
+        return t1.f.matrix_multiply(t1, t2)
+
+    @staticmethod
+    def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, Tensor]:
+        t1, t2 = ctx.saved_values

+        def transpose(a: Tensor) -> Tensor:
+            order = list(range(a.dims))
+            order[-2], order[-1] = order[-1], order[-2]
+            return a._new(a._tensor.permute(*order))

-def zeros(shape: UserShape, backend: TensorBackend=SimpleBackend) ->Tensor:
+        return (
+            grad_output.f.matrix_multiply(grad_output, transpose(t2)),
+            grad_output.f.matrix_multiply(transpose(t1), grad_output),
+        )
+
+
+# Helpers for Constructing tensors
+def zeros(shape: UserShape, backend: TensorBackend = SimpleBackend) -> Tensor:
     """
     Produce a zero tensor of size `shape`.

@@ -103,11 +284,16 @@ def zeros(shape: UserShape, backend: TensorBackend=SimpleBackend) ->Tensor:
     Returns:
         new tensor
     """
-    pass
+    return minitorch.Tensor.make(
+        [0] * int(operators.prod(shape)), shape, backend=backend
+    )


-def rand(shape: UserShape, backend: TensorBackend=SimpleBackend,
-    requires_grad: bool=False) ->Tensor:
+def rand(
+    shape: UserShape,
+    backend: TensorBackend = SimpleBackend,
+    requires_grad: bool = False,
+) -> Tensor:
     """
     Produce a random tensor of size `shape`.

@@ -119,11 +305,18 @@ def rand(shape: UserShape, backend: TensorBackend=SimpleBackend,
     Returns:
         :class:`Tensor` : new tensor
     """
-    pass
-
-
-def _tensor(ls: Any, shape: UserShape, backend: TensorBackend=SimpleBackend,
-    requires_grad: bool=False) ->Tensor:
+    vals = [random.random() for _ in range(int(operators.prod(shape)))]
+    tensor = minitorch.Tensor.make(vals, shape, backend=backend)
+    tensor.requires_grad_(requires_grad)
+    return tensor
+
+
+def _tensor(
+    ls: Any,
+    shape: UserShape,
+    backend: TensorBackend = SimpleBackend,
+    requires_grad: bool = False,
+) -> Tensor:
     """
     Produce a tensor with data ls and shape `shape`.

@@ -136,11 +329,14 @@ def _tensor(ls: Any, shape: UserShape, backend: TensorBackend=SimpleBackend,
     Returns:
         new tensor
     """
-    pass
+    tensor = minitorch.Tensor.make(ls, shape, backend=backend)
+    tensor.requires_grad_(requires_grad)
+    return tensor


-def tensor(ls: Any, backend: TensorBackend=SimpleBackend, requires_grad:
-    bool=False) ->Tensor:
+def tensor(
+    ls: Any, backend: TensorBackend = SimpleBackend, requires_grad: bool = False
+) -> Tensor:
     """
     Produce a tensor with data and shape from ls

@@ -152,4 +348,66 @@ def tensor(ls: Any, backend: TensorBackend=SimpleBackend, requires_grad:
     Returns:
         :class:`Tensor` : new tensor
     """
-    pass
+
+    def shape(ls: Any) -> List[int]:
+        if isinstance(ls, (list, tuple)):
+            return [len(ls)] + shape(ls[0])
+        else:
+            return []
+
+    def flatten(ls: Any) -> List[float]:
+        if isinstance(ls, (list, tuple)):
+            return [y for x in ls for y in flatten(x)]
+        else:
+            return [ls]
+
+    cur = flatten(ls)
+    shape2 = shape(ls)
+    return _tensor(cur, tuple(shape2), backend=backend, requires_grad=requires_grad)
+
+
+# Gradient check for tensors
+
+
+def grad_central_difference(
+    f: Any, *vals: Tensor, arg: int = 0, epsilon: float = 1e-6, ind: UserIndex
+) -> float:
+    x = vals[arg]
+    up = zeros(x.shape)
+    up[ind] = epsilon
+    vals1 = [x if j != arg else x + up for j, x in enumerate(vals)]
+    vals2 = [x if j != arg else x - up for j, x in enumerate(vals)]
+    delta: Tensor = f(*vals1).sum() - f(*vals2).sum()
+
+    return delta[0] / (2.0 * epsilon)
+
+
+def grad_check(f: Any, *vals: Tensor) -> None:
+    for x in vals:
+        x.requires_grad_(True)
+        x.zero_grad_()
+    random.seed(10)
+    out = f(*vals)
+    out.sum().backward()
+    err_msg = """
+
+Gradient check error for function %s.
+
+Input %s
+
+Received derivative %f for argument %d and index %s,
+but was expecting derivative %f from central difference.
+
+"""
+
+    for i, x in enumerate(vals):
+        ind = x._tensor.sample()
+        check = grad_central_difference(f, *vals, arg=i, ind=ind)
+        assert x.grad is not None
+        np.testing.assert_allclose(
+            x.grad[ind],
+            check,
+            1e-2,
+            1e-2,
+            err_msg=err_msg % (f, vals, x.grad[ind], i, ind, check),
+        )
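
Note: Mul, Sigmoid, ReLU, Log, Exp, LT, EQ, IsClose, and Permute above are left as the Task 2.3/2.4 stubs. They follow the same pattern as the completed functions (Add, Inv, MatMul): forward calls a backend map/zip op and saves whatever backward will need, and backward turns the upstream gradient into one gradient per input. A sketch for Mul only, written against the names already in scope in tensor_functions.py and assuming mul_zip is wired up in TensorBackend as shown in tensor_ops.py below:

    class Mul(Function):
        @staticmethod
        def forward(ctx: Context, a: Tensor, b: Tensor) -> Tensor:
            # Both inputs are needed in backward, so save them on the context.
            ctx.save_for_backward(a, b)
            return a.f.mul_zip(a, b)

        @staticmethod
        def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, Tensor]:
            a, b = ctx.saved_values
            # d(a*b)/da = b and d(a*b)/db = a, each scaled by the upstream gradient.
            return (
                grad_output.f.mul_zip(b, grad_output),
                grad_output.f.mul_zip(a, grad_output),
            )

Sigmoid typically saves its own output in forward and reuses it in backward (sigmoid * (1 - sigmoid) * grad), while ReLU saves its input and routes the gradient through relu_back_zip. Sketches like this can be checked numerically with the grad_check helper defined above, e.g. minitorch.grad_check(lambda x, y: x * y, minitorch.rand((2, 3)), minitorch.rand((2, 3))), assuming grad_check and rand are re-exported at the package level.
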
diff --git a/minitorch/tensor_ops.py b/minitorch/tensor_ops.py
index e10ff6c..db82d54 100644
--- a/minitorch/tensor_ops.py
+++ b/minitorch/tensor_ops.py
@@ -1,26 +1,56 @@
 from __future__ import annotations
+
 from typing import TYPE_CHECKING, Callable, Optional, Type
+
 import numpy as np
 from typing_extensions import Protocol
+
 from . import operators
-from .tensor_data import MAX_DIMS, broadcast_index, index_to_position, shape_broadcast, to_index
+from .tensor_data import (
+    MAX_DIMS,
+    broadcast_index,
+    index_to_position,
+    shape_broadcast,
+    to_index,
+)
+
 if TYPE_CHECKING:
     from .tensor import Tensor
     from .tensor_data import Index, Shape, Storage, Strides


 class MapProto(Protocol):
-
-    def __call__(self, x: Tensor, out: Optional[Tensor]=..., /) ->Tensor:
+    def __call__(self, x: Tensor, out: Optional[Tensor] = ..., /) -> Tensor:
         ...


 class TensorOps:
+    @staticmethod
+    def map(fn: Callable[[float], float]) -> MapProto:
+        pass
+
+    @staticmethod
+    def cmap(fn: Callable[[float], float]) -> Callable[[Tensor, Tensor], Tensor]:
+        pass
+
+    @staticmethod
+    def zip(fn: Callable[[float, float], float]) -> Callable[[Tensor, Tensor], Tensor]:
+        pass
+
+    @staticmethod
+    def reduce(
+        fn: Callable[[float, float], float], start: float = 0.0
+    ) -> Callable[[Tensor, int], Tensor]:
+        pass
+
+    @staticmethod
+    def matrix_multiply(a: Tensor, b: Tensor) -> Tensor:
+        raise NotImplementedError("Not implemented in this assignment")
+
     cuda = False


 class TensorBackend:
-
     def __init__(self, ops: Type[TensorOps]):
         """
         Dynamically construct a tensor backend based on a `tensor_ops` object
@@ -34,6 +64,8 @@ class TensorBackend:
             A collection of tensor functions

         """
+
+        # Maps
         self.neg_map = ops.map(operators.neg)
         self.sigmoid_map = ops.map(operators.sigmoid)
         self.relu_map = ops.map(operators.relu)
@@ -42,6 +74,8 @@ class TensorBackend:
         self.id_map = ops.map(operators.id)
         self.id_cmap = ops.cmap(operators.id)
         self.inv_map = ops.map(operators.inv)
+
+        # Zips
         self.add_zip = ops.zip(operators.add)
         self.mul_zip = ops.zip(operators.mul)
         self.lt_zip = ops.zip(operators.lt)
@@ -50,6 +84,8 @@ class TensorBackend:
         self.relu_back_zip = ops.zip(operators.relu_back)
         self.log_back_zip = ops.zip(operators.log_back)
         self.inv_back_zip = ops.zip(operators.inv_back)
+
+        # Reduce
         self.add_reduce = ops.reduce(operators.add, 0.0)
         self.mul_reduce = ops.reduce(operators.mul, 1.0)
         self.matrix_multiply = ops.matrix_multiply
@@ -57,9 +93,8 @@ class TensorBackend:


 class SimpleOps(TensorOps):
-
     @staticmethod
-    def map(fn: Callable[[float], float]) ->MapProto:
+    def map(fn: Callable[[float], float]) -> MapProto:
         """
         Higher-order tensor map function ::

@@ -88,11 +123,21 @@ class SimpleOps(TensorOps):
         Returns:
             new tensor data
         """
-        pass
+
+        f = tensor_map(fn)
+
+        def ret(a: Tensor, out: Optional[Tensor] = None) -> Tensor:
+            if out is None:
+                out = a.zeros(a.shape)
+            f(*out.tuple(), *a.tuple())
+            return out
+
+        return ret

     @staticmethod
-    def zip(fn: Callable[[float, float], float]) ->Callable[['Tensor',
-        'Tensor'], 'Tensor']:
+    def zip(
+        fn: Callable[[float, float], float]
+    ) -> Callable[["Tensor", "Tensor"], "Tensor"]:
         """
         Higher-order tensor zip function ::

@@ -120,11 +165,24 @@ class SimpleOps(TensorOps):
         Returns:
             :class:`TensorData` : new tensor data
         """
-        pass
+
+        f = tensor_zip(fn)
+
+        def ret(a: "Tensor", b: "Tensor") -> "Tensor":
+            if a.shape != b.shape:
+                c_shape = shape_broadcast(a.shape, b.shape)
+            else:
+                c_shape = a.shape
+            out = a.zeros(c_shape)
+            f(*out.tuple(), *a.tuple(), *b.tuple())
+            return out
+
+        return ret

     @staticmethod
-    def reduce(fn: Callable[[float, float], float], start: float=0.0
-        ) ->Callable[['Tensor', int], 'Tensor']:
+    def reduce(
+        fn: Callable[[float, float], float], start: float = 0.0
+    ) -> Callable[["Tensor", int], "Tensor"]:
         """
         Higher-order tensor reduce function. ::

@@ -147,12 +205,34 @@ class SimpleOps(TensorOps):
         Returns:
             :class:`TensorData` : new tensor
         """
-        pass
+        f = tensor_reduce(fn)
+
+        def ret(a: "Tensor", dim: int) -> "Tensor":
+            out_shape = list(a.shape)
+            out_shape[dim] = 1
+
+            # Start from the reduce identity (e.g. 1.0 for mul) rather than zeros.
+            out = a.zeros(tuple(out_shape))
+            out._tensor._storage[:] = start
+
+            f(*out.tuple(), *a.tuple(), dim)
+            return out
+
+        return ret
+
+    @staticmethod
+    def matrix_multiply(a: "Tensor", b: "Tensor") -> "Tensor":
+        raise NotImplementedError("Not implemented in this assignment")
+
     is_cuda = False


-def tensor_map(fn: Callable[[float], float]) ->Callable[[Storage, Shape,
-    Strides, Storage, Shape, Strides], None]:
+# Implementations.
+
+
+def tensor_map(
+    fn: Callable[[float], float]
+) -> Callable[[Storage, Shape, Strides, Storage, Shape, Strides], None]:
     """
     Low-level implementation of tensor map between
     tensors with *possibly different strides*.
@@ -175,11 +255,26 @@ def tensor_map(fn: Callable[[float], float]) ->Callable[[Storage, Shape,
     Returns:
         Tensor map function.
     """
-    pass
-

-def tensor_zip(fn: Callable[[float, float], float]) ->Callable[[Storage,
-    Shape, Strides, Storage, Shape, Strides, Storage, Shape, Strides], None]:
+    def _map(
+        out: Storage,
+        out_shape: Shape,
+        out_strides: Strides,
+        in_storage: Storage,
+        in_shape: Shape,
+        in_strides: Strides,
+    ) -> None:
+        # TODO: Implement for Task 2.3.
+        raise NotImplementedError('Need to implement for Task 2.3')
+
+    return _map
+
+
+def tensor_zip(
+    fn: Callable[[float, float], float]
+) -> Callable[
+    [Storage, Shape, Strides, Storage, Shape, Strides, Storage, Shape, Strides], None
+]:
     """
     Low-level implementation of tensor zip between
     tensors with *possibly different strides*.
@@ -202,11 +297,27 @@ def tensor_zip(fn: Callable[[float, float], float]) ->Callable[[Storage,
     Returns:
         Tensor zip function.
     """
-    pass

-
-def tensor_reduce(fn: Callable[[float, float], float]) ->Callable[[Storage,
-    Shape, Strides, Storage, Shape, Strides, int], None]:
+    def _zip(
+        out: Storage,
+        out_shape: Shape,
+        out_strides: Strides,
+        a_storage: Storage,
+        a_shape: Shape,
+        a_strides: Strides,
+        b_storage: Storage,
+        b_shape: Shape,
+        b_strides: Strides,
+    ) -> None:
+        # TODO: Implement for Task 2.3.
+        raise NotImplementedError('Need to implement for Task 2.3')
+
+    return _zip
+
+
+def tensor_reduce(
+    fn: Callable[[float, float], float]
+) -> Callable[[Storage, Shape, Strides, Storage, Shape, Strides, int], None]:
     """
     Low-level implementation of tensor reduce.

@@ -219,7 +330,20 @@ def tensor_reduce(fn: Callable[[float, float], float]) ->Callable[[Storage,
     Returns:
         Tensor reduce function.
     """
-    pass
+
+    def _reduce(
+        out: Storage,
+        out_shape: Shape,
+        out_strides: Strides,
+        a_storage: Storage,
+        a_shape: Shape,
+        a_strides: Strides,
+        reduce_dim: int,
+    ) -> None:
+        # TODO: Implement for Task 2.3.
+        raise NotImplementedError('Need to implement for Task 2.3')
+
+    return _reduce


 SimpleBackend = TensorBackend(SimpleOps)
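
Note: the _map, _zip, and _reduce bodies above are the Task 2.3 stubs for this file. A sketch of _map, written inside tensor_map where fn is in scope, using the to_index, broadcast_index, and index_to_position helpers imported at the top of the file and assuming they behave as their Task 2.1/2.2 docstrings describe:

    def _map(
        out: Storage,
        out_shape: Shape,
        out_strides: Strides,
        in_storage: Storage,
        in_shape: Shape,
        in_strides: Strides,
    ) -> None:
        out_index = np.zeros(len(out_shape), dtype=np.int32)
        in_index = np.zeros(len(in_shape), dtype=np.int32)
        for i in range(len(out)):
            # Walk every position of the (possibly larger) output tensor,
            to_index(i, out_shape, out_index)
            # find the input position it broadcasts from,
            broadcast_index(out_index, out_shape, in_shape, in_index)
            # and apply fn storage-to-storage through the strides.
            out[index_to_position(out_index, out_strides)] = fn(
                in_storage[index_to_position(in_index, in_strides)]
            )

_zip follows the same pattern with two broadcast input indices, and _reduce iterates over the reduced dimension of the input for each output position instead of broadcasting.
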
diff --git a/minitorch/testing.py b/minitorch/testing.py
index bc2dc74..add0003 100644
--- a/minitorch/testing.py
+++ b/minitorch/testing.py
@@ -1,93 +1,213 @@
+# type: ignore
+
 from typing import Callable, Generic, Iterable, Tuple, TypeVar
+
 import minitorch.operators as operators
-A = TypeVar('A')
+
+A = TypeVar("A")


 class MathTest(Generic[A]):
+    @staticmethod
+    def neg(a: A) -> A:
+        "Negate the argument"
+        return -a
+
+    @staticmethod
+    def addConstant(a: A) -> A:
+        "Add contant to the argument"
+        return 5 + a
+
+    @staticmethod
+    def square(a: A) -> A:
+        "Manual square"
+        return a * a
+
+    @staticmethod
+    def cube(a: A) -> A:
+        "Manual cube"
+        return a * a * a
+
+    @staticmethod
+    def subConstant(a: A) -> A:
+        "Subtract a constant from the argument"
+        return a - 5

     @staticmethod
-    def neg(a: A) ->A:
-        """Negate the argument"""
-        pass
+    def multConstant(a: A) -> A:
+        "Multiply a constant to the argument"
+        return 5 * a

     @staticmethod
-    def addConstant(a: A) ->A:
-        """Add contant to the argument"""
-        pass
+    def div(a: A) -> A:
+        "Divide by a constant"
+        return a / 5

     @staticmethod
-    def square(a: A) ->A:
-        """Manual square"""
-        pass
+    def inv(a: A) -> A:
+        "Invert after adding"
+        return operators.inv(a + 3.5)

     @staticmethod
-    def cube(a: A) ->A:
-        """Manual cube"""
-        pass
+    def sig(a: A) -> A:
+        "Apply sigmoid"
+        return operators.sigmoid(a)

     @staticmethod
-    def subConstant(a: A) ->A:
-        """Subtract a constant from the argument"""
-        pass
+    def log(a: A) -> A:
+        "Apply log to a large value"
+        return operators.log(a + 100000)

     @staticmethod
-    def multConstant(a: A) ->A:
-        """Multiply a constant to the argument"""
-        pass
+    def relu(a: A) -> A:
+        "Apply relu"
+        return operators.relu(a + 5.5)

     @staticmethod
-    def div(a: A) ->A:
-        """Divide by a constant"""
-        pass
+    def exp(a: A) -> A:
+        "Apply exp to a smaller value"
+        return operators.exp(a - 200)

     @staticmethod
-    def inv(a: A) ->A:
-        """Invert after adding"""
-        pass
+    def explog(a: A) -> A:
+        return operators.log(a + 100000) + operators.exp(a - 200)

     @staticmethod
-    def sig(a: A) ->A:
-        """Apply sigmoid"""
-        pass
+    def add2(a: A, b: A) -> A:
+        "Add two arguments"
+        return a + b

     @staticmethod
-    def log(a: A) ->A:
-        """Apply log to a large value"""
-        pass
+    def mul2(a: A, b: A) -> A:
+        "Mul two arguments"
+        return a * b

     @staticmethod
-    def relu(a: A) ->A:
-        """Apply relu"""
-        pass
+    def div2(a: A, b: A) -> A:
+        "Divide two arguments"
+        return a / (b + 5.5)

     @staticmethod
-    def exp(a: A) ->A:
-        """Apply exp to a smaller value"""
-        pass
+    def gt2(a: A, b: A) -> A:
+        return operators.lt(b, a + 1.2)

     @staticmethod
-    def add2(a: A, b: A) ->A:
-        """Add two arguments"""
-        pass
+    def lt2(a: A, b: A) -> A:
+        return operators.lt(a + 1.2, b)

     @staticmethod
-    def mul2(a: A, b: A) ->A:
-        """Mul two arguments"""
-        pass
+    def eq2(a: A, b: A) -> A:
+        return operators.eq(a, (b + 5.5))

     @staticmethod
-    def div2(a: A, b: A) ->A:
-        """Divide two arguments"""
-        pass
+    def sum_red(a: Iterable[A]) -> A:
+        return operators.sum(a)
+
+    @staticmethod
+    def mean_red(a: Iterable[A]) -> A:
+        return operators.sum(a) / float(len(a))
+
+    @staticmethod
+    def mean_full_red(a: Iterable[A]) -> A:
+        return operators.sum(a) / float(len(a))
+
+    @staticmethod
+    def complex(a: A) -> A:
+        return (
+            operators.log(
+                operators.sigmoid(
+                    operators.relu(operators.relu(a * 10 + 7) * 6 + 5) * 10
+                )
+            )
+            / 50
+        )

     @classmethod
-    def _tests(cls) ->Tuple[Tuple[str, Callable[[A], A]], Tuple[str,
-        Callable[[A, A], A]], Tuple[str, Callable[[Iterable[A]], A]]]:
+    def _tests(
+        cls,
+    ) -> Tuple[
+        Tuple[str, Callable[[A], A]],
+        Tuple[str, Callable[[A, A], A]],
+        Tuple[str, Callable[[Iterable[A]], A]],
+    ]:
         """
         Returns a list of all the math tests.
         """
-        pass
+        one_arg = []
+        two_arg = []
+        red_arg = []
+        for k in dir(MathTest):
+            if callable(getattr(MathTest, k)) and not k.startswith("_"):
+                base_fn = getattr(cls, k)
+                # scalar_fn = getattr(cls, k)
+                tup = (k, base_fn)
+                if k.endswith("2"):
+                    two_arg.append(tup)
+                elif k.endswith("red"):
+                    red_arg.append(tup)
+                else:
+                    one_arg.append(tup)
+        return one_arg, two_arg, red_arg
+
+    @classmethod
+    def _comp_testing(cls):
+        one_arg, two_arg, red_arg = cls._tests()
+        one_argv, two_argv, red_argv = MathTest._tests()
+        one_arg = [(n1, f2, f1) for (n1, f1), (n2, f2) in zip(one_arg, one_argv)]
+        two_arg = [(n1, f2, f1) for (n1, f1), (n2, f2) in zip(two_arg, two_argv)]
+        red_arg = [(n1, f2, f1) for (n1, f1), (n2, f2) in zip(red_arg, red_argv)]
+        return one_arg, two_arg, red_arg


 class MathTestVariable(MathTest):
-    pass
+    @staticmethod
+    def inv(a):
+        return 1.0 / (a + 3.5)
+
+    @staticmethod
+    def sig(x):
+        return x.sigmoid()
+
+    @staticmethod
+    def log(x):
+        return (x + 100000).log()
+
+    @staticmethod
+    def relu(x):
+        return (x + 5.5).relu()
+
+    @staticmethod
+    def exp(a):
+        return (a - 200).exp()
+
+    @staticmethod
+    def explog(a):
+        return (a + 100000).log() + (a - 200).exp()
+
+    @staticmethod
+    def sum_red(a):
+        return a.sum(0)
+
+    @staticmethod
+    def mean_red(a):
+        return a.mean(0)
+
+    @staticmethod
+    def mean_full_red(a):
+        return a.mean()
+
+    @staticmethod
+    def eq2(a, b):
+        return a == (b + 5.5)
+
+    @staticmethod
+    def gt2(a, b):
+        return a + 1.2 > b
+
+    @staticmethod
+    def lt2(a, b):
+        return a + 1.2 < b
+
+    @staticmethod
+    def complex(a):
+        return (((a * 10 + 7).relu() * 6 + 5).relu() * 10).sigmoid().log() / 50
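
Note: the naming convention enforced by _tests above is what pairs these helpers with the test suite: names ending in 2 are two-argument functions, names ending in red are reductions, and everything else takes one argument. _comp_testing then zips the plain-float versions from MathTest with the overridden tensor versions from MathTestVariable. A sketch of how such a pair is typically consumed (the exact parametrization lives in the test files; the loop below is illustrative only):

    import minitorch
    from minitorch.testing import MathTestVariable

    one_arg, two_arg, red_arg = MathTestVariable._comp_testing()
    for name, base_fn, tensor_fn in one_arg:
        t = minitorch.tensor([0.25, 0.5, 0.75])
        out = tensor_fn(t)
        for i in range(3):
            # Once the Task 2.x stubs are filled in, the tensor op should agree
            # element-wise with the plain float op.
            assert abs(out[i] - base_fn(t[i])) < 1e-6
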