OpenHands: minitorch

Pytest Summary for tests

status      count
passed         21
failed         86
xfailed         4
skipped         8
total         119
collected     119

Failed pytests:

test_autodiff.py::test_backprop1
@pytest.mark.task1_4
    def test_backprop1() -> None:
        # Example 1: F1(0, v)
        var = minitorch.Scalar(0)
        var2 = Function1.apply(0, var)
>       var2.backward(d_output=5)

tests/test_autodiff.py:109: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
minitorch/scalar.py:147: in backward
    backpropagate(self, d_output)
minitorch/autodiff.py:82: in backpropagate
    ordered = topological_sort(variable)
minitorch/autodiff.py:67: in topological_sort
    visit(variable)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

var = Scalar(10.000000)

    def visit(var: Variable) -> None:
>       if var in visited or var.is_constant():
E       AttributeError: 'Scalar' object has no attribute 'is_constant'

minitorch/autodiff.py:60: AttributeError

test_autodiff.py::test_backprop2
@pytest.mark.task1_4
    def test_backprop2() -> None:
        # Example 2: F1(0, F1(0, v))
        var = minitorch.Scalar(0)
        var2 = Function1.apply(0, var)
        var3 = Function1.apply(0, var2)
>       var3.backward(d_output=5)

tests/test_autodiff.py:119: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
minitorch/scalar.py:147: in backward
    backpropagate(self, d_output)
minitorch/autodiff.py:82: in backpropagate
    ordered = topological_sort(variable)
minitorch/autodiff.py:67: in topological_sort
    visit(variable)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

var = Scalar(20.000000)

    def visit(var: Variable) -> None:
>       if var in visited or var.is_constant():
E       AttributeError: 'Scalar' object has no attribute 'is_constant'

minitorch/autodiff.py:60: AttributeError

test_autodiff.py::test_backprop3
@pytest.mark.task1_4
    def test_backprop3() -> None:
        # Example 3: F1(F1(0, v1), F1(0, v1))
        var1 = minitorch.Scalar(0)
        var2 = Function1.apply(0, var1)
        var3 = Function1.apply(0, var1)
        var4 = Function1.apply(var2, var3)
>       var4.backward(d_output=5)

tests/test_autodiff.py:130: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
minitorch/scalar.py:147: in backward
    backpropagate(self, d_output)
minitorch/autodiff.py:82: in backpropagate
    ordered = topological_sort(variable)
minitorch/autodiff.py:67: in topological_sort
    visit(variable)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

var = Scalar(30.000000)

    def visit(var: Variable) -> None:
>       if var in visited or var.is_constant():
E       AttributeError: 'Scalar' object has no attribute 'is_constant'

minitorch/autodiff.py:60: AttributeError

test_autodiff.py::test_backprop4
@pytest.mark.task1_4
    def test_backprop4() -> None:
        # Example 4: F1(F1(0, F1(0, v0)), F1(0, F1(0, v0)))
        var0 = minitorch.Scalar(0)
        var1 = Function1.apply(0, var0)
        var2 = Function1.apply(0, var1)
        var3 = Function1.apply(0, var1)
        var4 = Function1.apply(var2, var3)
>       var4.backward(d_output=5)

tests/test_autodiff.py:142: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
minitorch/scalar.py:147: in backward
    backpropagate(self, d_output)
minitorch/autodiff.py:82: in backpropagate
    ordered = topological_sort(variable)
minitorch/autodiff.py:67: in topological_sort
    visit(variable)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

var = Scalar(50.000000)

    def visit(var: Variable) -> None:
>       if var in visited or var.is_constant():
E       AttributeError: 'Scalar' object has no attribute 'is_constant'

minitorch/autodiff.py:60: AttributeError
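
All four backprop failures above share one root cause: topological_sort calls var.is_constant(), which Scalar never defines. A minimal sketch of the missing predicate as a method on Scalar, assuming the usual minitorch convention that a constant is a value created without a Function history (the `history` attribute name is taken from the codebase):

    def is_constant(self) -> bool:
        "True if this value was created directly, with no Function history to walk back through."
        return self.history is None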

test_conv.py::test_conv1d_simple
@pytest.mark.task4_1
    def test_conv1d_simple() -> None:
>       t = minitorch.tensor([0, 1, 2, 3]).view(1, 1, 4)
E       AttributeError: 'NoneType' object has no attribute 'view'

tests/test_conv.py:12: AttributeError
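
Here minitorch.tensor returns None, so .view explodes before the convolution is even exercised. A sketch of the factory in the shape the rest of the file expects (the _tensor helper and SimpleBackend default are the codebase's own names in minitorch/tensor_functions.py; treat the exact signatures as assumptions):

    def tensor(ls, backend=SimpleBackend, requires_grad=False):
        "Build a Tensor from a possibly-nested Python list, inferring its shape."
        def shape(ls):
            # Depth of nesting gives the number of dimensions.
            if isinstance(ls, (list, tuple)):
                return [len(ls)] + shape(ls[0])
            return []
        def flatten(ls):
            # Row-major flattening of the nested list into storage order.
            if isinstance(ls, (list, tuple)):
                return [y for x in ls for y in flatten(x)]
            return [ls]
        return _tensor(flatten(ls), tuple(shape(ls)),
                       backend=backend, requires_grad=requires_grad)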

test_conv.py::test_conv1d
@pytest.mark.task4_1
>   @given(tensors(shape=(1, 1, 6)), tensors(shape=(1, 1, 4)))

tests/test_conv.py:24: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1, 1, 4)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError
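
Every @given(tensors(...)) test in this report dies the same way before its body runs: the strategy computes int(minitorch.prod(shape)) and prod returns None. One reduction closes all of them at once (a sketch, assuming prod lives in minitorch/operators.py and is re-exported from the package):

    from functools import reduce as _reduce

    def prod(ls):
        "Product of the elements of ls (1.0 for an empty sequence)."
        return _reduce(lambda x, y: x * y, ls, 1.0)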

test_conv.py::test_conv1d_channel
@pytest.mark.task4_1
>   @given(tensors(shape=(2, 2, 6)), tensors(shape=(3, 2, 2)))

tests/test_conv.py:31: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (3, 2, 2)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_conv.py::test_conv
@pytest.mark.task4_2
>   @given(tensors(shape=(1, 1, 6, 6)), tensors(shape=(1, 1, 2, 4)))

tests/test_conv.py:38: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1, 1, 2, 4)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_conv.py::test_conv_batch
@pytest.mark.task4_2
>   @given(tensors(shape=(2, 1, 6, 6)), tensors(shape=(1, 1, 2, 4)))

tests/test_conv.py:44: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1, 1, 2, 4)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_conv.py::test_conv_channel
@pytest.mark.task4_2
>   @given(tensors(shape=(2, 2, 6, 6)), tensors(shape=(3, 2, 2, 4)))

tests/test_conv.py:51: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (3, 2, 2, 4)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_conv.py::test_conv2
@pytest.mark.task4_2
    def test_conv2() -> None:
>       t = minitorch.tensor([[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]).view(
            1, 1, 4, 4
        )
E       AttributeError: 'NoneType' object has no attribute 'view'

tests/test_conv.py:59: AttributeError

test_module.py::test_stacked_demo
@pytest.mark.task0_4
    def test_stacked_demo() -> None:
        "Check that each of the properties match"
        mod = ModuleA1()
>       np = dict(mod.named_parameters())
E       TypeError: 'NoneType' object is not iterable

tests/test_module.py:49: TypeError
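
named_parameters() is returning None instead of the (name, Parameter) pairs the test turns into a dict. A sketch of the recursive collection, plus the two helpers whose absence shows up later in test_linear and test_nn_size (_parameters and _modules are the attributes Module.__init__ sets up; names assumed from the codebase):

    def named_parameters(self):
        "Collect (dotted_name, Parameter) pairs for this module and all descendants."
        out = []
        for k, v in self.__dict__["_parameters"].items():
            out.append((k, v))
        for name, mod in self.__dict__["_modules"].items():
            for k, v in mod.named_parameters():
                out.append((name + "." + k, v))
        return out

    def parameters(self):
        "Just the Parameter objects, so len(model.parameters()) works."
        return [p for _, p in self.named_parameters()]

    def add_parameter(self, k, v):
        "Wrap v in a Parameter, register it under k, and return it."
        val = Parameter(v, k)
        self.__dict__["_parameters"][k] = val
        return val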

test_module.py::test_module
@pytest.mark.task0_4
>   @given(med_ints, med_ints)

tests/test_module.py:96: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

size_a = 1, size_b = 1

    @pytest.mark.task0_4
    @given(med_ints, med_ints)
    def test_module(size_a: int, size_b: int) -> None:
        "Check the properties of a single module"
        module = Module2()
        module.eval()
>       assert not module.training
E       assert not True
E        +  where True = Module2(\n  (module_c): Module3()\n).training
E       Falsifying example: test_module(
E           size_b=1, size_a=1,
E       )

tests/test_module.py:101: AssertionError

test_module.py::test_stacked_module
@pytest.mark.task0_4
>   @given(med_ints, med_ints, small_floats)

tests/test_module.py:117: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

size_a = 1, size_b = 1, val = 0.0

    @pytest.mark.task0_4
    @given(med_ints, med_ints, small_floats)
    def test_stacked_module(size_a: int, size_b: int, val: float) -> None:
        "Check the properties of a stacked module"
        module = Module1(size_a, size_b, val)
        module.eval()
>       assert not module.training
E       assert not True
E        +  where True = Module1(\n  (module_a): Module2(\n    (module_c): Module3()\n  )\n  (module_b): Module2(\n    (module_c): Module3()\n  )\n).training
E       Falsifying example: test_stacked_module(
E           val=0.0, size_b=1, size_a=1,
E       )

tests/test_module.py:122: AssertionError
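
Both mode-flag failures come from eval() not touching `training`. The fix has to recurse, since the asserts also inspect nested modules (modules() is the accessor the codebase already provides; a sketch):

    def train(self) -> None:
        "Set `training` to True on this module and every descendant."
        self.training = True
        for m in self.modules():
            m.train()

    def eval(self) -> None:
        "Set `training` to False on this module and every descendant."
        self.training = False
        for m in self.modules():
            m.eval()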

test_module.py::test_module_fail_forward
@pytest.mark.task0_4
    @pytest.mark.xfail
    def test_module_fail_forward() -> None:
        mod = minitorch.Module()
>       mod()

tests/test_module.py:154: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = Module(), args = (), kwargs = {}

    def __call__(self, *args: Any, **kwargs: Any) -> Any:
>       return self.forward(*args, **kwargs)
E       TypeError: 'NoneType' object is not callable

minitorch/module.py:79: TypeError

test_module.py::test_parameter
def test_parameter() -> None:
        t = MockParam()
        q = minitorch.Parameter(t)
        print(q)
        assert t.x
        t2 = MockParam()
        q.update(t2)
>       assert t2.x
E       assert False
E        +  where False = .x

tests/test_module.py:184: AssertionError
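
MockParam records requires_grad_ calls in .x, so the failing assert means update() swapped the value in without re-registering it. A sketch of the method on Parameter:

    def update(self, x):
        "Replace the wrapped value, re-marking it as a gradient-carrying parameter."
        self.value = x
        if hasattr(x, "requires_grad_"):
            self.value.requires_grad_(True)
            if self.name:
                self.value.name = self.name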

test_modules.py::test_linear
@given(lists(scalars(), max_size=10), integers(min_value=5, max_value=20))
>   def test_linear(inputs: List[Scalar], out_size: int) -> None:

tests/test_modules.py:61: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_modules.py:63: in test_linear
    mid = lin.forward(inputs)
tests/test_modules.py:53: in forward
    y = [b.value for b in self.bias]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

.0 = 

>   y = [b.value for b in self.bias]
E   AttributeError: 'NoneType' object has no attribute 'value'
E   Falsifying example: test_linear(
E       out_size=5, inputs=[],
E   )

tests/test_modules.py:53: AttributeError

test_modules.py::test_nn_size
def test_nn_size() -> None:
        model = Network2()
>       assert len(model.parameters()) == (
            len(model.layer1.parameters()) + len(model.layer2.parameters())
        )
E       TypeError: object of type 'NoneType' has no len()

tests/test_modules.py:85: TypeError

test_nn.py::test_avg
@pytest.mark.task4_3
>   @given(tensors(shape=(1, 1, 4, 4)))

tests/test_nn.py:12: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1, 1, 4, 4)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_nn.py::test_max
@pytest.mark.task4_4
>   @given(tensors(shape=(2, 3, 4)))

tests/test_nn.py:32: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (2, 3, 4)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_nn.py::test_max_pool
@pytest.mark.task4_4
>   @given(tensors(shape=(1, 1, 4, 4)))

tests/test_nn.py:39: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1, 1, 4, 4)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_nn.py::test_drop
@pytest.mark.task4_4
>   @given(tensors())

tests/test_nn.py:60: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_nn.py::test_softmax
@pytest.mark.task4_4
>   @given(tensors(shape=(1, 1, 4, 4)))

tests/test_nn.py:73: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1, 1, 4, 4)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_nn.py::test_log_softmax
@pytest.mark.task4_4
>   @given(tensors(shape=(1, 1, 4, 4)))

tests/test_nn.py:87: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1, 1, 4, 4)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_operators.py::test_same_as_python
@pytest.mark.task0_1
>   @given(small_floats, small_floats)

tests/test_operators.py:34: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:37: in test_same_as_python
    assert_close(mul(x, y), x * y)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

a = None, b = 0.0

    def assert_close(a: float, b: float) -> None:
>       assert minitorch.operators.is_close(a, b), "Failure x=%f y=%f" % (a, b)
E       TypeError: must be real number, not NoneType
E       Falsifying example: test_same_as_python(
E           y=0.0, x=0.0,
E       )

tests/strategies.py:16: TypeError
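
mul(x, y) is returning None, and the other Task 0.1 stubs that follow fail the same way. The arithmetic ones are one-liners (a sketch):

    def mul(x: float, y: float) -> float:
        "f(x, y) = x * y"
        return x * y

    def add(x: float, y: float) -> float:
        "f(x, y) = x + y"
        return x + y

    def neg(x: float) -> float:
        "f(x) = -x"
        return -x

    def id(x: float) -> float:
        "f(x) = x (identity; exercised directly by test_id below)."
        return x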

test_operators.py::test_relu
+ Exception Group Traceback (most recent call last):
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/runner.py", line 341, in from_call
  |     result: TResult | None = func()
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/runner.py", line 242, in 
  |     lambda: runtest_hook(item=item, **kwds), when=when, reraise=reraise
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_hooks.py", line 513, in __call__
  |     return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_manager.py", line 120, in _hookexec
  |     return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_callers.py", line 182, in _multicall
  |     return outcome.get_result()
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_result.py", line 100, in get_result
  |     raise exc.with_traceback(exc.__traceback__)
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_callers.py", line 167, in _multicall
  |     teardown.throw(outcome._exception)
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/threadexception.py", line 92, in pytest_runtest_call
  |     yield from thread_exception_runtest_hook()
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/threadexception.py", line 68, in thread_exception_runtest_hook
  |     yield
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_callers.py", line 167, in _multicall
  |     teardown.throw(outcome._exception)
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/unraisableexception.py", line 95, in pytest_runtest_call
  |     yield from unraisable_exception_runtest_hook()
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/unraisableexception.py", line 70, in unraisable_exception_runtest_hook
  |     yield
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_callers.py", line 167, in _multicall
  |     teardown.throw(outcome._exception)
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/logging.py", line 846, in pytest_runtest_call
  |     yield from self._runtest_for(item, "call")
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/logging.py", line 829, in _runtest_for
  |     yield
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_callers.py", line 167, in _multicall
  |     teardown.throw(outcome._exception)
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/capture.py", line 880, in pytest_runtest_call
  |     return (yield)
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_callers.py", line 167, in _multicall
  |     teardown.throw(outcome._exception)
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/skipping.py", line 257, in pytest_runtest_call
  |     return (yield)
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_callers.py", line 103, in _multicall
  |     res = hook_impl.function(*args)
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/runner.py", line 174, in pytest_runtest_call
  |     item.runtest()
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/python.py", line 1627, in runtest
  |     self.ihook.pytest_pyfunc_call(pyfuncitem=self)
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_hooks.py", line 513, in __call__
  |     return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_manager.py", line 120, in _hookexec
  |     return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_callers.py", line 139, in _multicall
  |     raise exception.with_traceback(exception.__traceback__)
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_callers.py", line 103, in _multicall
  |     res = hook_impl.function(*args)
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/python.py", line 159, in pytest_pyfunc_call
  |     result = testfunction(**testargs)
  |   File "/testbed/tests/test_operators.py", line 46, in test_relu
  |     @given(small_floats)
  |   File "/testbed/.venv/lib/python3.10/site-packages/hypothesis/core.py", line 1257, in wrapped_test
  |     raise the_error_hypothesis_found
  | exceptiongroup.ExceptionGroup: Hypothesis found 2 distinct failures. (2 sub-exceptions)
  +-+---------------- 1 ----------------
    | Traceback (most recent call last):
    |   File "/testbed/tests/test_operators.py", line 51, in test_relu
    |     assert relu(a) == 0.0
    | AssertionError: assert None == 0.0
    |  +  where None = relu(-1.0)
    | Falsifying example: test_relu(
    |     a=-1.0,
    | )
    +---------------- 2 ----------------
    | Traceback (most recent call last):
    |   File "/testbed/tests/test_operators.py", line 49, in test_relu
    |     assert relu(a) == a
    | AssertionError: assert None == 1.0
    |  +  where None = relu(1.0)
    | Falsifying example: test_relu(
    |     a=1.0,
    | )
    +------------------------------------

test_operators.py::test_relu_back
+ Exception Group Traceback (most recent call last):
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/runner.py", line 341, in from_call
  |     result: TResult | None = func()
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/runner.py", line 242, in 
  |     lambda: runtest_hook(item=item, **kwds), when=when, reraise=reraise
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_hooks.py", line 513, in __call__
  |     return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_manager.py", line 120, in _hookexec
  |     return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_callers.py", line 182, in _multicall
  |     return outcome.get_result()
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_result.py", line 100, in get_result
  |     raise exc.with_traceback(exc.__traceback__)
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_callers.py", line 167, in _multicall
  |     teardown.throw(outcome._exception)
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/threadexception.py", line 92, in pytest_runtest_call
  |     yield from thread_exception_runtest_hook()
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/threadexception.py", line 68, in thread_exception_runtest_hook
  |     yield
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_callers.py", line 167, in _multicall
  |     teardown.throw(outcome._exception)
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/unraisableexception.py", line 95, in pytest_runtest_call
  |     yield from unraisable_exception_runtest_hook()
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/unraisableexception.py", line 70, in unraisable_exception_runtest_hook
  |     yield
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_callers.py", line 167, in _multicall
  |     teardown.throw(outcome._exception)
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/logging.py", line 846, in pytest_runtest_call
  |     yield from self._runtest_for(item, "call")
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/logging.py", line 829, in _runtest_for
  |     yield
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_callers.py", line 167, in _multicall
  |     teardown.throw(outcome._exception)
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/capture.py", line 880, in pytest_runtest_call
  |     return (yield)
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_callers.py", line 167, in _multicall
  |     teardown.throw(outcome._exception)
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/skipping.py", line 257, in pytest_runtest_call
  |     return (yield)
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_callers.py", line 103, in _multicall
  |     res = hook_impl.function(*args)
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/runner.py", line 174, in pytest_runtest_call
  |     item.runtest()
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/python.py", line 1627, in runtest
  |     self.ihook.pytest_pyfunc_call(pyfuncitem=self)
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_hooks.py", line 513, in __call__
  |     return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_manager.py", line 120, in _hookexec
  |     return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_callers.py", line 139, in _multicall
  |     raise exception.with_traceback(exception.__traceback__)
  |   File "/testbed/.venv/lib/python3.10/site-packages/pluggy/_callers.py", line 103, in _multicall
  |     res = hook_impl.function(*args)
  |   File "/testbed/.venv/lib/python3.10/site-packages/_pytest/python.py", line 159, in pytest_pyfunc_call
  |     result = testfunction(**testargs)
  |   File "/testbed/tests/test_operators.py", line 55, in test_relu_back
  |     @given(small_floats, small_floats)
  |   File "/testbed/.venv/lib/python3.10/site-packages/hypothesis/core.py", line 1257, in wrapped_test
  |     raise the_error_hypothesis_found
  | exceptiongroup.ExceptionGroup: Hypothesis found 2 distinct failures. (2 sub-exceptions)
  +-+---------------- 1 ----------------
    | Traceback (most recent call last):
    |   File "/testbed/tests/test_operators.py", line 60, in test_relu_back
    |     assert relu_back(a, b) == 0.0
    | AssertionError: assert None == 0.0
    |  +  where None = relu_back(-1.0, 0.0)
    | Falsifying example: test_relu_back(
    |     b=0.0, a=-1.0,
    | )
    +---------------- 2 ----------------
    | Traceback (most recent call last):
    |   File "/testbed/tests/test_operators.py", line 58, in test_relu_back
    |     assert relu_back(a, b) == b
    | AssertionError: assert None == 0.0
    |  +  where None = relu_back(1.0, 0.0)
    | Falsifying example: test_relu_back(
    |     b=0.0, a=1.0,
    | )
    +------------------------------------
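
Both exception groups above reduce to the same stub behaviour: relu and relu_back return None for every input, so Hypothesis falsifies both branches of each test. Sketches:

    def relu(x: float) -> float:
        "f(x) = x if x is greater than 0, else 0."
        return x if x > 0 else 0.0

    def relu_back(x: float, d: float) -> float:
        "If f = relu, return d * f'(x): the incoming gradient passes through only where x > 0."
        return d if x > 0 else 0.0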

test_operators.py::test_id
@pytest.mark.task0_1
>   @given(small_floats)

tests/test_operators.py:64: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

a = 0.0

    @pytest.mark.task0_1
    @given(small_floats)
    def test_id(a: float) -> None:
>       assert id(a) == a
E       assert None == 0.0
E        +  where None = id(0.0)
E       Falsifying example: test_id(
E           a=0.0,
E       )

tests/test_operators.py:66: AssertionError

test_operators.py::test_lt
@pytest.mark.task0_1
>   @given(small_floats)

tests/test_operators.py:70: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

a = 0.0

    @pytest.mark.task0_1
    @given(small_floats)
    def test_lt(a: float) -> None:
        "Check that a - 1.0 is always less than a"
>       assert lt(a - 1.0, a) == 1.0
E       assert None == 1.0
E        +  where None = lt((0.0 - 1.0), 0.0)
E       Falsifying example: test_lt(
E           a=0.0,
E       )

tests/test_operators.py:73: AssertionError

test_operators.py::test_max
@pytest.mark.task0_1
>   @given(small_floats)

tests/test_operators.py:78: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

a = 0.0

    @pytest.mark.task0_1
    @given(small_floats)
    def test_max(a: float) -> None:
>       assert max(a - 1.0, a) == a
E       assert None == 0.0
E        +  where None = max((0.0 - 1.0), 0.0)
E       Falsifying example: test_max(
E           a=0.0,
E       )

tests/test_operators.py:80: AssertionError

test_operators.py::test_eq
@pytest.mark.task0_1
>   @given(small_floats)

tests/test_operators.py:87: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

a = 0.0

    @pytest.mark.task0_1
    @given(small_floats)
    def test_eq(a: float) -> None:
>       assert eq(a, a) == 1.0
E       assert None == 1.0
E        +  where None = eq(0.0, 0.0)
E       Falsifying example: test_eq(
E           a=0.0,
E       )

tests/test_operators.py:89: AssertionError
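
test_id is covered by the identity sketch above; test_lt, test_max, and test_eq are the comparison stubs. The tests compare against 1.0, so these should return floats rather than bools. is_close is sketched alongside them because its None return is also what makes assert_close in test_scalar.py::test_simple (further down) fail even for equal values:

    def lt(x: float, y: float) -> float:
        "1.0 if x is less than y, else 0.0."
        return 1.0 if x < y else 0.0

    def eq(x: float, y: float) -> float:
        "1.0 if x equals y, else 0.0."
        return 1.0 if x == y else 0.0

    def max(x: float, y: float) -> float:
        "The larger of x and y."
        return x if x > y else y

    def is_close(x: float, y: float) -> float:
        "1.0 if |x - y| < 1e-2, else 0.0 (tolerance per the assignment spec)."
        return 1.0 if abs(x - y) < 1e-2 else 0.0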

test_operators.py::test_sigmoid
@pytest.mark.task0_2
>   @given(small_floats)

tests/test_operators.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

a = 0.0

    @pytest.mark.task0_2
    @given(small_floats)
    def test_sigmoid(a: float) -> None:
        """Check properties of the sigmoid function, specifically
        * It is always between 0.0 and 1.0.
        * one minus sigmoid is the same as sigmoid of the negative
        * It crosses 0 at 0.5
        * It is  strictly increasing.
        """
        # TODO: Implement for Task 0.2.
>       raise NotImplementedError('Need to implement for Task 0.2')
E       NotImplementedError: Need to implement for Task 0.2
E       Falsifying example: test_sigmoid(
E           a=0.0,
E       )

tests/test_operators.py:111: NotImplementedError
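
The stub's docstring already lists the four properties to check. One way to encode them (a sketch, not the graded answer; small_floats and assert_close come from tests/strategies.py):

    @pytest.mark.task0_2
    @given(small_floats)
    def test_sigmoid(a: float) -> None:
        s = sigmoid(a)
        assert 0.0 <= s <= 1.0                 # bounded in [0, 1]
        assert_close(1.0 - s, sigmoid(-a))     # 1 - sig(a) == sig(-a)
        assert_close(sigmoid(0.0), 0.5)        # crosses 0.5 at a = 0
        assert sigmoid(a + 1.0) >= s           # increasing (non-strict under float rounding)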

test_operators.py::test_transitive
@pytest.mark.task0_2
>   @given(small_floats, small_floats, small_floats)

tests/test_operators.py:115: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

a = 0.0, b = 0.0, c = 0.0

    @pytest.mark.task0_2
    @given(small_floats, small_floats, small_floats)
    def test_transitive(a: float, b: float, c: float) -> None:
        "Test the transitive property of less-than (a < b and b < c implies a < c)"
        # TODO: Implement for Task 0.2.
>       raise NotImplementedError('Need to implement for Task 0.2')
E       NotImplementedError: Need to implement for Task 0.2
E       Falsifying example: test_transitive(
E           c=0.0, b=0.0, a=0.0,
E       )

tests/test_operators.py:119: NotImplementedError

test_operators.py::test_symmetric
@pytest.mark.task0_2
    def test_symmetric() -> None:
        """
        Write a test that ensures that :func:`minitorch.operators.mul` is symmetric, i.e.
        gives the same value regardless of the order of its inputs.
        """
        # TODO: Implement for Task 0.2.
>       raise NotImplementedError('Need to implement for Task 0.2')
E       NotImplementedError: Need to implement for Task 0.2

tests/test_operators.py:129: NotImplementedError

test_operators.py::test_distribute
@pytest.mark.task0_2
    def test_distribute() -> None:
        r"""
        Write a test that ensures that your operators distribute, i.e.
        :math:`z \times (x + y) = z \times x + z \times y`
        """
        # TODO: Implement for Task 0.2.
>       raise NotImplementedError('Need to implement for Task 0.2')
E       NotImplementedError: Need to implement for Task 0.2

tests/test_operators.py:139: NotImplementedError

test_operators.py::test_other
@pytest.mark.task0_2
    def test_other() -> None:
        """
        Write a test that ensures some other property holds for your functions.
        """
        # TODO: Implement for Task 0.2.
>       raise NotImplementedError('Need to implement for Task 0.2')
E       NotImplementedError: Need to implement for Task 0.2

tests/test_operators.py:148: NotImplementedError
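
test_transitive, test_symmetric, test_distribute, and test_other are all write-the-test stubs, so they raise NotImplementedError by design until filled in. Possible fillings for the first three (sketches; the grader's exact expectations may differ):

    @pytest.mark.task0_2
    @given(small_floats, small_floats, small_floats)
    def test_transitive(a: float, b: float, c: float) -> None:
        if lt(a, b) == 1.0 and lt(b, c) == 1.0:
            assert lt(a, c) == 1.0

    def test_symmetric() -> None:
        for x, y in [(1.5, 2.0), (-3.0, 4.0), (0.0, 7.0)]:
            assert_close(mul(x, y), mul(y, x))

    def test_distribute() -> None:
        for z, x, y in [(2.0, 3.0, 4.0), (-1.0, 0.5, 2.5)]:
            assert_close(mul(z, add(x, y)), add(mul(z, x), mul(z, y)))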

test_operators.py::test_zip_with
@pytest.mark.task0_3
>   @given(small_floats, small_floats, small_floats, small_floats)

tests/test_operators.py:158: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

a = 0.0, b = 0.0, c = 0.0, d = 0.0

    @pytest.mark.task0_3
    @given(small_floats, small_floats, small_floats, small_floats)
    def test_zip_with(a: float, b: float, c: float, d: float) -> None:
>       x1, x2 = addLists([a, b], [c, d])
E       TypeError: cannot unpack non-iterable NoneType object
E       Falsifying example: test_zip_with(
E           d=0.0, c=0.0, b=0.0, a=0.0,
E       )

tests/test_operators.py:160: TypeError
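
addLists is returning None. In the assignment it is conventionally built from a higher-order zipWith (a sketch; add is the Task 0.1 operator sketched earlier):

    def zipWith(fn):
        "Return a function combining two lists elementwise with fn."
        def apply(ls1, ls2):
            return [fn(x, y) for x, y in zip(ls1, ls2)]
        return apply

    def addLists(ls1, ls2):
        "Elementwise sum of two equal-length lists."
        return zipWith(add)(ls1, ls2)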

test_operators.py::test_sum_distribute
@pytest.mark.task0_3
>   @given(
        lists(small_floats, min_size=5, max_size=5),
        lists(small_floats, min_size=5, max_size=5),
    )

tests/test_operators.py:167: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls1 = [0.0, 0.0, 0.0, 0.0, 0.0], ls2 = [0.0, 0.0, 0.0, 0.0, 0.0]

    @pytest.mark.task0_3
    @given(
        lists(small_floats, min_size=5, max_size=5),
        lists(small_floats, min_size=5, max_size=5),
    )
    def test_sum_distribute(ls1: List[float], ls2: List[float]) -> None:
        """
        Write a test that ensures that the sum of `ls1` plus the sum of `ls2`
        is the same as the sum of each element of `ls1` plus each element of `ls2`.
        """
        # TODO: Implement for Task 0.3.
>       raise NotImplementedError('Need to implement for Task 0.3')
E       NotImplementedError: Need to implement for Task 0.3
E       Falsifying example: test_sum_distribute(
E           ls2=[0.0, 0.0, 0.0, 0.0, 0.0], ls1=[0.0, 0.0, 0.0, 0.0, 0.0],
E       )

tests/test_operators.py:177: NotImplementedError
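
Another write-the-test stub. Reading the docstring literally, with both lists fixed at five elements (a sketch):

    @pytest.mark.task0_3
    @given(
        lists(small_floats, min_size=5, max_size=5),
        lists(small_floats, min_size=5, max_size=5),
    )
    def test_sum_distribute(ls1: List[float], ls2: List[float]) -> None:
        assert_close(sum(ls1) + sum(ls2), sum(addLists(ls1, ls2)))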

test_operators.py::test_sum
@pytest.mark.task0_3
>   @given(lists(small_floats))

tests/test_operators.py:181: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:183: in test_sum
    assert_close(sum(ls), sum(ls))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

a = None, b = None

    def assert_close(a: float, b: float) -> None:
>       assert minitorch.operators.is_close(a, b), "Failure x=%f y=%f" % (a, b)
E       TypeError: must be real number, not NoneType
E       Falsifying example: test_sum(
E           ls=[],
E       )

tests/strategies.py:16: TypeError

test_operators.py::test_prod
@pytest.mark.task0_3
>   @given(small_floats, small_floats, small_floats)

tests/test_operators.py:187: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_operators.py:189: in test_prod
    assert_close(prod([x, y, z]), x * y * z)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

a = None, b = 0.0

    def assert_close(a: float, b: float) -> None:
>       assert minitorch.operators.is_close(a, b), "Failure x=%f y=%f" % (a, b)
E       TypeError: must be real number, not NoneType
E       Falsifying example: test_prod(
E           z=0.0, y=0.0, x=0.0,
E       )

tests/strategies.py:16: TypeError

test_operators.py::test_negList
@pytest.mark.task0_3
>   @given(lists(small_floats))

tests/test_operators.py:193: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

ls = []

    @pytest.mark.task0_3
    @given(lists(small_floats))
    def test_negList(ls: List[float]) -> None:
        check = negList(ls)
>       for i, j in zip(ls, check):
E       TypeError: 'NoneType' object is not iterable
E       Falsifying example: test_negList(
E           ls=[],
E       )

tests/test_operators.py:196: TypeError
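
test_sum, test_prod, and test_negList all hit higher-order stubs that return None (test_sum's falsifying example is the empty list, so the fold needs a start value). Sketches, shadowing the builtins the way minitorch/operators.py does; prod was sketched earlier:

    def map(fn):
        "Return a function applying fn to each element of a list."
        def apply(ls):
            return [fn(x) for x in ls]
        return apply

    def negList(ls):
        "Negate every element of ls."
        return map(neg)(ls)

    def reduce(fn, start):
        "Fold a list with fn from `start`, so folding [] is well-defined."
        def apply(ls):
            acc = start
            for x in ls:
                acc = fn(acc, x)
            return acc
        return apply

    def sum(ls):
        "Sum a list; 0.0 for the empty list, which test_sum exercises."
        return reduce(add, 0.0)(ls)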

test_scalar.py::test_central_diff
@pytest.mark.task1_1
    def test_central_diff() -> None:
>       d = central_difference(operators.id, 5, arg=0)

tests/test_scalar.py:35: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

f = , arg = 0, epsilon = 1e-06, vals = (5,)
vals_plus = [5.000001]

    def central_difference(f: Any, *vals: Any, arg: int=0, epsilon: float=1e-06) -> Any:
        """
        Computes an approximation to the derivative of `f` with respect to one arg.

        See :doc:`derivative` or https://en.wikipedia.org/wiki/Finite_difference for more details.

        Args:
            f : arbitrary function from n-scalar args to one value
            *vals : n-float values $x_0 \\ldots x_{n-1}$
            arg : the number $i$ of the arg to compute the derivative
            epsilon : a small constant

        Returns:
            An approximation of $f'_i(x_0, \\ldots, x_{n-1})$
        """
        vals_plus = list(vals)
        vals_minus = list(vals)
        vals_plus[arg] = vals[arg] + epsilon
        vals_minus[arg] = vals[arg] - epsilon
>       return (f(*vals_plus) - f(*vals_minus)) / (2.0 * epsilon)
E       TypeError: unsupported operand type(s) for -: 'NoneType' and 'NoneType'

minitorch/autodiff.py:24: TypeError

test_scalar.py::test_simple
@given(small_floats, small_floats)
>   def test_simple(a: float, b: float) -> None:

tests/test_scalar.py:55: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_scalar.py:58: in test_simple
    assert_close(c.data, a + b)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

a = 0.0, b = 0.0

    def assert_close(a: float, b: float) -> None:
>       assert minitorch.operators.is_close(a, b), "Failure x=%f y=%f" % (a, b)
E       AssertionError: Failure x=0.000000 y=0.000000
E       Falsifying example: test_simple(
E           b=0.0, a=0.0,
E       )

tests/strategies.py:16: AssertionError

test_scalar.py::test_two_args[fn0]
fn = ('lt', )

    @given(small_scalars, small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:85: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

fn = ('lt', ), t1 = Scalar(0.000000)
t2 = Scalar(0.000000)

    @given(small_scalars, small_scalars)
    @pytest.mark.task1_2
    @pytest.mark.parametrize("fn", two_arg)
    def test_two_args(
        fn: Tuple[str, Callable[[float, float], float], Callable[[Scalar, Scalar], Scalar]],
        t1: Scalar,
        t2: Scalar,
    ) -> None:
>       name, base_fn, scalar_fn = fn
E       ValueError: not enough values to unpack (expected 3, got 2)
E       Falsifying example: test_two_args(
E           t2=Scalar(0.000000), t1=Scalar(0.000000), fn=('lt', lt),
E       )

tests/test_scalar.py:92: ValueError

test_scalar.py::test_two_args[fn1]
fn = ('eq', )

    @given(small_scalars, small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:85: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

fn = ('eq', ), t1 = Scalar(0.000000)
t2 = Scalar(0.000000)

    @given(small_scalars, small_scalars)
    @pytest.mark.task1_2
    @pytest.mark.parametrize("fn", two_arg)
    def test_two_args(
        fn: Tuple[str, Callable[[float, float], float], Callable[[Scalar, Scalar], Scalar]],
        t1: Scalar,
        t2: Scalar,
    ) -> None:
>       name, base_fn, scalar_fn = fn
E       ValueError: not enough values to unpack (expected 3, got 2)
E       Falsifying example: test_two_args(
E           t2=Scalar(0.000000), t1=Scalar(0.000000), fn=('eq', eq),
E       )

tests/test_scalar.py:92: ValueError

test_scalar.py::test_two_args[fn2]
fn = ('gt', . at 0x7fcab582eb00>)

    @given(small_scalars, small_scalars)
>   @pytest.mark.task1_2

tests/test_scalar.py:85: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

fn = ('gt', . at 0x7fcab582eb00>)
t1 = Scalar(0.000000), t2 = Scalar(0.000000)

    @given(small_scalars, small_scalars)
    @pytest.mark.task1_2
    @pytest.mark.parametrize("fn", two_arg)
    def test_two_args(
        fn: Tuple[str, Callable[[float, float], float], Callable[[Scalar, Scalar], Scalar]],
        t1: Scalar,
        t2: Scalar,
    ) -> None:
>       name, base_fn, scalar_fn = fn
E       ValueError: not enough values to unpack (expected 3, got 2)
E       Falsifying example: test_two_args(
E           t2=Scalar(0.000000),
E           t1=Scalar(0.000000),
E           fn=('gt', lambda x, y: operators.lt(y, x)),
E       )

tests/test_scalar.py:92: ValueError

test_scalar.py::test_two_derivative[fn0]
fn = ('lt', )

    @given(small_scalars, small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:112: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

fn = ('lt', ), t1 = Scalar(0.000000)
t2 = Scalar(0.000000)

    @given(small_scalars, small_scalars)
    @pytest.mark.task1_4
    @pytest.mark.parametrize("fn", two_arg)
    def test_two_derivative(
        fn: Tuple[str, Callable[[float, float], float], Callable[[Scalar, Scalar], Scalar]],
        t1: Scalar,
        t2: Scalar,
    ) -> None:
>       name, _, scalar_fn = fn
E       ValueError: not enough values to unpack (expected 3, got 2)
E       Falsifying example: test_two_derivative(
E           t2=Scalar(0.000000), t1=Scalar(0.000000), fn=('lt', lt),
E       )

tests/test_scalar.py:119: ValueError

test_scalar.py::test_two_derivative[fn1]
fn = ('eq', )

    @given(small_scalars, small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:112: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

fn = ('eq', ), t1 = Scalar(0.000000)
t2 = Scalar(0.000000)

    @given(small_scalars, small_scalars)
    @pytest.mark.task1_4
    @pytest.mark.parametrize("fn", two_arg)
    def test_two_derivative(
        fn: Tuple[str, Callable[[float, float], float], Callable[[Scalar, Scalar], Scalar]],
        t1: Scalar,
        t2: Scalar,
    ) -> None:
>       name, _, scalar_fn = fn
E       ValueError: not enough values to unpack (expected 3, got 2)
E       Falsifying example: test_two_derivative(
E           t2=Scalar(0.000000), t1=Scalar(0.000000), fn=('eq', eq),
E       )

tests/test_scalar.py:119: ValueError

test_scalar.py::test_two_derivative[fn2]
fn = ('gt', . at 0x7fcab582eb00>)

    @given(small_scalars, small_scalars)
>   @pytest.mark.task1_4

tests/test_scalar.py:112: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

fn = ('gt', . at 0x7fcab582eb00>)
t1 = Scalar(0.000000), t2 = Scalar(0.000000)

    @given(small_scalars, small_scalars)
    @pytest.mark.task1_4
    @pytest.mark.parametrize("fn", two_arg)
    def test_two_derivative(
        fn: Tuple[str, Callable[[float, float], float], Callable[[Scalar, Scalar], Scalar]],
        t1: Scalar,
        t2: Scalar,
    ) -> None:
>       name, _, scalar_fn = fn
E       ValueError: not enough values to unpack (expected 3, got 2)
E       Falsifying example: test_two_derivative(
E           t2=Scalar(0.000000),
E           t1=Scalar(0.000000),
E           fn=('gt', lambda x, y: operators.lt(y, x)),
E       )

tests/test_scalar.py:119: ValueError
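
All six parametrized failures above unpack fn into three fields, but each two_arg entry only carries two. However the list is produced (in minitorch it comes out of MathTestVariable._comp_testing()), every entry must be the (name, base_fn, scalar_fn) triple the tests destructure. Illustrative shape only; the Scalar-level functions are whatever Task 1.2 defines:

    two_arg = [
        ("lt", operators.lt, lambda x, y: x < y),                    # Scalar.__lt__
        ("eq", operators.eq, lambda x, y: x == y),                   # Scalar.__eq__
        ("gt", lambda x, y: operators.lt(y, x), lambda x, y: y < x),
    ]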

test_tensor.py::test_create
@given(lists(small_floats, min_size=1))
>   def test_create(t1: List[float]) -> None:

tests/test_tensor.py:16: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

t1 = [0.0]

    @given(lists(small_floats, min_size=1))
    def test_create(t1: List[float]) -> None:
        "Test the ability to create an index a 1D Tensor"
        t2 = tensor(t1)
        for i in range(len(t1)):
>           assert t1[i] == t2[i]
E           TypeError: 'NoneType' object is not subscriptable
E           Falsifying example: test_create(
E               t1=[0.0],
E           )

tests/test_tensor.py:20: TypeError

test_tensor.py::test_two_args[fn0]
fn = ('lt', )

    @given(shaped_tensors(2))
>   @pytest.mark.task2_3

tests/test_tensor.py:37: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_tensor.py::test_two_args[fn1]
fn = ('eq', )

    @given(shaped_tensors(2))
>   @pytest.mark.task2_3

tests/test_tensor.py:37: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_tensor.py::test_two_args[fn2]
fn = ('gt', . at 0x7fcab58e81f0>)

    @given(shaped_tensors(2))
>   @pytest.mark.task2_3

tests/test_tensor.py:37: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_tensor.py::test_permute

test_tensor.py::test_permute
@given(data(), tensors())
>   @pytest.mark.task2_4

tests/test_tensor.py:62: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_tensor.py::test_grad_size

test_tensor.py::test_grad_size
def test_grad_size() -> None:
        "Test the size of the gradient (from @WannaFy)"
        a = tensor([1], requires_grad=True)
        b = tensor([[1, 1]], requires_grad=True)

>       c = (a * b).sum()
E       TypeError: unsupported operand type(s) for *: 'NoneType' and 'NoneType'

tests/test_tensor.py:78: TypeError

test_tensor.py::test_two_grad[fn0]

test_tensor.py::test_two_grad[fn0]
fn = ('lt', )

    @given(shaped_tensors(2))
>   @pytest.mark.task2_4

tests/test_tensor.py:101: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_tensor.py::test_two_grad[fn1]

test_tensor.py::test_two_grad[fn1]
fn = ('eq', )

    @given(shaped_tensors(2))
>   @pytest.mark.task2_4

tests/test_tensor.py:101: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_tensor.py::test_two_grad[fn2]

test_tensor.py::test_two_grad[fn2]
fn = ('gt', . at 0x7fcab58e81f0>)

    @given(shaped_tensors(2))
>   @pytest.mark.task2_4

tests/test_tensor.py:101: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_tensor.py::test_two_grad_broadcast[fn0]

test_tensor.py::test_two_grad_broadcast[fn0]
fn = ('lt', )

    @given(shaped_tensors(2))
>   @pytest.mark.task2_4

tests/test_tensor.py:113: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_tensor.py::test_two_grad_broadcast[fn1]

test_tensor.py::test_two_grad_broadcast[fn1]
fn = ('eq', )

    @given(shaped_tensors(2))
>   @pytest.mark.task2_4

tests/test_tensor.py:113: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_tensor.py::test_two_grad_broadcast[fn2]

test_tensor.py::test_two_grad_broadcast[fn2]
fn = ('gt', . at 0x7fcab58e81f0>)

    @given(shaped_tensors(2))
>   @pytest.mark.task2_4

tests/test_tensor.py:113: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_tensor.py::test_fromlist

test_tensor.py::test_fromlist
def test_fromlist() -> None:
        "Test longer from list conversion"
        t = tensor([[2, 3, 4], [4, 5, 7]])
>       assert t.shape == (2, 3)
E       AttributeError: 'NoneType' object has no attribute 'shape'

tests/test_tensor.py:132: AttributeError

test_tensor.py::test_view

test_tensor.py::test_view
def test_view() -> None:
        "Test view"
        t = tensor([[2, 3, 4], [4, 5, 7]])
>       assert t.shape == (2, 3)
E       AttributeError: 'NoneType' object has no attribute 'shape'

tests/test_tensor.py:140: AttributeError

test_tensor.py::test_back_view

test_tensor.py::test_back_view
@given(tensors())
>   def test_back_view(t1: Tensor) -> None:

tests/test_tensor.py:152: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_tensor.py::test_permute_view

test_tensor.py::test_permute_view
@pytest.mark.xfail
    def test_permute_view() -> None:
        t = tensor([[2, 3, 4], [4, 5, 7]])
>       assert t.shape == (2, 3)
E       AttributeError: 'NoneType' object has no attribute 'shape'

tests/test_tensor.py:165: AttributeError

test_tensor.py::test_index

test_tensor.py::test_index
@pytest.mark.xfail
    def test_index() -> None:
        t = tensor([[2, 3, 4], [4, 5, 7]])
>       assert t.shape == (2, 3)
E       AttributeError: 'NoneType' object has no attribute 'shape'

tests/test_tensor.py:173: AttributeError

test_tensor.py::test_fromnumpy

test_tensor.py::test_fromnumpy
def test_fromnumpy() -> None:
        t = tensor([[2, 3, 4], [4, 5, 7]])
        print(t)
>       assert t.shape == (2, 3)
E       AttributeError: 'NoneType' object has no attribute 'shape'

tests/test_tensor.py:180: AttributeError

test_tensor.py::test_reduce_forward_one_dim

test_tensor.py::test_reduce_forward_one_dim
@pytest.mark.task2_3
    def test_reduce_forward_one_dim() -> None:
        # shape (3, 2)
        t = tensor([[2, 3], [4, 6], [5, 7]])

        # here 0 means to reduce the 0th dim, 3 -> nothing
>       t_summed = t.sum(0)
E       AttributeError: 'NoneType' object has no attribute 'sum'

tests/test_tensor.py:196: AttributeError

test_tensor.py::test_reduce_forward_one_dim_2

test_tensor.py::test_reduce_forward_one_dim_2
@pytest.mark.task2_3
    def test_reduce_forward_one_dim_2() -> None:
        # shape (3, 2)
        t = tensor([[2, 3], [4, 6], [5, 7]])

        # here 1 means reduce the 1st dim, 2 -> nothing
>       t_summed_2 = t.sum(1)
E       AttributeError: 'NoneType' object has no attribute 'sum'

tests/test_tensor.py:209: AttributeError

test_tensor.py::test_reduce_forward_all_dims

test_tensor.py::test_reduce_forward_all_dims
@pytest.mark.task2_3
    def test_reduce_forward_all_dims() -> None:
        # shape (3, 2)
        t = tensor([[2, 3], [4, 6], [5, 7]])

        # reduce all dims, (3 -> 1, 2 -> 1)
>       t_summed_all = t.sum()
E       AttributeError: 'NoneType' object has no attribute 'sum'

tests/test_tensor.py:222: AttributeError

test_tensor_data.py::test_layout

test_tensor_data.py::test_layout
@pytest.mark.task2_1
    def test_layout() -> None:
        "Test basis properties of layout and strides"
        data = [0] * 3 * 5
>       tensor_data = minitorch.TensorData(data, (3, 5), (5, 1))

tests/test_tensor_data.py:19: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = 
storage = [0, 0, 0, 0, 0, 0, ...], shape = (3, 5), strides = (5, 1)

    def __init__(self, storage: Union[Sequence[float], Storage], shape: UserShape, strides: Optional[UserStrides]=None):
        if isinstance(storage, np.ndarray):
            self._storage = storage
        else:
            self._storage = array(storage, dtype=float64)
        if strides is None:
            strides = strides_from_shape(shape)
        assert isinstance(strides, tuple), 'Strides must be tuple'
        assert isinstance(shape, tuple), 'Shape must be tuple'
        if len(strides) != len(shape):
            raise IndexingError(f'Len of strides {strides} must match {shape}.')
        self._strides = array(strides)
        self._shape = array(shape)
        self.strides = strides
        self.dims = len(strides)
>       self.size = int(prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

minitorch/tensor_data.py:111: TypeError

test_tensor_data.py::test_layout_bad

test_tensor_data.py::test_layout_bad
@pytest.mark.xfail
    def test_layout_bad() -> None:
        "Test basis properties of layout and strides"
        data = [0] * 3 * 5
>       minitorch.TensorData(data, (3, 5), (6,))

tests/test_tensor_data.py:39: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = 
storage = [0, 0, 0, 0, 0, 0, ...], shape = (3, 5), strides = (6,)

    def __init__(self, storage: Union[Sequence[float], Storage], shape: UserShape, strides: Optional[UserStrides]=None):
        if isinstance(storage, np.ndarray):
            self._storage = storage
        else:
            self._storage = array(storage, dtype=float64)
        if strides is None:
            strides = strides_from_shape(shape)
        assert isinstance(strides, tuple), 'Strides must be tuple'
        assert isinstance(shape, tuple), 'Shape must be tuple'
        if len(strides) != len(shape):
>           raise IndexingError(f'Len of strides {strides} must match {shape}.')
E           minitorch.tensor_data.IndexingError: Len of strides (6,) must match (3, 5).

minitorch/tensor_data.py:106: IndexingError

test_tensor_data.py::test_enumeration

test_tensor_data.py::test_enumeration
@pytest.mark.task2_1
>   @given(tensor_data())

tests/test_tensor_data.py:43: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-inf, max_value=inf, allow_nan=True, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_tensor_data.py::test_index

test_tensor_data.py::test_index
@pytest.mark.task2_1
>   @given(tensor_data())

tests/test_tensor_data.py:61: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-inf, max_value=inf, allow_nan=True, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_tensor_data.py::test_permute

test_tensor_data.py::test_permute
@pytest.mark.task2_1
>   @given(data())

tests/test_tensor_data.py:81: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_data.py:83: in test_permute
    td = data.draw(tensor_data())
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-inf, max_value=inf, allow_nan=True, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'
E       Falsifying example: test_permute(
E           data=data(...),
E       )

tests/tensor_strategies.py:49: TypeError

test_tensor_data.py::test_shape_broadcast

test_tensor_data.py::test_shape_broadcast
@pytest.mark.task2_2
    def test_shape_broadcast() -> None:
        c = minitorch.shape_broadcast((1,), (5, 5))
>       assert c == (5, 5)
E       assert None == (5, 5)

tests/test_tensor_data.py:100: AssertionError
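
Note: minitorch.shape_broadcast is also still a stub returning None here. The rule the test expects is standard broadcasting: align the two shapes from the right, pad the shorter one with leading 1s, and let a size-1 dimension stretch to match its partner. A sketch under those assumptions (IndexingError is the exception type seen in the tracebacks above):

    def shape_broadcast(shape1, shape2):
        "Broadcast two shapes: right-aligned, size-1 dims stretch."
        a, b = list(shape1), list(shape2)
        # Pad the shorter shape with leading 1s
        while len(a) < len(b):
            a.insert(0, 1)
        while len(b) < len(a):
            b.insert(0, 1)
        out = []
        for d1, d2 in zip(a, b):
            if d1 != d2 and d1 != 1 and d2 != 1:
                raise IndexingError(f"Cannot broadcast {shape1} and {shape2}.")
            out.append(max(d1, d2))
        return tuple(out)

For the failing assertion, (1,) is padded to (1, 1) and both dims stretch, giving (5, 5).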

test_tensor_data.py::test_string

test_tensor_data.py::test_string
@given(tensor_data())
>   def test_string(tensor_data: TensorData) -> None:

tests/test_tensor_data.py:124: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-inf, max_value=inf, allow_nan=True, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'

tests/tensor_strategies.py:49: TypeError

test_tensor_general.py::test_create[fast]

test_tensor_general.py::test_create[fast]
backend = 'fast'

    @given(lists(small_floats, min_size=1))
>   @pytest.mark.parametrize("backend", backend_tests)

tests/test_tensor_general.py:45: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

backend = 'fast', t1 = [0.0]

    @given(lists(small_floats, min_size=1))
    @pytest.mark.parametrize("backend", backend_tests)
    def test_create(backend: str, t1: List[float]) -> None:
        "Create different tensors."
        t2 = minitorch.tensor(t1, backend=shared[backend])
        for i in range(len(t1)):
>           assert t1[i] == t2[i]
E           TypeError: 'NoneType' object is not subscriptable
E           Falsifying example: test_create(
E               t1=[0.0], backend='fast',
E           )

tests/test_tensor_general.py:50: TypeError

test_tensor_general.py::test_two_args[fast-fn0]

test_tensor_general.py::test_two_args[fast-fn0]
fn = ('lt', ), backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:71: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:80: in test_two_args
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'
E       Falsifying example: test_two_args(
E           data=data(...), fn=('lt', lt), backend='fast',
E       )

tests/tensor_strategies.py:49: TypeError

test_tensor_general.py::test_two_args[fast-fn1]

test_tensor_general.py::test_two_args[fast-fn1]
fn = ('eq', ), backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:71: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:80: in test_two_args
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'
E       Falsifying example: test_two_args(
E           data=data(...), fn=('eq', eq), backend='fast',
E       )

tests/tensor_strategies.py:49: TypeError

test_tensor_general.py::test_two_args[fast-fn2]

test_tensor_general.py::test_two_args[fast-fn2]
fn = ('gt', . at 0x7fcab58eab90>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:71: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:80: in test_two_args
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'
E       Falsifying example: test_two_args(
E           data=data(...), fn=('gt', lambda x, y: operators.lt(y, x)), backend='fast',
E       )

tests/tensor_strategies.py:49: TypeError

test_tensor_general.py::test_two_grad[fast-fn0]

test_tensor_general.py::test_two_grad[fast-fn0]
fn = ('lt', ), backend = 'fast'

    @given(data())
>   @settings(max_examples=50)

tests/test_tensor_general.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:111: in test_two_grad
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'
E       Falsifying example: test_two_grad(
E           data=data(...), fn=('lt', lt), backend='fast',
E       )

tests/tensor_strategies.py:49: TypeError

test_tensor_general.py::test_two_grad[fast-fn1]

test_tensor_general.py::test_two_grad[fast-fn1]
fn = ('eq', ), backend = 'fast'

    @given(data())
>   @settings(max_examples=50)

tests/test_tensor_general.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:111: in test_two_grad
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'
E       Falsifying example: test_two_grad(
E           data=data(...), fn=('eq', eq), backend='fast',
E       )

tests/tensor_strategies.py:49: TypeError

test_tensor_general.py::test_two_grad[fast-fn2]

test_tensor_general.py::test_two_grad[fast-fn2]
fn = ('gt', . at 0x7fcab58eab90>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=50)

tests/test_tensor_general.py:102: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:111: in test_two_grad
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'
E       Falsifying example: test_two_grad(
E           data=data(...), fn=('gt', lambda x, y: operators.lt(y, x)), backend='fast',
E       )

tests/tensor_strategies.py:49: TypeError

test_tensor_general.py::test_two_grad_broadcast[fast-fn0]

test_tensor_general.py::test_two_grad_broadcast[fast-fn0]
fn = ('lt', ), backend = 'fast'

    @given(data())
>   @settings(max_examples=25)

tests/test_tensor_general.py:308: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:317: in test_two_grad_broadcast
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'
E       Falsifying example: test_two_grad_broadcast(
E           data=data(...), fn=('lt', lt), backend='fast',
E       )

tests/tensor_strategies.py:49: TypeError

test_tensor_general.py::test_two_grad_broadcast[fast-fn1]

test_tensor_general.py::test_two_grad_broadcast[fast-fn1]
fn = ('eq', ), backend = 'fast'

    @given(data())
>   @settings(max_examples=25)

tests/test_tensor_general.py:308: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:317: in test_two_grad_broadcast
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'
E       Falsifying example: test_two_grad_broadcast(
E           data=data(...), fn=('eq', eq), backend='fast',
E       )

tests/tensor_strategies.py:49: TypeError

test_tensor_general.py::test_two_grad_broadcast[fast-fn2]

test_tensor_general.py::test_two_grad_broadcast[fast-fn2]
fn = ('gt', . at 0x7fcab58eab90>)
backend = 'fast'

    @given(data())
>   @settings(max_examples=25)

tests/test_tensor_general.py:308: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:317: in test_two_grad_broadcast
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:90: in shaped_tensors
    td = draw(tensor_data(numbers))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'
E       Falsifying example: test_two_grad_broadcast(
E           data=data(...), fn=('gt', lambda x, y: operators.lt(y, x)), backend='fast',
E       )

tests/tensor_strategies.py:49: TypeError

test_tensor_general.py::test_permute[fast]

test_tensor_general.py::test_permute[fast]
backend = 'fast'

    @given(data())
>   @settings(max_examples=100)

tests/test_tensor_general.py:328: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:332: in test_permute
    t1 = data.draw(tensors(backend=shared[backend]))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (1,)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'
E       Falsifying example: test_permute(
E           data=data(...), backend='fast',
E       )

tests/tensor_strategies.py:49: TypeError

test_tensor_general.py::test_mm2

test_tensor_general.py::test_mm2
@pytest.mark.task3_2
    def test_mm2() -> None:
        a = minitorch.rand((2, 3), backend=FastTensorBackend)
        b = minitorch.rand((3, 4), backend=FastTensorBackend)
>       c = a @ b
E       TypeError: unsupported operand type(s) for @: 'NoneType' and 'NoneType'

tests/test_tensor_general.py:345: TypeError

test_tensor_general.py::test_bmm[fast]

test_tensor_general.py::test_bmm[fast]
backend = 'fast'

    @given(data())
>   @pytest.mark.parametrize("backend", matmul_tests)

tests/test_tensor_general.py:361: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
tests/test_tensor_general.py:370: in test_bmm
    a = data.draw(tensors(backend=shared[backend], shape=(D, A, B)))
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1813: in draw
    result = self.conjecture_data.draw(strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:946: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
tests/tensor_strategies.py:76: in tensors
    td = draw(tensor_data(numbers, shape=shape))
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/lazy.py:156: in do_draw
    return data.draw(self.wrapped_strategy)
.venv/lib/python3.10/site-packages/hypothesis/internal/conjecture/data.py:941: in draw
    return strategy.do_draw(self)
.venv/lib/python3.10/site-packages/hypothesis/strategies/_internal/core.py:1485: in do_draw
    return self.definition(data.draw, *self.args, **self.kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

draw = 
numbers = FloatStrategy(min_value=-100.0, max_value=100.0, allow_nan=False, smallest_nonzero_magnitude=5e-324)
shape = (2, 2, 2)

    @composite
    def tensor_data(
        draw: DrawFn,
        numbers: SearchStrategy[float] = floats(),
        shape: Optional[UserShape] = None,
    ) -> TensorData:
        if shape is None:
            shape = draw(shapes())
>       size = int(minitorch.prod(shape))
E       TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType'
E       Falsifying example: test_bmm(
E           data=data(...), backend='fast',
E       )
E       Draw 1: 2
E       Draw 2: 2
E       Draw 3: 2
E       Draw 4: 2

tests/tensor_strategies.py:49: TypeError

Patch diff

diff --git a/minitorch/autodiff.py b/minitorch/autodiff.py
index cb9a430..84dbd21 100644
--- a/minitorch/autodiff.py
+++ b/minitorch/autodiff.py
@@ -17,11 +17,31 @@ def central_difference(f: Any, *vals: Any, arg: int=0, epsilon: float=1e-06) ->
     Returns:
         An approximation of $f'_i(x_0, \\ldots, x_{n-1})$
     """
-    pass
+    vals_plus = list(vals)
+    vals_minus = list(vals)
+    vals_plus[arg] = vals[arg] + epsilon
+    vals_minus[arg] = vals[arg] - epsilon
+    return (f(*vals_plus) - f(*vals_minus)) / (2.0 * epsilon)
 variable_count = 1

 class Variable(Protocol):
-    pass
+    """A variable in a computation graph."""
+
+    def accumulate_derivative(self, x: Any) -> None:
+        """Add `x` to the derivative accumulated on this variable."""
+        pass
+
+    def is_constant(self) -> bool:
+        """Is this a constant variable?"""
+        pass
+
+    def is_leaf(self) -> bool:
+        """Is this a leaf variable?"""
+        pass
+
+    def parents(self) -> Iterable["Variable"]:
+        """Get the parents of this variable."""
+        pass

 def topological_sort(variable: Variable) -> Iterable[Variable]:
     """
@@ -33,7 +53,19 @@ def topological_sort(variable: Variable) -> Iterable[Variable]:
     Returns:
         Non-constant Variables in topological order starting from the right.
     """
-    pass
+    visited = set()
+    order = []
+
+    def visit(var: Variable) -> None:
+        if var in visited or var.is_constant():
+            return
+        visited.add(var)
+        for parent in var.parents():
+            visit(parent)
+        order.insert(0, var)
+
+    visit(variable)
+    return order

 def backpropagate(variable: Variable, deriv: Any) -> None:
     """
@@ -46,7 +78,26 @@ def backpropagate(variable: Variable, deriv: Any) -> None:

     No return. Should write its results to the derivative values of each leaf through `accumulate_derivative`.
     """
-    pass
+    # Get variables in topological order
+    ordered = topological_sort(variable)
+
+    # Store derivatives for each variable
+    derivatives = {variable: deriv}
+
+    # Walk the ordered list from the output variable back toward the leaves
+    for var in ordered:
+        if var.is_leaf():
+            var.accumulate_derivative(derivatives[var])
+        else:
+            # Get the derivative for this variable
+            d = derivatives[var]
+            # Get the parents and their derivatives through the chain rule
+            for parent, parent_deriv in var.chain_rule(d):
+                # Initialize derivative for parent if not seen before
+                if parent not in derivatives:
+                    derivatives[parent] = 0.0
+                # Add to parent's derivative
+                derivatives[parent] += parent_deriv

 @dataclass
 class Context:
@@ -58,4 +109,25 @@ class Context:

     def save_for_backward(self, *values: Any) -> None:
         """Store the given `values` if they need to be used during backpropagation."""
-        pass
\ No newline at end of file
+        if not self.no_grad:
+            self.saved_values = values
+
+def grad_check(f: Any, *vals: Any, arg: int=0, epsilon: float=1e-06, rtol: float=0.001, atol: float=1e-08) -> bool:
+    """
+    Check that the computed gradient matches the numerical approximation.
+
+    Args:
+        f : arbitrary function from n-scalars to 1-scalar
+        *vals : n-float values $x_0 \ldots x_{n-1}$
+        arg : the argument to compute the gradient with respect to
+        epsilon : a small constant
+        rtol : relative tolerance
+        atol : absolute tolerance
+
+    Returns:
+        bool : whether the numerical and computed gradient are close
+    """
+    numerical = central_difference(f, *vals, arg=arg, epsilon=epsilon)
+    computed = f(*vals)
+    backpropagate(computed, 1.0)
+    return abs(numerical - vals[arg].derivative) <= (atol + rtol * abs(numerical))
\ No newline at end of file
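
Assuming the Scalar API the tests above already exercise (Scalar arithmetic, backward(d_output=...), and a .derivative field on leaves), a quick smoke test of the patched autodiff routines might look like this; it is a sketch, not part of the patch:

    import minitorch
    from minitorch.autodiff import central_difference

    # f(x) = x * x + x, so f'(x) = 2x + 1
    def f(x: minitorch.Scalar) -> minitorch.Scalar:
        return x * x + x

    x = minitorch.Scalar(2.0)
    out = f(x)
    out.backward(d_output=1.0)       # runs topological_sort + backpropagate
    print(x.derivative)              # expect 5.0
    print(central_difference(f, x))  # numerical estimate, also close to 5.0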
diff --git a/minitorch/datasets.py b/minitorch/datasets.py
index 46322bd..1112bb6 100644
--- a/minitorch/datasets.py
+++ b/minitorch/datasets.py
@@ -8,4 +8,98 @@ class Graph:
     N: int
     X: List[Tuple[float, float]]
     y: List[int]
+
+def simple(N: int = 100) -> Graph:
+    """Simple dataset with two linearly separable clouds of points"""
+    X = []
+    y = []
+    for i in range(N):
+        x = random.uniform(-1.0, 1.0)
+        y_val = random.uniform(-1.0, 1.0)
+        label = 1 if x + y_val > 0 else 0
+        X.append((x, y_val))
+        y.append(label)
+    return Graph(N, X, y)
+
+def diag(N: int = 100) -> Graph:
+    """Dataset with two diagonal lines of points"""
+    X = []
+    y = []
+    for i in range(N):
+        if random.random() > 0.5:
+            x = random.uniform(-1.0, 1.0)
+            y_val = x + 0.2 * random.uniform(-1.0, 1.0)
+            label = 1
+        else:
+            x = random.uniform(-1.0, 1.0)
+            y_val = -x + 0.2 * random.uniform(-1.0, 1.0)
+            label = 0
+        X.append((x, y_val))
+        y.append(label)
+    return Graph(N, X, y)
+
+def split(N: int = 100) -> Graph:
+    """Dataset split into vertical bands; the outer bands are one class"""
+    X = []
+    y = []
+    for i in range(N):
+        x = random.uniform(-1.0, 1.0)
+        y_val = random.uniform(-1.0, 1.0)
+        # Left and right thirds are class 1, the middle band is class 0
+        # (the previous sign-based labeling duplicated the xor dataset).
+        label = 1 if x < -0.4 or x > 0.4 else 0
+        X.append((x, y_val))
+        y.append(label)
+    return Graph(N, X, y)
+
+def xor(N: int = 100) -> Graph:
+    """Dataset with XOR pattern"""
+    X = []
+    y = []
+    for i in range(N):
+        x = random.uniform(-1.0, 1.0)
+        y_val = random.uniform(-1.0, 1.0)
+        if (x > 0 and y_val > 0) or (x < 0 and y_val < 0):
+            label = 1
+        else:
+            label = 0
+        X.append((x, y_val))
+        y.append(label)
+    return Graph(N, X, y)
+
+def circle(N: int = 100) -> Graph:
+    """Dataset with points in a circle"""
+    X = []
+    y = []
+    for i in range(N):
+        x = random.uniform(-1.0, 1.0)
+        y_val = random.uniform(-1.0, 1.0)
+        if x * x + y_val * y_val < 0.5:
+            label = 1
+        else:
+            label = 0
+        X.append((x, y_val))
+        y.append(label)
+    return Graph(N, X, y)
+
+def spiral(N: int = 100) -> Graph:
+    """Dataset with two interleaved spiral arms"""
+    X = []
+    y = []
+    for i in range(N):
+        # Radius grows with the angle so each class traces a spiral arm;
+        # the second arm is offset by pi so the two classes interleave.
+        # (Drawing radius independently made the classes indistinguishable.)
+        angle = random.uniform(0, 4 * math.pi)
+        radius = angle / (4 * math.pi)
+        if random.random() > 0.5:
+            x = radius * math.cos(angle)
+            y_val = radius * math.sin(angle)
+            label = 1
+        else:
+            x = radius * math.cos(angle + math.pi)
+            y_val = radius * math.sin(angle + math.pi)
+            label = 0
+        X.append((x, y_val))
+        y.append(label)
+    return Graph(N, X, y)
+
 datasets = {'Simple': simple, 'Diag': diag, 'Split': split, 'Xor': xor, 'Circle': circle, 'Spiral': spiral}
\ No newline at end of file
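A quick smoke test of the generators above (assuming `datasets` is importable
from minitorch/datasets.py as written):

    from minitorch.datasets import datasets

    graph = datasets["Xor"](100)
    print(graph.N, len(graph.X))   # 100 100
    print(sum(graph.y))            # roughly half the points labeled 1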
diff --git a/minitorch/fast_conv.py b/minitorch/fast_conv.py
index eddae84..137698a 100644
--- a/minitorch/fast_conv.py
+++ b/minitorch/fast_conv.py
@@ -41,7 +41,28 @@ def _tensor_conv1d(out: Tensor, out_shape: Shape, out_strides: Strides, out_size
         weight_strides (Strides): strides for `weight` tensor.
         reverse (bool): anchor weight at left or right
     """
-    pass
+    batch_, in_channels, width = input_shape
+    out_channels, _, k_width = weight_shape
+
+    # `input` and `weight` arrive as raw storages (see Conv1dFun.apply),
+    # so positions are computed directly from the stride arrays.
+    for batch in prange(batch_):
+        for out_channel in range(out_channels):
+            for w in range(width):
+                # Sum input * weight over channels and kernel positions
+                acc = 0.0
+                for in_channel in range(in_channels):
+                    for k in range(k_width):
+                        # reverse anchors the kernel on the right (looks backward)
+                        iw = w - k if reverse else w + k
+                        if 0 <= iw < width:
+                            in_pos = (batch * input_strides[0]
+                                      + in_channel * input_strides[1]
+                                      + iw * input_strides[2])
+                            w_pos = (out_channel * weight_strides[0]
+                                     + in_channel * weight_strides[1]
+                                     + k * weight_strides[2])
+                            acc += input[in_pos] * weight[w_pos]
+                out_pos = (batch * out_strides[0]
+                           + out_channel * out_strides[1]
+                           + w * out_strides[2])
+                out[out_pos] = acc
 tensor_conv1d = njit(parallel=True)(_tensor_conv1d)

 class Conv1dFun(Function):
@@ -59,7 +80,28 @@ class Conv1dFun(Function):
         Returns:
             batch x out_channel x w
         """
-        pass
+        ctx.save_for_backward(input, weight)
+        batch, in_channels, width = input.shape
+        out_channels, in_channels2, k_width = weight.shape
+        assert in_channels == in_channels2
+
+        # Create output tensor
+        out = input.zeros((batch, out_channels, width))
+
+        # The compiled kernel takes raw (storage, shape, strides) triples,
+        # which Tensor.tuple() (minitorch/tensor.py) provides.
+        tensor_conv1d(
+            *out.tuple(), out.size, *input.tuple(), *weight.tuple(), False
+        )
+        return out
 conv1d = Conv1dFun.apply

 def _tensor_conv2d(out: Tensor, out_shape: Shape, out_strides: Strides, out_size: int, input: Tensor, input_shape: Shape, input_strides: Strides, weight: Tensor, weight_shape: Shape, weight_strides: Strides, reverse: bool) -> None:
@@ -95,7 +137,37 @@ def _tensor_conv2d(out: Tensor, out_shape: Shape, out_strides: Strides, out_size
         weight_strides (Strides): strides for `weight` tensor.
         reverse (bool): anchor weight at top-left or bottom-right
     """
-    pass
+    batch_, in_channels, height, width = input_shape
+    out_channels, _, k_height, k_width = weight_shape
+
+    # `input` and `weight` arrive as raw storages (see Conv2dFun.apply),
+    # so positions are computed directly from the stride arrays.
+    for batch in prange(batch_):
+        for out_channel in range(out_channels):
+            for h in range(height):
+                for w in range(width):
+                    # Sum input * weight over channels and kernel positions
+                    acc = 0.0
+                    for in_channel in range(in_channels):
+                        for k_h in range(k_height):
+                            for k_w in range(k_width):
+                                # reverse anchors the kernel at the bottom-right
+                                ih = h - k_h if reverse else h + k_h
+                                iw = w - k_w if reverse else w + k_w
+                                if 0 <= ih < height and 0 <= iw < width:
+                                    in_pos = (batch * input_strides[0]
+                                              + in_channel * input_strides[1]
+                                              + ih * input_strides[2]
+                                              + iw * input_strides[3])
+                                    w_pos = (out_channel * weight_strides[0]
+                                             + in_channel * weight_strides[1]
+                                             + k_h * weight_strides[2]
+                                             + k_w * weight_strides[3])
+                                    acc += input[in_pos] * weight[w_pos]
+                    out_pos = (batch * out_strides[0]
+                               + out_channel * out_strides[1]
+                               + h * out_strides[2]
+                               + w * out_strides[3])
+                    out[out_pos] = acc
 tensor_conv2d = njit(parallel=True, fastmath=True)(_tensor_conv2d)

 class Conv2dFun(Function):
@@ -113,5 +185,26 @@ class Conv2dFun(Function):
         Returns:
             (:class:`Tensor`) : batch x out_channel x h x w
         """
-        pass
+        ctx.save_for_backward(input, weight)
+        batch, in_channels, height, width = input.shape
+        out_channels, in_channels2, k_height, k_width = weight.shape
+        assert in_channels == in_channels2
+
+        # Create output tensor
+        out = input.zeros((batch, out_channels, height, width))
+
+        # The compiled kernel takes raw (storage, shape, strides) triples,
+        # which Tensor.tuple() (minitorch/tensor.py) provides.
+        tensor_conv2d(
+            *out.tuple(), out.size, *input.tuple(), *weight.tuple(), False
+        )
+        return out
 conv2d = Conv2dFun.apply
\ No newline at end of file
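For reference, a sketch of calling the forward convolutions above; it assumes
minitorch's `rand` helper and that fast_conv is importable as shown:

    import minitorch
    from minitorch.fast_conv import conv1d, conv2d

    input = minitorch.rand((1, 2, 8))     # batch x in_channels x width
    weight = minitorch.rand((4, 2, 3))    # out_channels x in_channels x k_width
    out = conv1d(input, weight)
    print(out.shape)                      # (1, 4, 8): same width, kernel anchored left

    img = minitorch.rand((1, 2, 6, 6))
    kern = minitorch.rand((4, 2, 3, 3))
    print(conv2d(img, kern).shape)        # (1, 4, 6, 6)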
diff --git a/minitorch/fast_ops.py b/minitorch/fast_ops.py
index ff6c24b..27cfb09 100644
--- a/minitorch/fast_ops.py
+++ b/minitorch/fast_ops.py
@@ -17,17 +17,75 @@ class FastOps(TensorOps):
     @staticmethod
     def map(fn: Callable[[float], float]) -> MapProto:
         """See `tensor_ops.py`"""
-        pass
+        # JIT-compile fn once so the njit kernel below can call it;
+        # an interpreted Python function cannot be invoked in nopython mode.
+        f = tensor_map(njit()(fn))
+
+        def _map(a: Tensor, out: Optional[Tensor] = None) -> Tensor:
+            if out is None:
+                out = a.zeros(a.shape)
+            f(
+                out._tensor._storage,
+                out._tensor._shape,
+                out._tensor._strides,
+                a._tensor._storage,
+                a._tensor._shape,
+                a._tensor._strides,
+            )
+            return out
+        return _map
+
+    @staticmethod
+    def cmap(fn: Callable[[float], float]) -> MapProto:
+        """See `tensor_ops.py`"""
+        # JIT-compile fn once so the njit kernel below can call it
+        f = tensor_map(njit()(fn))
+
+        def _cmap(a: Tensor, out: Optional[Tensor] = None) -> Tensor:
+            if out is None:
+                out = a.zeros(a.shape)
+            f(
+                out._tensor._storage,
+                out._tensor._shape,
+                out._tensor._strides,
+                a._tensor._storage,
+                a._tensor._shape,
+                a._tensor._strides,
+            )
+            return out
+        return _cmap

     @staticmethod
     def zip(fn: Callable[[float, float], float]) -> Callable[[Tensor, Tensor], Tensor]:
         """See `tensor_ops.py`"""
-        pass
+        # JIT-compile fn once so the njit kernel below can call it
+        f = tensor_zip(njit()(fn))
+
+        def _zip(a: Tensor, b: Tensor) -> Tensor:
+            c_shape = shape_broadcast(a.shape, b.shape)
+            out = a.zeros(c_shape)
+            f(
+                out._tensor._storage,
+                out._tensor._shape,
+                out._tensor._strides,
+                a._tensor._storage,
+                a._tensor._shape,
+                a._tensor._strides,
+                b._tensor._storage,
+                b._tensor._shape,
+                b._tensor._strides,
+            )
+            return out
+        return _zip

     @staticmethod
     def reduce(fn: Callable[[float, float], float], start: float=0.0) -> Callable[[Tensor, int], Tensor]:
         """See `tensor_ops.py`"""
-        pass
+        # JIT-compile fn once so the njit kernel below can call it
+        f = tensor_reduce(njit()(fn))
+
+        def _reduce(a: Tensor, dim: int) -> Tensor:
+            out_shape = list(a.shape)
+            out_shape[dim] = 1
+            out = a.zeros(tuple(out_shape))
+            f(
+                out._tensor._storage,
+                out._tensor._shape,
+                out._tensor._strides,
+                a._tensor._storage,
+                a._tensor._shape,
+                a._tensor._strides,
+                dim,
+            )
+            return out
+        return _reduce

     @staticmethod
     def matrix_multiply(a: Tensor, b: Tensor) -> Tensor:
@@ -53,7 +111,28 @@ class FastOps(TensorOps):
         Returns:
             New tensor data
         """
-        pass
+        # Setup shapes
+        ls = list(shape_broadcast(a.shape[:-2], b.shape[:-2]))
+        ls.append(a.shape[-2])
+        ls.append(b.shape[-1])
+        assert a.shape[-1] == b.shape[-2]
+
+        # Create output
+        out = a.zeros(tuple(ls))
+
+        # Call main function
+        tensor_matrix_multiply(
+            out._tensor._storage,
+            out._tensor._shape,
+            out._tensor._strides,
+            a._tensor._storage,
+            a._tensor._shape,
+            a._tensor._strides,
+            b._tensor._storage,
+            b._tensor._shape,
+            b._tensor._strides,
+        )
+        return out

 def tensor_map(fn: Callable[[float], float]) -> Callable[[Storage, Shape, Strides, Storage, Shape, Strides], None]:
     """
@@ -71,7 +150,28 @@ def tensor_map(fn: Callable[[float], float]) -> Callable[[Storage, Shape, Stride
     Returns:
         Tensor map function.
     """
-    pass
+    @njit(parallel=True)
+    def _map(out_storage: Storage,
+             out_shape: Shape,
+             out_strides: Strides,
+             in_storage: Storage,
+             in_shape: Shape,
+             in_strides: Strides) -> None:
+        # Fast path: stride-aligned tensors can be mapped elementwise
+        if np.array_equal(out_strides, in_strides) and np.array_equal(out_shape, in_shape):
+            for i in prange(len(out_storage)):
+                out_storage[i] = fn(in_storage[i])
+        else:
+            for i in prange(len(out_storage)):
+                # Index buffers are allocated per iteration so parallel
+                # threads do not race on shared scratch space.
+                out_index = np.zeros(len(out_shape), np.int32)
+                in_index = np.zeros(len(in_shape), np.int32)
+                to_index(i, out_shape, out_index)
+                broadcast_index(out_index, out_shape, in_shape, in_index)
+                in_position = index_to_position(in_index, in_strides)
+                out_position = index_to_position(out_index, out_strides)
+                out_storage[out_position] = fn(in_storage[in_position])
+    return _map

 def tensor_zip(fn: Callable[[float, float], float]) -> Callable[[Storage, Shape, Strides, Storage, Shape, Strides, Storage, Shape, Strides], None]:
     """
@@ -90,7 +190,35 @@ def tensor_zip(fn: Callable[[float, float], float]) -> Callable[[Storage, Shape,
     Returns:
         Tensor zip function.
     """
-    pass
+    @njit(parallel=True)
+    def _zip(out_storage: Storage,
+             out_shape: Shape,
+             out_strides: Strides,
+             a_storage: Storage,
+             a_shape: Shape,
+             a_strides: Strides,
+             b_storage: Storage,
+             b_shape: Shape,
+             b_strides: Strides) -> None:
+        # Fast path: stride-aligned tensors can be zipped elementwise
+        if (np.array_equal(out_strides, a_strides) and np.array_equal(out_strides, b_strides) and
+            np.array_equal(out_shape, a_shape) and np.array_equal(out_shape, b_shape)):
+            for i in prange(len(out_storage)):
+                out_storage[i] = fn(a_storage[i], b_storage[i])
+        else:
+            for i in prange(len(out_storage)):
+                # Per-iteration index buffers avoid races between threads
+                out_index = np.zeros(len(out_shape), np.int32)
+                a_index = np.zeros(len(a_shape), np.int32)
+                b_index = np.zeros(len(b_shape), np.int32)
+                to_index(i, out_shape, out_index)
+                broadcast_index(out_index, out_shape, a_shape, a_index)
+                broadcast_index(out_index, out_shape, b_shape, b_index)
+                a_position = index_to_position(a_index, a_strides)
+                b_position = index_to_position(b_index, b_strides)
+                out_position = index_to_position(out_index, out_strides)
+                out_storage[out_position] = fn(a_storage[a_position], b_storage[b_position])
+    return _zip

 def tensor_reduce(fn: Callable[[float, float], float]) -> Callable[[Storage, Shape, Strides, Storage, Shape, Strides, int], None]:
     """
@@ -108,7 +236,32 @@ def tensor_reduce(fn: Callable[[float, float], float]) -> Callable[[Storage, Sha
     Returns:
         Tensor reduce function
     """
-    pass
+    @njit(parallel=True)
+    def _reduce(out_storage: Storage,
+                out_shape: Shape,
+                out_strides: Strides,
+                in_storage: Storage,
+                in_shape: Shape,
+                in_strides: Strides,
+                reduce_dim: int) -> None:
+        for i in prange(len(out_storage)):
+            # Per-iteration index buffers avoid races between threads
+            out_index = np.zeros(len(out_shape), np.int32)
+            in_index = np.zeros(len(in_shape), np.int32)
+            to_index(i, out_shape, out_index)
+            # Start from position 0 along the reduced dimension
+            in_index[:] = out_index[:]
+            in_index[reduce_dim] = 0
+            in_position = index_to_position(in_index, in_strides)
+            reduced = in_storage[in_position]
+            # Fold in the remaining positions along the dimension
+            for j in range(1, in_shape[reduce_dim]):
+                in_index[reduce_dim] = j
+                in_position = index_to_position(in_index, in_strides)
+                reduced = fn(reduced, in_storage[in_position])
+            out_position = index_to_position(out_index, out_strides)
+            out_storage[out_position] = reduced
+    return _reduce

 def _tensor_matrix_multiply(out: Storage, out_shape: Shape, out_strides: Strides, a_storage: Storage, a_shape: Shape, a_strides: Strides, b_storage: Storage, b_shape: Shape, b_strides: Strides) -> None:
     """
@@ -141,5 +294,39 @@ def _tensor_matrix_multiply(out: Storage, out_shape: Shape, out_strides: Strides
     Returns:
         None : Fills in `out`
     """
-    pass
+    # Batch strides of 0 broadcast a batch-1 (or 2-D) operand across batches;
+    # multiplying the batch index into a real stride would read out of bounds.
+    a_batch_stride = a_strides[0] if len(a_shape) > 2 and a_shape[0] > 1 else 0
+    b_batch_stride = b_strides[0] if len(b_shape) > 2 and b_shape[0] > 1 else 0
+    out_batch_stride = out_strides[0] if len(out_shape) > 2 else 0
+    batch = out_shape[0] if len(out_shape) > 2 else 1
+    m = a_shape[-2]
+    n = b_shape[-1]
+    p = a_shape[-1]
+
+    # Outer loop in parallel over batches
+    for b in prange(batch):
+        for i in range(m):
+            for j in range(n):
+                out_pos = b * out_batch_stride + i * out_strides[-2] + j * out_strides[-1]
+
+                # Accumulate the dot product along the shared dimension
+                acc = 0.0
+                for k in range(p):
+                    a_pos = b * a_batch_stride + i * a_strides[-2] + k * a_strides[-1]
+                    b_pos = b * b_batch_stride + k * b_strides[-2] + j * b_strides[-1]
+                    acc += a_storage[a_pos] * b_storage[b_pos]
+
+                # Store result
+                out[out_pos] = acc
 tensor_matrix_multiply = njit(parallel=True, fastmath=True)(_tensor_matrix_multiply)
\ No newline at end of file
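A sketch of wiring FastOps into a backend and hitting the matmul kernel above
(assuming minitorch's `TensorBackend` and `rand` helpers, and that Tensor's
`@` operator dispatches to the backend's matrix multiply):

    import minitorch
    from minitorch.fast_ops import FastOps

    FastBackend = minitorch.TensorBackend(FastOps)
    a = minitorch.rand((2, 3, 4), backend=FastBackend)
    b = minitorch.rand((2, 4, 5), backend=FastBackend)
    out = a @ b            # dispatches to tensor_matrix_multiply
    print(out.shape)       # (2, 3, 5)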
diff --git a/minitorch/scalar.py b/minitorch/scalar.py
index 8537995..e88d057 100644
--- a/minitorch/scalar.py
+++ b/minitorch/scalar.py
@@ -52,6 +52,14 @@ class Scalar:
     def __repr__(self) -> str:
         return 'Scalar(%f)' % self.data

+    def __hash__(self) -> int:
+        # `__eq__` below builds an EQ node in the computation graph rather
+        # than comparing identity, so hashing is keyed on `unique_id` and the
+        # autodiff bookkeeping tracks variables by `unique_id`, not by object.
+        # (A second, identity-based __eq__ here would be silently overridden.)
+        return hash(self.unique_id)
+
     def __mul__(self, b: ScalarLike) -> Scalar:
         return Mul.apply(self, b)

@@ -62,25 +70,25 @@ class Scalar:
         return Mul.apply(b, Inv.apply(self))

     def __add__(self, b: ScalarLike) -> Scalar:
-        raise NotImplementedError('Need to implement for Task 1.2')
+        return Add.apply(self, b)

     def __bool__(self) -> bool:
         return bool(self.data)

     def __lt__(self, b: ScalarLike) -> Scalar:
-        raise NotImplementedError('Need to implement for Task 1.2')
+        return LT.apply(self, b)

     def __gt__(self, b: ScalarLike) -> Scalar:
-        raise NotImplementedError('Need to implement for Task 1.2')
+        return LT.apply(b, self)

     def __eq__(self, b: ScalarLike) -> Scalar:
-        raise NotImplementedError('Need to implement for Task 1.2')
+        return EQ.apply(self, b)

     def __sub__(self, b: ScalarLike) -> Scalar:
-        raise NotImplementedError('Need to implement for Task 1.2')
+        return Add.apply(self, Neg.apply(b))

     def __neg__(self) -> Scalar:
-        raise NotImplementedError('Need to implement for Task 1.2')
+        return Neg.apply(self)

     def __radd__(self, b: ScalarLike) -> Scalar:
         return self + b
@@ -96,11 +104,35 @@ class Scalar:
         Args:
             x: value to be accumulated
         """
-        pass
+        if self.derivative is None:
+            self.derivative = 0.0
+        self.derivative += x

     def is_leaf(self) -> bool:
         """True if this variable created by the user (no `last_fn`)"""
-        pass
+        return self.history is not None and self.history.last_fn is None
+
+    def chain_rule(self, d_output: float) -> Tuple[Tuple[Variable, float], ...]:
+        """
+        Implement the derivative chain-rule.
+
+        Args:
+            d_output (float): derivative of the output
+
+        Returns:
+            List of tuples of (variable, derivative), where each is the derivative of the output with respect to
+            one of the inputs.
+        """
+        if self.history is None or self.history.last_fn is None:
+            return ()
+        derivatives = self.history.last_fn.chain_rule(self.history.ctx, self.history.inputs, d_output)
+        return tuple((var, deriv) for var, deriv in zip(self.history.inputs, derivatives))
+
+    def is_constant(self) -> bool:
+        """True if this variable has no history (is a constant)."""
+        return self.history is None
+
+    def parents(self) -> Iterable[Variable]:
+        """Get the parents of this variable."""
+        if self.history is None or self.history.last_fn is None:
+            return []
+        return self.history.inputs

     def backward(self, d_output: Optional[float]=None) -> None:
         """
@@ -110,7 +142,9 @@ class Scalar:
             d_output (number, opt): starting derivative to backpropagate through the model
                                    (typically left out, and assumed to be 1.0).
         """
-        pass
+        if d_output is None:
+            d_output = 1.0
+        backpropagate(self, d_output)

 def derivative_check(f: Any, *scalars: Scalar) -> None:
     """
@@ -121,4 +155,14 @@ def derivative_check(f: Any, *scalars: Scalar) -> None:
         f : function from n-scalars to 1-scalar.
         *scalars  : n input scalar values.
     """
-    pass
\ No newline at end of file
+    out = f(*scalars)
+    out.backward()
+
+    for i, scalar in enumerate(scalars):
+        if not scalar.is_leaf():
+            continue
+        # central_difference over Scalars returns a Scalar; compare its data
+        numerical = central_difference(f, *scalars, arg=i).data
+        assert abs(numerical - scalar.derivative) < 1e-3, (
+            f"Derivative check failed. Variable {i} has derivative {scalar.derivative} but "
+            f"numerical derivative is {numerical}."
+        )
\ No newline at end of file
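A sketch of the check above in use (names as defined in this file):

    import minitorch
    from minitorch.scalar import derivative_check

    def f(x, y):
        return x * y + x

    # Passes silently: df/dx = y + 1 = 4.0 and df/dy = x = 2.0
    # both match the central-difference estimates.
    derivative_check(f, minitorch.Scalar(2.0), minitorch.Scalar(3.0))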
diff --git a/minitorch/scalar_functions.py b/minitorch/scalar_functions.py
index d8dfe6f..f6d31a8 100644
--- a/minitorch/scalar_functions.py
+++ b/minitorch/scalar_functions.py
@@ -9,11 +9,15 @@ if TYPE_CHECKING:

 def wrap_tuple(x):
     """Turn a possible value into a tuple"""
-    pass
+    if isinstance(x, tuple):
+        return x
+    return (x,)

 def unwrap_tuple(x):
     """Turn a singleton tuple into a value"""
-    pass
+    if len(x) == 1:
+        return x[0]
+    return x

 class ScalarFunction:
     """
@@ -23,33 +27,163 @@ class ScalarFunction:
     This is a static class and is never instantiated. We use `class`
     here to group together the `forward` and `backward` code.
     """
+    @classmethod
+    def chain_rule(cls, ctx: Context, inputs: Tuple[Scalar, ...], d_output: float) -> Tuple[float, ...]:
+        """
+        Implements the chain rule for a function.
+
+        Args:
+            ctx: Context from running forward
+            inputs: Inputs to the function
+            d_output: Derivative of the output
+
+        Returns:
+            List of derivatives of the input
+        """
+        d_inputs = cls.backward(ctx, d_output)
+        return wrap_tuple(d_inputs)
+
+    @classmethod
+    def apply(cls, *vals: ScalarLike) -> Scalar:
+        """
+        Apply function forward to the arguments.
+
+        Args:
+            vals: Values for the function
+
+        Returns:
+            A new Variable with fn as operation.
+        """
+        raw_vals = []
+        scalars = []
+        for v in vals:
+            if isinstance(v, minitorch.scalar.Scalar):
+                scalars.append(v)
+                raw_vals.append(v.data)
+            else:
+                scalars.append(minitorch.scalar.Scalar(v))
+                raw_vals.append(v)
+
+        # Create the context.
+        ctx = Context()
+
+        # Call forward with the variables.
+        c = cls.forward(ctx, *raw_vals)
+        assert isinstance(c, float), "Expected return type float got %s" % (type(c))
+
+        # Create a new variable from the result with a new history.
+        back = minitorch.scalar.ScalarHistory(cls, ctx, scalars)
+        return minitorch.scalar.Scalar(c, back)

 class Add(ScalarFunction):
     """Addition function $f(x, y) = x + y$"""
+    @staticmethod
+    def forward(ctx: Context, a: float, b: float) -> float:
+        return a + b
+
+    @staticmethod
+    def backward(ctx: Context, d_output: float) -> Tuple[float, float]:
+        return d_output, d_output

 class Log(ScalarFunction):
     """Log function $f(x) = log(x)$"""
+    @staticmethod
+    def forward(ctx: Context, a: float) -> float:
+        ctx.save_for_backward(a)
+        return operators.log(a)
+
+    @staticmethod
+    def backward(ctx: Context, d_output: float) -> float:
+        (a,) = ctx.saved_values
+        return operators.log_back(a, d_output)

 class Mul(ScalarFunction):
     """Multiplication function"""
+    @staticmethod
+    def forward(ctx: Context, a: float, b: float) -> float:
+        ctx.save_for_backward(a, b)
+        return operators.mul(a, b)
+
+    @staticmethod
+    def backward(ctx: Context, d_output: float) -> Tuple[float, float]:
+        a, b = ctx.saved_values
+        return b * d_output, a * d_output

 class Inv(ScalarFunction):
     """Inverse function"""
+    @staticmethod
+    def forward(ctx: Context, a: float) -> float:
+        ctx.save_for_backward(a)
+        return operators.inv(a)
+
+    @staticmethod
+    def backward(ctx: Context, d_output: float) -> float:
+        (a,) = ctx.saved_values
+        return operators.inv_back(a, d_output)

 class Neg(ScalarFunction):
     """Negation function"""
+    @staticmethod
+    def forward(ctx: Context, a: float) -> float:
+        return operators.neg(a)
+
+    @staticmethod
+    def backward(ctx: Context, d_output: float) -> float:
+        return -d_output

 class Sigmoid(ScalarFunction):
     """Sigmoid function"""
+    @staticmethod
+    def forward(ctx: Context, a: float) -> float:
+        ctx.save_for_backward(a)
+        return operators.sigmoid(a)
+
+    @staticmethod
+    def backward(ctx: Context, d_output: float) -> float:
+        (a,) = ctx.saved_values
+        sig_a = operators.sigmoid(a)
+        return d_output * sig_a * (1 - sig_a)

 class ReLU(ScalarFunction):
     """ReLU function"""
+    @staticmethod
+    def forward(ctx: Context, a: float) -> float:
+        ctx.save_for_backward(a)
+        return operators.relu(a)
+
+    @staticmethod
+    def backward(ctx: Context, d_output: float) -> float:
+        (a,) = ctx.saved_values
+        return operators.relu_back(a, d_output)

 class Exp(ScalarFunction):
     """Exp function"""
+    @staticmethod
+    def forward(ctx: Context, a: float) -> float:
+        ctx.save_for_backward(a)
+        return operators.exp(a)
+
+    @staticmethod
+    def backward(ctx: Context, d_output: float) -> float:
+        (a,) = ctx.saved_values
+        return d_output * operators.exp(a)

 class LT(ScalarFunction):
     """Less-than function $f(x) =$ 1.0 if x is less than y else 0.0"""
+    @staticmethod
+    def forward(ctx: Context, a: float, b: float) -> float:
+        return operators.lt(a, b)
+
+    @staticmethod
+    def backward(ctx: Context, d_output: float) -> Tuple[float, float]:
+        return 0.0, 0.0

 class EQ(ScalarFunction):
-    """Equal function $f(x) =$ 1.0 if x is equal to y else 0.0"""
\ No newline at end of file
+    """Equal function $f(x) =$ 1.0 if x is equal to y else 0.0"""
+    @staticmethod
+    def forward(ctx: Context, a: float, b: float) -> float:
+        return operators.eq(a, b)
+
+    @staticmethod
+    def backward(ctx: Context, d_output: float) -> Tuple[float, float]:
+        return 0.0, 0.0
\ No newline at end of file
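The classes above are applied through `ScalarFunction.apply`, which runs
`forward`, records the history, and returns a new Scalar. A small sketch:

    import minitorch
    from minitorch.scalar_functions import Mul

    x = minitorch.Scalar(2.0)
    y = minitorch.Scalar(5.0)
    out = Mul.apply(x, y)     # forward -> Scalar(10.0) with (Mul, ctx, inputs) history
    out.backward()
    print(x.derivative, y.derivative)   # 5.0 2.0, from Mul.backward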
diff --git a/minitorch/tensor_functions.py b/minitorch/tensor_functions.py
index a2e29c9..e73d3fa 100644
--- a/minitorch/tensor_functions.py
+++ b/minitorch/tensor_functions.py
@@ -19,7 +19,35 @@ def wrap_tuple(x):
     pass

 class Function:
-    pass
+    """
+    Base class for function implementations.
+    """
+    @classmethod
+    def apply(cls, *vals: Tensor) -> Tensor:
+        """
+        Apply function forward to the arguments.
+
+        Args:
+            vals: input tensors
+
+        Returns:
+            output tensor
+        """
+        raw_vals = []
+        need_grad = False
+        for v in vals:
+            if v.requires_grad:
+                need_grad = True
+            raw_vals.append(v)
+
+        # Create the context.
+        ctx = Context(not need_grad)
+
+        # Call forward with the variables.
+        c = cls.forward(ctx, *raw_vals)
+
+        # Attach history so backward can reach the inputs (mirrors
+        # ScalarFunction.apply; assumes History/Tensor from minitorch/tensor.py).
+        back = None
+        if need_grad:
+            back = minitorch.History(cls, ctx, vals)
+        return minitorch.Tensor(c._tensor, back, backend=c.backend)

 class Neg(Function):
     pass
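`Neg` is still a stub in this diff. For illustration only, a hypothetical
subclass would pair `apply` with `forward`/`backward` of this shape (the
`neg_map` call on the tensor's backend attribute `f` is assumed, not shown
in the diff):

    class Neg(Function):
        @staticmethod
        def forward(ctx: Context, t1: Tensor) -> Tensor:
            return t1.f.neg_map(t1)

        @staticmethod
        def backward(ctx: Context, grad_output: Tensor) -> Tensor:
            # d(-x)/dx = -1, so the gradient is negated on the way back
            return grad_output.f.neg_map(grad_output)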
diff --git a/minitorch/tensor_ops.py b/minitorch/tensor_ops.py
index e5bb9eb..90cc0e8 100644
--- a/minitorch/tensor_ops.py
+++ b/minitorch/tensor_ops.py
@@ -84,7 +84,54 @@ class SimpleOps(TensorOps):
         Returns:
             new tensor data
         """
-        pass
+        def _map(a: Tensor, out: Optional[Tensor] = None) -> Tensor:
+            if out is None:
+                out = a.zeros(a.shape)
+            tensor_map(fn)(
+                a._tensor._storage,
+                a._tensor._shape,
+                a._tensor._strides,
+                out._tensor._storage,
+                out._tensor._shape,
+                out._tensor._strides,
+            )
+            return out
+        return _map
+
+    @staticmethod
+    def cmap(fn: Callable[[float], float]) -> MapProto:
+        """
+        Higher-order tensor map function that creates a new tensor ::
+
+          fn_map = map(fn)
+          out = fn_map(a)
+
+        Simple version::
+
+            for i:
+                for j:
+                    out[i, j] = fn(a[i, j])
+
+        Args:
+            fn: function from float-to-float to apply.
+            a (:class:`TensorData`): tensor to map over
+
+        Returns:
+            new tensor data
+        """
+        def _cmap(a: Tensor, out: Optional[Tensor] = None) -> Tensor:
+            if out is None:
+                out = a.zeros(a.shape)
+            tensor_map(fn)(
+                a._tensor._storage,
+                a._tensor._shape,
+                a._tensor._strides,
+                out._tensor._storage,
+                out._tensor._shape,
+                out._tensor._strides,
+            )
+            return out
+        return _cmap

     @staticmethod
     def zip(fn: Callable[[float, float], float]) -> Callable[['Tensor', 'Tensor'], 'Tensor']:
@@ -115,7 +162,22 @@ class SimpleOps(TensorOps):
         Returns:
             :class:`TensorData` : new tensor data
         """
-        pass
+        def _zip(a: Tensor, b: Tensor) -> Tensor:
+            c_shape = shape_broadcast(a.shape, b.shape)
+            out = a.zeros(c_shape)
+            tensor_zip(fn)(
+                a._tensor._storage,
+                a._tensor._shape,
+                a._tensor._strides,
+                b._tensor._storage,
+                b._tensor._shape,
+                b._tensor._strides,
+                out._tensor._storage,
+                out._tensor._shape,
+                out._tensor._strides,
+            )
+            return out
+        return _zip

     @staticmethod
     def reduce(fn: Callable[[float, float], float], start: float=0.0) -> Callable[['Tensor', int], 'Tensor']:
@@ -141,7 +203,56 @@ class SimpleOps(TensorOps):
         Returns:
             :class:`TensorData` : new tensor
         """
-        pass
+        def _reduce(a: Tensor, dim: int) -> Tensor:
+            out_shape = list(a.shape)
+            out_shape[dim] = 1
+            out = a.zeros(tuple(out_shape))
+            tensor_reduce(fn)(
+                a._tensor._storage,
+                a._tensor._shape,
+                a._tensor._strides,
+                out._tensor._storage,
+                out._tensor._shape,
+                out._tensor._strides,
+                dim,
+            )
+            return out
+        return _reduce
+
+    @staticmethod
+    def matrix_multiply(a: Tensor, b: Tensor) -> Tensor:
+        """
+        Batched matrix multiplication of two tensors.
+
+        Args:
+            a : batch1 x n x m tensor
+            b : batch2 x m x p tensor
+
+        Returns:
+            A tensor of size batch1 x batch2 x n x p
+        """
+        # Extract dimensions
+        batch1, n, m = a.shape
+        batch2, m2, p = b.shape
+        assert m == m2, f"Incompatible dimensions: {m} != {m2}"
+
+        # Create output tensor
+        out = a.zeros((batch1, batch2, n, p))
+
+        # Perform matrix multiplication
+        for i in range(batch1):
+            for j in range(batch2):
+                for k in range(n):
+                    for l in range(p):
+                        sum_val = 0.0
+                        for t in range(m):
+                            sum_val += a._tensor._storage[a._tensor._strides[0] * i + a._tensor._strides[1] * k + a._tensor._strides[2] * t] * \
+                                     b._tensor._storage[b._tensor._strides[0] * j + b._tensor._strides[1] * t + b._tensor._strides[2] * l]
+                        out_idx = out._tensor._strides[0] * i + out._tensor._strides[1] * j + out._tensor._strides[2] * k + out._tensor._strides[3] * l
+                        out._tensor._storage[out_idx] = sum_val
+
+        return out
+
     is_cuda = False

 def tensor_map(fn: Callable[[float], float]) -> Callable[[Storage, Shape, Strides, Storage, Shape, Strides], None]:
@@ -167,7 +278,21 @@ def tensor_map(fn: Callable[[float], float]) -> Callable[[Storage, Shape, Stride
     Returns:
         Tensor map function.
     """
-    pass
+    def _map(in_storage: Storage,
+             in_shape: Shape,
+             in_strides: Strides,
+             out_storage: Storage,
+             out_shape: Shape,
+             out_strides: Strides) -> None:
+        out_index = np.zeros(len(out_shape), np.int32)
+        in_index = np.zeros(len(in_shape), np.int32)
+        for i in range(len(out_storage)):
+            to_index(i, out_shape, out_index)
+            broadcast_index(out_index, out_shape, in_shape, in_index)
+            in_position = index_to_position(in_index, in_strides)
+            out_position = index_to_position(out_index, out_strides)
+            out_storage[out_position] = fn(in_storage[in_position])
+    return _map

 def tensor_zip(fn: Callable[[float, float], float]) -> Callable[[Storage, Shape, Strides, Storage, Shape, Strides, Storage, Shape, Strides], None]:
     """
@@ -192,7 +317,27 @@ def tensor_zip(fn: Callable[[float, float], float]) -> Callable[[Storage, Shape,
     Returns:
         Tensor zip function.
     """
-    pass
+    def _zip(a_storage: Storage,
+             a_shape: Shape,
+             a_strides: Strides,
+             b_storage: Storage,
+             b_shape: Shape,
+             b_strides: Strides,
+             out_storage: Storage,
+             out_shape: Shape,
+             out_strides: Strides) -> None:
+        out_index = np.zeros(len(out_shape), np.int32)
+        a_index = np.zeros(len(a_shape), np.int32)
+        b_index = np.zeros(len(b_shape), np.int32)
+        for i in range(len(out_storage)):
+            to_index(i, out_shape, out_index)
+            broadcast_index(out_index, out_shape, a_shape, a_index)
+            broadcast_index(out_index, out_shape, b_shape, b_index)
+            a_position = index_to_position(a_index, a_strides)
+            b_position = index_to_position(b_index, b_strides)
+            out_position = index_to_position(out_index, out_strides)
+            out_storage[out_position] = fn(a_storage[a_position], b_storage[b_position])
+    return _zip

 def tensor_reduce(fn: Callable[[float, float], float]) -> Callable[[Storage, Shape, Strides, Storage, Shape, Strides, int], None]:
     """
@@ -207,5 +352,28 @@ def tensor_reduce(fn: Callable[[float, float], float]) -> Callable[[Storage, Sha
     Returns:
         Tensor reduce function.
     """
-    pass
+    def _reduce(a_storage: Storage,
+                a_shape: Shape,
+                a_strides: Strides,
+                out_storage: Storage,
+                out_shape: Shape,
+                out_strides: Strides,
+                reduce_dim: int) -> None:
+        out_index = np.zeros(len(out_shape), np.int32)
+        a_index = np.zeros(len(a_shape), np.int32)
+        for i in range(len(out_storage)):
+            to_index(i, out_shape, out_index)
+            out_position = index_to_position(out_index, out_strides)
+            # Setup initial
+            a_index[:] = out_index[:]
+            a_index[reduce_dim] = 0
+            a_position = index_to_position(a_index, a_strides)
+            reduced = a_storage[a_position]
+            # Reduce over dimension
+            for j in range(1, a_shape[reduce_dim]):
+                a_index[reduce_dim] = j
+                a_position = index_to_position(a_index, a_strides)
+                reduced = fn(reduced, a_storage[a_position])
+            out_storage[out_position] = reduced
+    return _reduce
 SimpleBackend = TensorBackend(SimpleOps)
\ No newline at end of file
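A sketch of the simple backend in use (assuming minitorch's `tensor` helper):

    import minitorch
    from minitorch.tensor_ops import SimpleBackend

    t = minitorch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], backend=SimpleBackend)
    print(t.sum(0).shape)   # (1, 3) -- reduced dimension kept with size 1
    print(t.sum(1).shape)   # (2, 1)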
diff --git a/minitorch/testing.py b/minitorch/testing.py
index 72ab137..209bc02 100644
--- a/minitorch/testing.py
+++ b/minitorch/testing.py
@@ -7,84 +7,114 @@ class MathTest(Generic[A]):
     @staticmethod
     def neg(a: A) -> A:
         """Negate the argument"""
-        pass
+        return operators.neg(a)

     @staticmethod
     def addConstant(a: A) -> A:
         """Add contant to the argument"""
-        pass
+        return operators.add(a, 10.0)

     @staticmethod
     def square(a: A) -> A:
         """Manual square"""
-        pass
+        return operators.mul(a, a)

     @staticmethod
     def cube(a: A) -> A:
         """Manual cube"""
-        pass
+        return operators.mul(operators.mul(a, a), a)

     @staticmethod
     def subConstant(a: A) -> A:
         """Subtract a constant from the argument"""
-        pass
+        return operators.add(a, -5.0)

     @staticmethod
     def multConstant(a: A) -> A:
         """Multiply a constant to the argument"""
-        pass
+        return operators.mul(a, 5.0)

     @staticmethod
     def div(a: A) -> A:
         """Divide by a constant"""
-        pass
+        return operators.mul(a, 0.2)

     @staticmethod
     def inv(a: A) -> A:
         """Invert after adding"""
-        pass
+        return operators.inv(operators.add(a, 2.0))

     @staticmethod
     def sig(a: A) -> A:
         """Apply sigmoid"""
-        pass
+        return operators.sigmoid(a)

     @staticmethod
     def log(a: A) -> A:
         """Apply log to a large value"""
-        pass
+        return operators.log(operators.add(a, 100.0))

     @staticmethod
     def relu(a: A) -> A:
         """Apply relu"""
-        pass
+        return operators.relu(a)

     @staticmethod
     def exp(a: A) -> A:
         """Apply exp to a smaller value"""
-        pass
+        return operators.exp(operators.add(a, -100.0))

     @staticmethod
     def add2(a: A, b: A) -> A:
         """Add two arguments"""
-        pass
+        return operators.add(a, b)

     @staticmethod
     def mul2(a: A, b: A) -> A:
         """Mul two arguments"""
-        pass
+        return operators.mul(a, b)

     @staticmethod
     def div2(a: A, b: A) -> A:
         """Divide two arguments"""
-        pass
+        return operators.mul(a, operators.inv(b))

     @classmethod
     def _tests(cls) -> Tuple[Tuple[str, Callable[[A], A]], Tuple[str, Callable[[A, A], A]], Tuple[str, Callable[[Iterable[A]], A]]]:
         """
         Returns a list of all the math tests.
         """
-        pass
+        one_arg = [
+            ("neg", cls.neg),
+            ("addConstant", cls.addConstant),
+            ("square", cls.square),
+            ("cube", cls.cube),
+            ("subConstant", cls.subConstant),
+            ("multConstant", cls.multConstant),
+            ("div", cls.div),
+            ("inv", cls.inv),
+            ("sig", cls.sig),
+            ("log", cls.log),
+            ("relu", cls.relu),
+            ("exp", cls.exp),
+        ]
+        two_arg = [
+            ("add2", cls.add2),
+            ("mul2", cls.mul2),
+            ("div2", cls.div2),
+        ]
+        return tuple(one_arg), tuple(two_arg), tuple()

 class MathTestVariable(MathTest):
-    pass
\ No newline at end of file
+    @staticmethod
+    def _comp_testing() -> Tuple[Tuple[str, Callable[[A], A]], Tuple[str, Callable[[A, A], A]], Tuple[str, Callable[[Iterable[A]], A]]]:
+        """
+        Returns a list of all the comparison tests.
+        """
+        one_arg = []
+        two_arg = [
+            ("lt", operators.lt),
+            ("eq", operators.eq),
+            ("gt", lambda x, y: operators.lt(y, x)),
+        ]
+        return tuple(one_arg), tuple(two_arg), tuple()
\ No newline at end of file
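The test harness consumes these tables roughly as follows (a sketch; the
pytest files parametrize over the returned tuples):

    from minitorch.testing import MathTest, MathTestVariable

    one_arg, two_arg, _ = MathTest._tests()
    for name, fn in one_arg:
        print(name, fn(2.0))        # e.g. neg -> -2.0, square -> 4.0

    _, comps, _ = MathTestVariable._comp_testing()
    print([name for name, _ in comps])   # ['lt', 'eq', 'gt']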