Example #1
def test_two_args(fn, ts):
    "Run forward for all two arg functions above, checking each entry against the Scalar version."
    t1, t2 = ts
    t3 = fn[1](t1, t2)
    for ind in t3._tensor.indices():
        assert (
            t3[ind] == fn[1](minitorch.Scalar(t1[ind]), minitorch.Scalar(t2[ind])).data
        )
Example #2
def test_two_args(fn, backend, data):
    "Run forward for all two arg functions above."
    t1, t2 = data.draw(shaped_tensors(2, backend=backend))
    t3 = fn[1](t1, t2)
    for ind in t3._tensor.indices():
        assert (t3[ind] == fn[1](minitorch.Scalar(t1[ind]),
                                 minitorch.Scalar(t2[ind])).data)
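In both variants fn is a (name, callable) pair taken from a shared table of math tests, so fn[1] is the operation under test; the loop then compares the tensor result entry by entry against the same operation applied to minitorch.Scalar values. A sketch of what such a table might contain (the entries are illustrative, not the project's actual list):

two_arg_tests = [
    ("add", lambda a, b: a + b),  # hypothetical entry
    ("mul", lambda a, b: a * b),  # hypothetical entry
]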
Example #3
def build_expression(code):
    out = eval(
        code,
        {
            "x": minitorch.Scalar(1.0, name="x"),
            "y": minitorch.Scalar(1.0, name="y"),
            "z": minitorch.Scalar(1.0, name="z"),
        },
    )
    out.name = "out"
    return out
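A quick usage sketch: build_expression evaluates the string with named Scalar leaves for x, y, and z, so any Python expression over those names yields a traced Scalar graph rooted at "out". The input string below is an arbitrary example:

out = build_expression("x * x + y")
print(out.data)  # value at x = y = z = 1.0, i.e. 2.0
print(out.name)  # "out"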
Example #4
 def __init__(self, in_size, out_size):
     super().__init__()
     self.weights = []
     self.bias = []
     for i in range(in_size):
         self.weights.append([])
         for j in range(out_size):
             self.weights[i].append(
                 self.add_parameter(
                     f"weight_{i}_{j}",
                     minitorch.Scalar(2 * (random.random() - 0.5))))
     for j in range(out_size):
         self.bias.append(
             self.add_parameter(
                 f"bias_{j}",
                 minitorch.Scalar(2 * (random.random() - 0.5))))
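The constructor stores parameters as nested Python lists rather than tensors. A forward pass consistent with the indexing used in Example #11 (y[j] = y[j] + x * self.weights[i][j].value) could look like the following sketch; it is an assumption, not necessarily the project's actual method:

 def forward(self, inputs):
     # y_j = bias_j + sum_i inputs_i * W_ij, accumulated one Scalar product at a time
     y = [b.value for b in self.bias]
     for i, x in enumerate(inputs):
         for j in range(len(y)):
             y[j] = y[j] + x * self.weights[i][j].value
     return y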
Example #5
def test_chain_rule4():
    var1 = minitorch.Scalar(5)
    var2 = minitorch.Scalar(10)

    ctx = minitorch.Context()
    Function2.forward(ctx, var1.data, var2.data)

    back = Function2.chain_rule(ctx=ctx, inputs=[var1, var2], d_output=5)
    back = list(back)
    assert len(back) == 2
    variable, deriv = back[0]
    assert variable.name == var1.name
    assert deriv == 5 * (10 + 1)
    variable, deriv = back[1]
    assert variable.name == var2.name
    assert deriv == 5 * 5
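Function2 itself is not shown in the listing, but the asserted derivatives pin it down: 5 * (10 + 1) for the first input and 5 * 5 for the second match f(x, y) = x * y + x, whose partials are y + 1 and x. A sketch assuming minitorch's usual ScalarFunction interface (forward/backward staticmethods plus ctx.save_for_backward):

class Function2(minitorch.ScalarFunction):
    @staticmethod
    def forward(ctx, x, y):
        ctx.save_for_backward(x, y)  # keep the inputs for the backward pass
        return x * y + x

    @staticmethod
    def backward(ctx, d_output):
        x, y = ctx.saved_values
        # d/dx (x*y + x) = y + 1 and d/dy (x*y + x) = x
        return d_output * (y + 1), d_output * x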
Example #6
def test_backprop2():
    # Example 2: F1(0, 0)
    var = minitorch.Scalar(0)
    var2 = Function1.apply(0, var)
    var3 = Function1.apply(0, var2)
    var3.backward(d_output=5)
    assert var.derivative == 5
Example #7
def test_backprop3():
    # Example 3: F1(F1(0, v1), F1(0, v1))
    var1 = minitorch.Scalar(0)
    var2 = Function1.apply(0, var1)
    var3 = Function1.apply(0, var1)
    var4 = Function1.apply(var2, var3)
    var4.backward(d_output=5)
    assert var1.derivative == 10
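Examples #6 and #7 assume a Function1 whose partial derivatives are both 1: a two-deep chain then passes d_output = 5 through unchanged, and the two paths through var1 here sum to 2 * 5 = 10. Any f(x, y) = x + y + c fits; a sketch with an arbitrary constant:

class Function1(minitorch.ScalarFunction):
    @staticmethod
    def forward(ctx, x, y):
        return x + y + 10  # the constant is a guess; any value gives the same derivatives

    @staticmethod
    def backward(ctx, d_output):
        # both partials are 1, so the upstream derivative passes straight through
        return d_output, d_output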
Example #8
    def train(self, data, learning_rate, max_epochs=500, log_fn=default_log_fn):
        self.learning_rate = learning_rate
        self.max_epochs = max_epochs
        self.model = Network(self.hidden_layers)
        optim = minitorch.SGD(self.model.parameters(), learning_rate)

        losses = []
        for epoch in range(1, self.max_epochs + 1):
            total_loss = 0.0
            correct = 0
            optim.zero_grad()

            # Forward
            loss = 0
            for i in range(data.N):
                x_1, x_2 = data.X[i]
                y = data.y[i]
                x_1 = minitorch.Scalar(x_1)
                x_2 = minitorch.Scalar(x_2)
                out = self.model.forward((x_1, x_2))

                if y == 1:
                    prob = out
                    correct += 1 if out.data > 0.5 else 0
                else:
                    prob = -out + 1.0
                    correct += 1 if out.data < 0.5 else 0
                loss = -prob.log()
                (loss / data.N).backward()
                total_loss += loss.data

            losses.append(total_loss)

            # Update
            optim.step()

            # Logging
            if epoch % 10 == 0 or epoch == max_epochs:
                log_fn(epoch, total_loss, correct, losses)
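The log_fn parameter defaults to a default_log_fn that is not shown. A minimal stand-in matching the four-argument call site above (the formatting is a guess):

def default_log_fn(epoch, total_loss, correct, losses):
    print(f"Epoch {epoch}  loss {total_loss:.4f}  correct {correct}")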
Example #9
def test_chain_rule3():
    "Check that constrants are ignored and variables get derivatives."
    constant = 10
    var = minitorch.Scalar(5)

    ctx = minitorch.Context()
    Function2.forward(ctx, constant, var.data)

    back = Function2.chain_rule(ctx=ctx, inputs=[constant, var], d_output=5)
    back = list(back)
    assert len(back) == 1
    variable, deriv = back[0]
    assert variable.name == var.name
    assert deriv == 5 * 10
Example #10
def test_one_args(fn, t1):
    "Run forward for all one arg functions above, checking each entry against the Scalar version."
    t2 = fn[1](t1)
    for ind in t2._tensor.indices():
        assert_close(t2[ind], fn[1](minitorch.Scalar(t1[ind])).data)
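assert_close is imported from the project's test helpers; a minimal tolerance check consistent with how it is called here would be:

def assert_close(a, b, eps=1e-5):
    # hypothetical stand-in: absolute-difference comparison with a small tolerance
    assert abs(a - b) < eps, f"values differ: {a} vs {b}"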
Example #11
                y[j] = y[j] + x * self.weights[i][j].value
        return y


model = Network()
data = DATASET
losses = []
for epoch in range(500):
    total_loss = 0.0
    correct = 0

    # Forward
    for i in range(data.N):
        x_1, x_2 = data.X[i]
        y = data.y[i]
        x_1 = minitorch.Scalar(x_1)
        x_2 = minitorch.Scalar(x_2)
        out = model.forward((x_1, x_2))

        if y == 1:
            prob = out
            correct += 1 if out.data > 0.5 else 0
        else:
            prob = -out + 1.0
            correct += 1 if out.data < 0.5 else 0

        loss = -prob.log()
        loss.backward()
        total_loss += loss.data

    # Update
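The listing truncates right after this comment. Since the loop has no optimizer object, the missing code is presumably a manual gradient-descent step over the model's parameters; a hypothetical sketch (the learning rate of 0.05 and the use of Parameter.update are assumptions):

    # hypothetical continuation, not the project's actual code
    for p in model.parameters():
        if p.value.derivative is not None:
            p.update(minitorch.Scalar(p.value.data - 0.05 * p.value.derivative))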
Example #12
 def run_one(self, x):
     return self.model.forward(
         (minitorch.Scalar(x[0], name="x_1"), minitorch.Scalar(x[1], name="x_2"))
     )
Example #13
def expression():
    x = minitorch.Scalar(10, name="x")
    y = (x + 10.0) * 20
    y.name = "y"
    return y
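At x = 10 this builds y = (10 + 10) * 20, so evaluating the returned Scalar should give 400:

y = expression()
assert y.data == 400.0  # (10 + 10) * 20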
Example #14
@composite  # from hypothesis.strategies; the draw-based signature requires it
def scalars(draw, min_value=-100000, max_value=100000):
    val = draw(floats(min_value=min_value, max_value=max_value))
    return minitorch.Scalar(val)
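With the @composite decorator in place, scalars() is a hypothesis strategy and can drive property-based tests directly (the test below is illustrative, not from the project):

from hypothesis import given

@given(scalars())
def test_scalar_wraps_float(s):
    # every drawn value arrives wrapped in a minitorch.Scalar
    assert isinstance(s.data, float)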
Example #15
def render_math_sandbox(use_scalar=False):
    st.write("# Sandbox for the Math Functions")

    if use_scalar:
        one, two, red = MathTestVariable._tests()
    else:
        one, two, red = MathTest._tests()
    f_type = st.selectbox("Function Type", ["One Arg", "Two Arg", "Reduce"])
    select = {"One Arg": one, "Two Arg": two, "Reduce": red}

    fn = st.selectbox("Function", select[f_type], format_func=lambda a: a[0])
    name, _, scalar = fn
    if f_type == "One Arg":
        st.write("### " + name)
        render_function(scalar)
        st.write("Function f(x)")
        xs = [((x / 1.0) - 50.0 + 1e-5) for x in range(1, 100)]
        if use_scalar:
            ys = [scalar(minitorch.Scalar(p)).data for p in xs]
        else:
            ys = [scalar(p) for p in xs]
        scatter = go.Scatter(mode="lines", x=xs, y=ys)
        fig = go.Figure(scatter)
        st.write(fig)

        if use_scalar:
            st.write("Derivative f'(x)")
            x_var = [minitorch.Scalar(x) for x in xs]
            for x in x_var:
                out = scalar(x)
                out.backward()
            scatter = go.Scatter(mode="lines",
                                 x=xs,
                                 y=[x.derivative for x in x_var])
            fig = go.Figure(scatter)
            st.write(fig)
            G = graph_builder.GraphBuilder().run(out)
            G.graph["graph"] = {"rankdir": "LR"}
            st.graphviz_chart(nx.nx_pydot.to_pydot(G).to_string())

    if f_type == "Two Arg":

        st.write("### " + name)
        render_function(scalar)
        st.write("Function f(x, y)")
        xs = [((x / 1.0) - 50.0 + 1e-5) for x in range(1, 100)]
        ys = [((x / 1.0) - 50.0 + 1e-5) for x in range(1, 100)]
        if use_scalar:
            zs = [[
                scalar(minitorch.Scalar(x), minitorch.Scalar(y)).data
                for x in xs
            ] for y in ys]
        else:
            zs = [[scalar(x, y) for x in xs] for y in ys]
        scatter = go.Surface(x=xs, y=ys, z=zs)  # zs is already the 2-D grid

        fig = go.Figure(scatter)
        st.write(fig)

        if use_scalar:
            a, b = [], []
            for x in xs:
                oa, ob = [], []
                for y in ys:
                    x1 = minitorch.Scalar(x)
                    y1 = minitorch.Scalar(y)
                    out = scalar(x1, y1)
                    out.backward()
                    oa.append((x, y, x1.derivative))
                    ob.append((x, y, y1.derivative))
                a.append(oa)
                b.append(ob)
            st.write("Derivative f'_x(x, y)")

            scatter = go.Surface(
                x=[[c[0] for c in a2] for a2 in a],
                y=[[c[1] for c in a2] for a2 in a],
                z=[[c[2] for c in a2] for a2 in a],
            )
            fig = go.Figure(scatter)
            st.write(fig)
            st.write("Derivative f'_y(x, y)")
            scatter = go.Surface(
                x=[[c[0] for c in a2] for a2 in b],
                y=[[c[1] for c in a2] for a2 in b],
                z=[[c[2] for c in a2] for a2 in b],
            )
            fig = go.Figure(scatter)
            st.write(fig)
    if f_type == "Reduce":
        st.write("### " + name)
        render_function(scalar)
        xs = [((x / 1.0) - 50.0 + 1e-5) for x in range(1, 100)]
        ys = [((x / 1.0) - 50.0 + 1e-5) for x in range(1, 100)]

        scatter = go.Surface(x=xs,
                             y=ys,
                             z=[[scalar([x, y]) for x in xs] for y in ys])
        fig = go.Figure(scatter)
        st.write(fig)
Example #16
def expression():
    x = minitorch.Scalar(1., name="x")
    y = minitorch.Scalar(1., name="y")
    z = (x * x) * y + 10. * x
    z.name = "z"
    return z
Example #17
def test_one_args(fn, backend, data):
    "Run forward for all one arg functions above."
    t1 = data.draw(tensors(backend=backend))
    t2 = fn[1](t1)
    for ind in t2._tensor.indices():
        assert_close(t2[ind], fn[1](minitorch.Scalar(t1[ind])).data)