Example #1
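These snippets appear to be excerpts from treeano's test suite and are shown without their import preamble. A minimal header they seem to assume (the aliases below are inferred from how the names are used in the snippets, not copied from the original files):

import numpy as np
import theano
import theano.tensor as T

import treeano
import treeano.nodes as tn
import canopy

fX = theano.config.floatX  # assumed definition of fX, inferred from usage

Later examples also reference names such as auxiliary_costs, cp, sp, eb, REINFORCE and custom node classes; their exact import paths are not visible in the excerpts, so they are not guessed at here.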
def test_auxiliary_cost_node():
    network = tn.HyperparameterNode(
        "hp",
        tn.SequentialNode("seq", [
            tn.InputNode("x", shape=(3, 4, 5)),
            tn.AuxiliaryCostNode(
                "cost1", {"target": tn.InputNode("y1", shape=(3, 4, 5))}),
            tn.AddConstantNode("a1", value=2),
            tn.AuxiliaryCostNode(
                "cost2", {"target": tn.InputNode("y2", shape=(3, 4, 5))}),
            tn.MultiplyConstantNode("m1", value=2),
            tn.AuxiliaryCostNode(
                "cost3", {"target": tn.InputNode("y3", shape=(3, 4, 5))}),
            tn.ConstantNode("const", value=0),
            tn.InputElementwiseSumNode("cost")
        ]),
        cost_reference="cost",
        cost_function=treeano.utils.squared_error,
    ).network()
    fn = network.function(["x", "y1", "y2", "y3"], ["cost"])
    x = np.random.rand(3, 4, 5).astype(fX)
    ys = [np.random.rand(3, 4, 5).astype(fX) for _ in range(3)]

    def mse(x, y):
        return ((x - y)**2).mean()

    expected_output = (mse(x, ys[0]) + mse(x + 2, ys[1]) +
                       mse(2 * (x + 2), ys[2]))
    np.testing.assert_allclose(fn(x, *ys)[0], expected_output, rtol=1e-5)
Example #2
def test_same(input_shape, local_sizes, strides, pads, ignore_border):
    res = tn.downsample.pool_output_shape(
        input_shape,
        (2, 3),
        local_sizes,
        strides,
        pads,
        ignore_border,
    )
    # pool2d node assumes 0 padding
    assert pads == (0, 0)
    # pool2d node assumes ignoring border
    assert ignore_border
    network = tn.SequentialNode("s", [
        tn.ConstantNode("c",
                        value=np.random.randn(*input_shape).astype(fX)),
        tn.CustomPool2DNode(
            "p",
            pool_function=T.mean,
            pool_size=local_sizes,
            stride=strides,
        )
    ]).network()
    ans = network["p"].get_vw("default").variable.shape.eval()
    print(ans, res)
    np.testing.assert_equal(ans, res)
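For concreteness, a worked instance of the shape arithmetic this test checks; the shapes and pooling parameters below are hypothetical, not taken from the test:

# e.g. input_shape=(1, 1, 8, 8), local_sizes=(2, 2), strides=(2, 2),
# pads=(0, 0), ignore_border=True:
# with zero padding and ignore_border, each pooled axis has length
# (size - pool_size) // stride + 1 = (8 - 2) // 2 + 1 = 4,
# so both pool_output_shape and the pool node should report (1, 1, 4, 4).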
Example #3
def test_update_hyperparameters():
    network1 = tn.ConstantNode("c", value=1).network()

    fn1 = network1.function([], ["c"])
    np.testing.assert_equal(fn1()[0], 1)
    fn2 = canopy.handlers.handled_fn(
        network1, [canopy.handlers.update_hyperparameters("c", value=2)], {},
        {"out": "c"})
    np.testing.assert_equal(fn2({})["out"], 2)
Example #4
def test_override_hyperparameters3():
    # testing that canopy.handlers.override_hyperparameters overrides
    # previously set override_hyperparameters
    x1 = np.array(3, dtype=fX)
    x2 = np.array(2, dtype=fX)
    network = tn.ConstantNode("c").network(override_hyperparameters=dict(
        value=x1))

    fn1 = network.function([], ["c"])
    np.testing.assert_equal(fn1()[0], x1)
    fn2 = canopy.handlers.handled_fn(
        network, [canopy.handlers.override_hyperparameters(value=x2)], {},
        {"out": "c"})
    np.testing.assert_equal(fn2({})["out"], x2)
Example #5
def test_scale_hyperparameter():
    network = tn.HyperparameterNode(
        "hp",
        eb.ScaleHyperparameterNode("scale", tn.ConstantNode("c")),
        value=42.0,
        hyperparameter="value",
        start_percent=0.,
        end_percent=1.0,
        start_scale=1.0,
        end_scale=0.1,
        expected_batches=2,
    ).network()

    fn = network.function([], ["c"], include_updates=True)

    np.testing.assert_allclose(42.0, fn()[0], rtol=1e-5)         # scale starts at start_scale (1.0)
    np.testing.assert_allclose(42.0 * 0.55, fn()[0], rtol=1e-5)  # halfway through expected_batches: 0.55
    np.testing.assert_allclose(42.0 * 0.1, fn()[0], rtol=1e-5)   # reached end_scale (0.1)
    np.testing.assert_allclose(42.0 * 0.1, fn()[0], rtol=1e-5)   # stays clamped at end_scale afterwards
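The asserted values imply a linear schedule: the scale moves from start_scale to end_scale as the batch counter advances from start_percent to end_percent of expected_batches, then stays at end_scale. A tiny stand-alone sketch that reproduces just those numbers (a hypothetical helper, not treeano code):

def implied_scale(batch_idx, expected_batches=2,
                  start_scale=1.0, end_scale=0.1,
                  start_percent=0.0, end_percent=1.0):
    # clamp progress to [start_percent, end_percent], then interpolate linearly
    progress = min(max(batch_idx / float(expected_batches), start_percent),
                   end_percent)
    frac = (progress - start_percent) / (end_percent - start_percent)
    return start_scale + frac * (end_scale - start_scale)

# implied_scale(0) == 1.0, implied_scale(1) == 0.55,
# implied_scale(2) == implied_scale(3) == 0.1 -- matching the assertions above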
Example #6
def test_auxiliary_dense_softmax_cce_node():
    network = tn.SequentialNode("seq", [
        tn.InputNode("in", shape=(3, 5)),
        auxiliary_costs.AuxiliaryDenseSoftmaxCCENode(
            "aux",
            {"target": tn.ConstantNode("target", value=np.eye(3).astype(fX))},
            num_units=3,
            cost_reference="foo"),
        tn.IdentityNode("i"),
        tn.InputElementwiseSumNode("foo", ignore_default_input=True)
    ]).network()
    x = np.random.randn(3, 5).astype(fX)
    fn = network.function(["in"], ["i", "foo", "aux_dense"])
    res = fn(x)
    np.testing.assert_equal(res[0], x)
    # the untrained dense layer yields a uniform softmax output, so the expected
    # loss is the cross-entropy of a uniform prediction against the one-hot targets
    loss = T.nnet.categorical_crossentropy(
        np.ones((3, 3), dtype=fX) / 3.0,
        np.eye(3).astype(fX),
    ).mean().eval()
    np.testing.assert_allclose(res[1], loss)
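The reference value above is the categorical cross-entropy of a uniform prediction against one-hot targets, i.e. log(3). The same quantity in plain NumPy (illustrative only, not part of the original test):

pred = np.ones((3, 3), dtype=fX) / 3.0
target = np.eye(3).astype(fX)
loss_np = -(target * np.log(pred)).sum(axis=1).mean()  # == np.log(3) ~= 1.0986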
Example #7
def test_auxiliary_contraction_penalty_node():
    # testing that both contraction penalty versions return the same thing
    network = tn.SequentialNode(
        "s",
        [tn.InputNode("i", shape=(10, 3)),
         cp.AuxiliaryContractionPenaltyNode(
             "acp",
             tn.DenseNode("d", num_units=9),
             cost_reference="sum"),
         cp.ElementwiseContractionPenaltyNode("cp", input_reference="i"),
         tn.AggregatorNode("a"),
         # zero out the rest of the network, so that the value of "sum" is just
         # the value from the auxiliary contraction penalty node
         tn.ConstantNode("foo", value=0),
         tn.InputElementwiseSumNode("sum")]
    ).network()
    fn = network.function(["i"], ["sum", "a"])
    x = np.random.rand(10, 3).astype(fX)
    res = fn(x)
    np.testing.assert_equal(res[0], res[1])
Example #8
def test_auxiliary_kl_sparsity_penalty_node():
    # testing that both sparsity penalty versions return the same thing
    network = tn.HyperparameterNode(
        "hp",
        tn.SequentialNode(
            "s",
            [
                tn.InputNode("i", shape=(10, 3)),
                tn.DenseNode("d", num_units=9),
                sp.AuxiliaryKLSparsityPenaltyNode("scp", cost_reference="sum"),
                sp.ElementwiseKLSparsityPenaltyNode("sp"),
                tn.AggregatorNode("a"),
                # zero out the rest of the network, so that the value of "sum"
                # is just the value from the auxiliary sparsity penalty node
                tn.ConstantNode("foo", value=0),
                tn.InputElementwiseSumNode("sum")
            ]),
        sparsity=0.1,
    ).network()
    fn = network.function(["i"], ["sum", "a"])
    x = np.random.rand(10, 3).astype(fX)
    res = fn(x)
    np.testing.assert_equal(res[0], res[1])
Example #9
        shape = network.find_hyperparameter(["shape"])
        network.create_vw(
            "default",
            is_shared=True,
            shape=shape,
            tags={"parameter"},
            default_inits=[],
        )


def reward_fn(x):
    # quadratic reward, maximized when every component of x equals 3.5
    return -T.sqr(x - 3.5).sum(axis=1) + 100


graph = tn.GraphNode("graph", [[
    tn.ConstantNode("state", value=T.zeros((1, 1))),
    ConstantStateNode("mu", shape=(1, 1)),
    tn.ConstantNode("sigma", value=1.),
    REINFORCE.NormalSampleNode("sampled"),
    tn.ApplyNode("reward", fn=reward_fn, shape_fn=lambda x: x[:1]),
    REINFORCE.NormalREINFORCECostNode("REINFORCE")
],
                               [{
                                   "from": "mu",
                                   "to": "sampled",
                                   "to_key": "mu"
                               }, {
                                   "from": "sigma",
                                   "to": "sampled",
                                   "to_key": "sigma"
                               }, {
Example #10
            shape=(),
        )
        baseline_reward = 100
        network.create_vw(
            "default",
            variable=reward + baseline_reward,
            shape=(state_vw.shape[0], ),
            tags={"output"},
        )


BATCH_SIZE = 64
graph = tn.GraphNode("graph", [[
    tn.InputNode("state", shape=(BATCH_SIZE, 10)),
    tn.DenseNode("mu", num_units=2),
    tn.ConstantNode("sigma", value=1.),
    REINFORCE.NormalSampleNode("sampled"),
    RewardNode("reward"),
    REINFORCE.NormalREINFORCECostNode("REINFORCE")
],
                               [{
                                   "from": "state",
                                   "to": "mu"
                               }, {
                                   "from": "mu",
                                   "to": "sampled",
                                   "to_key": "mu"
                               }, {
                                   "from": "sigma",
                                   "to": "sampled",
                                   "to_key": "sigma"