Example #1
# imports assumed by this snippet (standard treeano conventions)
import numpy as np
import theano
import treeano
import treeano.nodes as tn
from treeano.sandbox.nodes import batch_normalization

fX = theano.config.floatX


# train a small batch-normalized network with Adam and check that the
# cost decreases on every update
def test_batch_normalization_node():
    network = tn.AdamNode(
        "adam", {
            "subtree":
            tn.SequentialNode("seq", [
                tn.InputNode("x", shape=(None, 10)),
                batch_normalization.BatchNormalizationNode("bn"),
                tn.DenseNode("d", num_units=1),
            ]),
            "cost":
            tn.TotalCostNode(
                "cost", {
                    "target": tn.InputNode("y", shape=(None, 1)),
                    "pred": tn.ReferenceNode("pred_ref", reference="d"),
                },
                cost_function=treeano.utils.squared_error)
        }).network()

    fn = network.function(["x", "y"], ["cost"], include_updates=True)

    # deliberately badly-scaled inputs (mean and std of ~100) that batch
    # normalization should rescale, letting the network still train
    x = 100 + 100 * np.random.randn(100, 10).astype(fX)
    y = np.random.randn(100, 1).astype(fX)
    prev_cost = fn(x, y)[0]
    for _ in range(3):
        cost = fn(x, y)[0]
        assert cost < prev_cost
        prev_cost = cost
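
The same compiled network can also be queried without applying updates; a minimal sketch, assuming the `network` and `x` objects from the test above ("d" is the dense output node):

# hedged sketch: forward pass only, no Adam / batch-norm updates applied
predict_fn = network.function(["x"], ["d"])
assert predict_fn(x)[0].shape == (100, 1)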
Example #2
def GradualBatchNormalization(name, **kwargs):
    # interpolate between batch normalization early in training and the
    # identity late in training; `tn` and `LinearInterpolationNode` are
    # assumed to be in scope
    from treeano.sandbox.nodes import batch_normalization as bn
    return tn.HyperparameterNode(
        name,
        LinearInterpolationNode(
            name + "_interpolate", {
                "early": bn.BatchNormalizationNode(name + "_bn"),
                "late": tn.IdentityNode(name + "_identity")
            }), **kwargs)
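
A minimal usage sketch, reusing only the `tn` API from Example #1 (node names here are hypothetical): the helper drops in wherever a plain BatchNormalizationNode would go.

# hypothetical usage: GradualBatchNormalization as an ordinary layer
seq = tn.SequentialNode("seq", [
    tn.InputNode("x", shape=(None, 10)),
    GradualBatchNormalization("gbn"),
    tn.DenseNode("d", num_units=1),
])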
Example #3
# imports assumed by this snippet (treeano conventions; `inception` is
# assumed to live in treeano's sandbox like `bn`)
import sklearn.model_selection
import theano
import treeano.nodes as tn
from treeano.sandbox.nodes import batch_normalization as bn
from treeano.sandbox.nodes import inception

fX = theano.config.floatX

# `mnist` is assumed to be a dict-like object with "data" and "target" arrays
# also rescaling to [0, 1] instead of [0, 255]
X = mnist['data'].reshape(-1, 1, 28, 28).astype(fX) / 255.0
y = mnist['target'].astype("int32")
X_train, X_valid, y_train, y_valid = sklearn.model_selection.train_test_split(
    X, y, random_state=42)
in_train = {"x": X_train, "y": y_train}
in_valid = {"x": X_valid, "y": y_valid}

# ############################## prepare model ##############################
model = tn.HyperparameterNode(
    "model",
    tn.SequentialNode("seq", [
        tn.InputNode("x", shape=(None, 1, 28, 28)),
        inception.InceptionNode("i1"),
        tn.DnnMaxPoolNode("mp1"),
        bn.BatchNormalizationNode("bn1"),
        inception.InceptionNode("i2"),
        tn.DnnMaxPoolNode("mp2"),
        bn.BatchNormalizationNode("bn2"),
        tn.DenseNode("fc1"),
        tn.ReLUNode("relu3"),
        tn.DenseNode("fc2", num_units=10),
        tn.SoftmaxNode("pred"),
    ]),
    num_filters_1x1=32,
    num_filters_3x3reduce=16,
    num_filters_3x3=32,
    num_filters_5x5reduce=16,
    num_filters_5x5=32,
    num_filters_poolproj=32,
    pool_size=(2, 2),
)
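
Note that `tn.DenseNode("fc1")` and both InceptionNodes are declared without their size arguments: a HyperparameterNode sets hyperparameters once, and child nodes inherit any they do not define themselves. A minimal sketch of the same pattern (hypothetical names, `tn` alias as above):

# hypothetical: both dense layers inherit num_units=32 from the wrapper
shared = tn.HyperparameterNode(
    "shared",
    tn.SequentialNode("seq2", [
        tn.InputNode("x2", shape=(None, 10)),
        tn.DenseNode("fc_a"),
        tn.DenseNode("fc_b"),
    ]),
    num_units=32)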
Example #4
# ############################### prepare data ###############################

# additional imports assumed by this snippet (same conventions as above)
import du
import theano
import treeano.nodes as tn
from treeano.sandbox.nodes import batch_normalization as bn

fX = theano.config.floatX

train, valid = du.tasks.image_tasks.svhn(fX)

# ############################## prepare model ##############################

# - the batch size can be provided as `None` to make the network
#   work for multiple different batch sizes
model = tn.HyperparameterNode(
    "model",
    tn.SequentialNode("seq", [
        tn.InputNode("x", shape=(None, 3, 32, 32)),
        tn.Conv2DWithBiasNode("conv1a"),
        bn.BatchNormalizationNode("bn1a"),
        tn.ReLUNode("relu1a"),
        tn.Conv2DWithBiasNode("conv1"),
        bn.BatchNormalizationNode("bn1"),
        tn.MaxPool2DNode("mp1"),
        tn.ReLUNode("relu1"),
        tn.Conv2DWithBiasNode("conv2a"),
        bn.BatchNormalizationNode("bn2a"),
        tn.ReLUNode("relu2a"),
        tn.Conv2DWithBiasNode("conv2"),
        bn.BatchNormalizationNode("bn2"),
        tn.ReLUNode("relu2"),
        tn.MaxPool2DNode("mp2"),
        tn.DenseNode("fc1"),
        bn.BatchNormalizationNode("bn3"),
        tn.ReLUNode("relu3"),