Example #1
0
                            num_filters=64,
                            num_channels=64,
                            fm_size=(1,1),
                            batch_size=batch_size,
                            border_mode="same")

# learning rate formula:
# r = 1 - 0.5*(ni/ntot)*(ni/ntot)
# ni = ith layer; ntot = number of layers


# layer_0: train the first conv autoencoder to reconstruct the raw images.
model_0 = ConvAutoEncoder(
    layers=[layer_0_en, MaxPoolingSameSize(pool_size=(4, 4)), layer_0_de])
out_0 = model_0.fprop(images, corruption_level=corruption_level)
# reconstruction cost plus L2 weight decay on all layer_0 parameters
cost_0 = (mean_square_cost(out_0[-1], images)
          + L2_regularization(model_0.params, 0.005))
updates_0 = gd_updates(
    cost=cost_0, params=model_0.params, method="sgd", learning_rate=0.1)

# layer_0 --> layer_1: encode the images and pool to build layer_1's input
model_0_to_1 = FeedForward(layers=[layer_0_en, MaxPooling(pool_size=(2, 2))])
out_0_to_1 = model_0_to_1.fprop(images)

# layer_1: train the second conv autoencoder on layer_0's pooled output.
model_1 = ConvAutoEncoder(
    layers=[layer_1_en, MaxPoolingSameSize(pool_size=(2, 2)), layer_1_de])
out_1 = model_1.fprop(out_0_to_1[-1], corruption_level=corruption_level)
cost_1 = (mean_square_cost(out_1[-1], out_0_to_1[-1])
          + L2_regularization(model_1.params, 0.005))
updates_1 = gd_updates(
    cost=cost_1, params=model_1.params, method="sgd", learning_rate=0.1)

# layer_1 --> layer_2
# NOTE(review): this feeds the raw `images` into layer_1_en, but layer_1 was
# trained on out_0_to_1[-1] above — presumably this should be
# model_1_to_2.fprop(out_0_to_1[-1]); confirm against the original repository.
model_1_to_2=FeedForward(layers=[layer_1_en, MaxPooling(pool_size=(4,4))]);
out_1_to_2=model_1_to_2.fprop(images);
Example #2
0
# decoder for layer 4: 1x1 sigmoid conv mapping 128 -> 128 feature maps
layer_4_de = SigmoidConvLayer(
    filter_size=(1, 1), num_filters=128, num_channels=128,
    fm_size=(1, 1), batch_size=batch_size, border_mode="same")

# layer_0: first conv autoencoder, reconstructing the raw input images.
model_0 = ConvAutoEncoder(layers=[
    layer_0_en, MaxPoolingSameSize(pool_size=(4, 4)), layer_0_de])
out_0 = model_0.fprop(images, corruption_level=corruption_level)
# reconstruction cost with L2 weight decay
cost_0 = (mean_square_cost(out_0[-1], images) +
          L2_regularization(model_0.params, 0.005))
updates_0 = gd_updates(
    cost=cost_0, params=model_0.params, method="sgd", learning_rate=0.1)

# layer_0 --> layer_1: encoder plus 4x4 pooling produces layer_1's input
model_0_to_1 = FeedForward(layers=[layer_0_en, MaxPooling(pool_size=(4, 4))])
out_0_to_1 = model_0_to_1.fprop(images)

# layer_1: second conv autoencoder on the pooled layer_0 features.
model_1 = ConvAutoEncoder(layers=[
    layer_1_en, MaxPoolingSameSize(pool_size=(2, 2)), layer_1_de])
out_1 = model_1.fprop(out_0_to_1[-1], corruption_level=corruption_level)
cost_1 = (mean_square_cost(out_1[-1], out_0_to_1[-1]) +
          L2_regularization(model_1.params, 0.005))
updates_1 = gd_updates(cost=cost_1,
Example #3
0
                         batch_size=batch_size,
                         border_mode="same")
                                                   
# decoder for layer 4: 1x1 sigmoid conv, 128 channels in and out
layer_4_de = SigmoidConvLayer(
    filter_size=(1, 1), num_filters=128, num_channels=128,
    fm_size=(1, 1), batch_size=batch_size, border_mode="same")


# layer_0: first conv autoencoder, reconstructing the raw input images.
model_0 = ConvAutoEncoder(
    layers=[layer_0_en, MaxPoolingSameSize(pool_size=(4, 4)), layer_0_de])
out_0 = model_0.fprop(images, corruption_level=corruption_level)
cost_0 = (mean_square_cost(out_0[-1], images) +
          L2_regularization(model_0.params, 0.005))
# SGD with Nesterov momentum for the layer-wise pre-training step
updates_0 = gd_updates(cost=cost_0, params=model_0.params, method="sgd",
                       learning_rate=0.01, momentum=0.9, nesterov=True)

# layer_0 --> layer_1: encoder + 4x4 max-pooling builds layer_1's input
model_0_to_1 = FeedForward(layers=[layer_0_en, MaxPooling(pool_size=(4, 4))])
out_0_to_1 = model_0_to_1.fprop(images)

# layer_1: second conv autoencoder on the pooled layer_0 features.
model_1 = ConvAutoEncoder(
    layers=[layer_1_en, MaxPoolingSameSize(pool_size=(2, 2)), layer_1_de])
out_1 = model_1.fprop(out_0_to_1[-1], corruption_level=corruption_level)
cost_1 = (mean_square_cost(out_1[-1], out_0_to_1[-1]) +
          L2_regularization(model_1.params, 0.005))
updates_1 = gd_updates(cost=cost_1, params=model_1.params, method="sgd",
                       learning_rate=0.01, momentum=0.9, nesterov=True)

# layer_1 --> layer_2
# NOTE(review): this feeds the raw `images` into layer_1_en, but layer_1 was
# trained on out_0_to_1[-1] above — presumably this should be
# model_1_to_2.fprop(out_0_to_1[-1]); confirm against the original repository.
model_1_to_2=FeedForward(layers=[layer_1_en, MaxPooling(pool_size=(2,2))]);
out_1_to_2=model_1_to_2.fprop(images);
Example #4
0
#                          num_filters=50,
#                          num_channels=50,
#                          fm_size=(18,18),
#                          batch_size=batch_size)
                                                   
# layer_3_de=SigmoidConvLayer(filter_size=(3,3),
#                             num_filters=50,
#                             num_channels=50,
#                             fm_size=(16,16),
#                             batch_size=batch_size,
#                             border_mode="full")

# layer_0: conv autoencoder trained directly on the input images.
model_0 = ConvAutoEncoder(layers=[layer_0_en, layer_0_de])
out_0 = model_0.fprop(images, corruption_level=corruption_level)
cost_0 = (mean_square_cost(out_0[-1], images) +
          L2_regularization(model_0.params, 0.005))
updates_0 = gd_updates(
    cost=cost_0, params=model_0.params, method="sgd", learning_rate=0.1)

## append a max-pooling layer
# push the images through the trained encoder followed by 2x2 pooling
model_trans = FeedForward(layers=[layer_0_en, MaxPooling(pool_size=(2, 2))])
out_trans = model_trans.fprop(images)

# layer_1: second conv autoencoder on the pooled layer_0 output.
model_1 = ConvAutoEncoder(layers=[layer_1_en, layer_1_de])
out_1 = model_1.fprop(out_trans[-1], corruption_level=corruption_level)
cost_1 = (mean_square_cost(out_1[-1], out_trans[-1]) +
          L2_regularization(model_1.params, 0.005))
updates_1 = gd_updates(
    cost=cost_1, params=model_1.params, method="sgd", learning_rate=0.1)

# model_2=ConvAutoEncoder(layers=[layer_2_en, layer_2_de])
# out_2=model_2.fprop(out_1[0], corruption_level=corruption_level)
# cost_2=mean_square_cost(out_2[-1], out_1[0])+L2_regularization(model_2.params, 0.005)
Example #5
0
#                          fm_size=(18,18),
#                          batch_size=batch_size)

# layer_3_de=SigmoidConvLayer(filter_size=(3,3),
#                             num_filters=50,
#                             num_channels=50,
#                             fm_size=(16,16),
#                             batch_size=batch_size,
#                             border_mode="full")

# layer_0: conv autoencoder trained directly on the input images.
model_0 = ConvAutoEncoder(layers=[layer_0_en, layer_0_de])
out_0 = model_0.fprop(images, corruption_level=corruption_level)
# reconstruction cost plus L2 weight decay
cost_0 = (mean_square_cost(out_0[-1], images) +
          L2_regularization(model_0.params, 0.005))
updates_0 = gd_updates(cost=cost_0, params=model_0.params, method="sgd",
                       learning_rate=0.1)

## append a max-pooling layer
# encode the images, then pool 2x2 to build the next layer's input
model_trans = FeedForward(layers=[layer_0_en, MaxPooling(pool_size=(2, 2))])
out_trans = model_trans.fprop(images)

# layer_1: second conv autoencoder on the pooled layer_0 output.
model_1 = ConvAutoEncoder(layers=[layer_1_en, layer_1_de])
out_1 = model_1.fprop(out_trans[-1], corruption_level=corruption_level)
cost_1 = (mean_square_cost(out_1[-1], out_trans[-1]) +
          L2_regularization(model_1.params, 0.005))
updates_1 = gd_updates(cost=cost_1,
                       params=model_1.params,
                       method="sgd",
Example #6
0
# second 2x2 max-pooling stage
pool_1 = MaxPooling(pool_size=(2, 2))

# collapse the pooled feature maps into one vector per example
flattener = Flattener()

# fully-connected ReLU layer: 32 * 64 inputs -> 800 hidden units
layer_2 = ReLULayer(in_dim=32 * 64, out_dim=800)

# softmax output layer over the 10 classes
layer_3 = SoftmaxLayer(in_dim=800, out_dim=10)

# full classifier: conv/pool stack -> flatten -> dense head
model = FeedForward(
    layers=[layer_0, pool_0, layer_1, pool_1, flattener, layer_2, layer_3])

out = model.fprop(images)
cost = categorical_cross_entropy_cost(out[-1], y)
# plain SGD with momentum over every model parameter
updates = gd_updates(cost=cost, params=model.params, method="sgd",
                     learning_rate=0.01, momentum=0.9)

extract = theano.function(
    inputs=[idx],
    outputs=layer_0.apply(images),
    givens={X: train_set_x[idx * batch_size:(idx + 1) * batch_size]})
print extract(1).shape

train = theano.function(
    inputs=[idx],
    outputs=cost,
    updates=updates,
    givens={
        X: train_set_x[idx * batch_size:(idx + 1) * batch_size],
Example #7
0
# second pooling stage: 2x2 max-pooling
pool_1 = MaxPooling(pool_size=(2, 2))

# flatten pooled feature maps into vectors for the dense layers
flattener = Flattener()

# dense ReLU layer: 32 * 64 inputs -> 800 hidden units
layer_2 = ReLULayer(in_dim=32 * 64, out_dim=800)

# softmax classifier over 10 classes
layer_3 = SoftmaxLayer(in_dim=800, out_dim=10)

# assemble the whole feed-forward classifier
model = FeedForward(
    layers=[layer_0, pool_0, layer_1, pool_1, flattener, layer_2, layer_3])

out = model.fprop(images)
cost = categorical_cross_entropy_cost(out[-1], y)
updates = gd_updates(cost=cost, params=model.params, method="sgd",
                     learning_rate=0.01, momentum=0.9)

extract=theano.function(inputs=[idx],
                        outputs=layer_0.apply(images),
                        givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size]});
print extract(1).shape


# one SGD step on minibatch `idx`; returns the training cost
train = theano.function(
    inputs=[idx],
    outputs=cost,
    updates=updates,
    givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size],
            y: train_set_y[idx * batch_size: (idx + 1) * batch_size]})

test=theano.function(inputs=[idx],
                     outputs=model.layers[-1].error(out[-1], y),