Example #1
                              border_mode="same")

# layer_0
model_0 = ConvAutoEncoder(
    layers=[layer_0_en,
            MaxPoolingSameSize(pool_size=(4, 4)), layer_0_de])
out_0 = model_0.fprop(images, corruption_level=corruption_level)
cost_0 = mean_square_cost(out_0[-1], images) + L2_regularization(
    model_0.params, 0.005)
updates_0 = gd_updates(cost=cost_0,
                       params=model_0.params,
                       method="sgd",
                       learning_rate=0.1)

# layer_0 --> layer_1
model_0_to_1 = FeedForward(
    layers=[layer_0_en, MaxPooling(pool_size=(4, 4))])
out_0_to_1 = model_0_to_1.fprop(images)

# layer_1
model_1 = ConvAutoEncoder(
    layers=[layer_1_en,
            MaxPoolingSameSize(pool_size=(2, 2)), layer_1_de])
out_1 = model_1.fprop(out_0_to_1[-1], corruption_level=corruption_level)
cost_1 = mean_square_cost(out_1[-1], out_0_to_1[-1]) + L2_regularization(
    model_1.params, 0.005)
updates_1 = gd_updates(cost=cost_1,
                       params=model_1.params,
                       method="sgd",
                       learning_rate=0.1)

# layer_1 --> layer_2
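
The update rules above only build the graph; each autoencoder is then trained through a compiled step. A minimal sketch, assuming the shared input X, train_set_x, idx, and batch_size defined elsewhere in this script (as in Examples #2 and #7):

train_0 = theano.function(
    inputs=[idx],
    outputs=cost_0,
    updates=updates_0,
    givens={X: train_set_x[idx * batch_size:(idx + 1) * batch_size]})
train_1 = theano.function(
    inputs=[idx],
    outputs=cost_1,
    updates=updates_1,
    givens={X: train_set_x[idx * batch_size:(idx + 1) * batch_size]})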
Example #2
    print 'Training epoch %d, cost ' % epoch, np.mean(c_0), str(corr_best[0][0]), min_cost[0], max_iter[0]
    print '                        ', np.mean(c_1), str(corr_best[1][0]), min_cost[1], max_iter[1]
    print '                        ', np.mean(c_2), str(corr_best[2][0]), min_cost[2], max_iter[2]
    print '                        ', np.mean(c_3), str(corr_best[3][0]), min_cost[3], max_iter[3]

print "[MESSAGE] The model is trained"

################################## BUILD SUPERVISED MODEL #######################################

flattener=Flattener()
layer_5=ReLULayer(in_dim=50*16*16,
                  out_dim=1000)
layer_6=SoftmaxLayer(in_dim=1000,
                     out_dim=10)

model_sup=FeedForward(layers=[layer_0_en, layer_1_en, layer_2_en, layer_3_en, flattener, layer_5, layer_6])

out_sup=model_sup.fprop(images)
cost_sup=categorical_cross_entropy_cost(out_sup[-1], y)
updates=gd_updates(cost=cost_sup, params=model_sup.params, method="sgd", learning_rate=0.1)

train_sup=theano.function(inputs=[idx],
                          outputs=cost_sup,
                          updates=updates,
                          givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size],
                                  y: train_set_y[idx * batch_size: (idx + 1) * batch_size]})

test_sup=theano.function(inputs=[idx],
                         outputs=model_sup.layers[-1].error(out_sup[-1], y),
                         givens={X: test_set_x[idx * batch_size: (idx + 1) * batch_size],
                                 y: test_set_y[idx * batch_size: (idx + 1) * batch_size]})
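
These two functions are typically driven by a plain epoch loop. A minimal sketch; n_epochs, n_train_batches, n_test_batches, and np (numpy) are assumed from the surrounding script:

for epoch in xrange(n_epochs):
    c = [train_sup(i) for i in xrange(n_train_batches)]
    e = [test_sup(i) for i in xrange(n_test_batches)]
    print 'epoch %d, train cost %f, test error %f' % (epoch, np.mean(c), np.mean(e))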
Example #3
                            batch_size=batch_size,
                            border_mode="same")

# learning rate formula:
# r = 1 - 0.5*(ni/ntot)*(ni/ntot)
# ni = ith layer; ntot = number of layers
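# A hedged reading of that schedule (the helper below is illustrative and
# not part of the original source): layer ni out of ntot would train at
# base_rate * (1 - 0.5*(ni/ntot)**2), so later layers get smaller steps.
def layerwise_rate(ni, ntot, base_rate=0.1):
    ratio = float(ni) / ntot
    return base_rate * (1.0 - 0.5 * ratio * ratio)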


# layer_0
model_0=ConvAutoEncoder(layers=[layer_0_en, MaxPoolingSameSize(pool_size=(4,4)), layer_0_de])
out_0=model_0.fprop(images, corruption_level=corruption_level)
cost_0=mean_square_cost(out_0[-1], images)+L2_regularization(model_0.params, 0.005)
updates_0=gd_updates(cost=cost_0, params=model_0.params, method="sgd", learning_rate=0.1)

# layer_0 --> layer_1
model_0_to_1=FeedForward(layers=[layer_0_en, MaxPooling(pool_size=(2,2))])
out_0_to_1=model_0_to_1.fprop(images)

# layer_1
model_1=ConvAutoEncoder(layers=[layer_1_en, MaxPoolingSameSize(pool_size=(2,2)), layer_1_de])
out_1=model_1.fprop(out_0_to_1[-1], corruption_level=corruption_level)
cost_1=mean_square_cost(out_1[-1], out_0_to_1[-1])+L2_regularization(model_1.params, 0.005)
updates_1=gd_updates(cost=cost_1, params=model_1.params, method="sgd", learning_rate=0.1)

# layer_1 --> layer_2
# feed layer-0 features (not the raw images) into layer_1_en
model_1_to_2=FeedForward(layers=[layer_1_en, MaxPooling(pool_size=(4,4))])
out_1_to_2=model_1_to_2.fprop(out_0_to_1[-1])

# layer_2
model_2=ConvAutoEncoder(layers=[layer_2_en, MaxPoolingSameSize(pool_size=(2,2)), layer_2_de])
out_2=model_2.fprop(out_1_to_2[-1], corruption_level=corruption_level)
Example #4
# layer_3_de=SigmoidConvLayer(filter_size=(3,3),
#                             num_filters=50,
#                             num_channels=50,
#                             fm_size=(16,16),
#                             batch_size=batch_size,
#                             border_mode="full")

model_0=ConvAutoEncoder(layers=[layer_0_en, layer_0_de])
out_0=model_0.fprop(images, corruption_level=corruption_level)
cost_0=mean_square_cost(out_0[-1], images)+L2_regularization(model_0.params, 0.005)
updates_0=gd_updates(cost=cost_0, params=model_0.params, method="sgd", learning_rate=0.1)

## append a max-pooling layer

model_trans=FeedForward(layers=[layer_0_en, MaxPooling(pool_size=(2,2))])
out_trans=model_trans.fprop(images)


model_1=ConvAutoEncoder(layers=[layer_1_en, layer_1_de])
out_1=model_1.fprop(out_trans[-1], corruption_level=corruption_level)
cost_1=mean_square_cost(out_1[-1], out_trans[-1])+L2_regularization(model_1.params, 0.005)
updates_1=gd_updates(cost=cost_1, params=model_1.params, method="sgd", learning_rate=0.1)

# model_2=ConvAutoEncoder(layers=[layer_2_en, layer_2_de])
# out_2=model_2.fprop(out_1[0], corruption_level=corruption_level)
# cost_2=mean_square_cost(out_2[-1], out_1[0])+L2_regularization(model_2.params, 0.005)
# updates_2=gd_updates(cost=cost_2, params=model_2.params, method="sgd", learning_rate=0.1)

# model_3=ConvAutoEncoder(layers=[layer_3_en, layer_3_de])
# out_3=model_3.fprop(out_2[0], corruption_level=corruption_level)
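
Greedy pretraining then runs the compiled per-layer steps in sequence (train_0/train_1 as sketched under Example #1; n_epochs, n_train_batches, and np are assumed):

for epoch in xrange(n_epochs):
    c_0 = [train_0(i) for i in xrange(n_train_batches)]
    print 'layer 0, epoch %d, cost %f' % (epoch, np.mean(c_0))

for epoch in xrange(n_epochs):
    c_1 = [train_1(i) for i in xrange(n_train_batches)]
    print 'layer 1, epoch %d, cost %f' % (epoch, np.mean(c_1))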
Example #5
#                             fm_size=(16,16),
#                             batch_size=batch_size,
#                             border_mode="full")

model_0 = ConvAutoEncoder(layers=[layer_0_en, layer_0_de])
out_0 = model_0.fprop(images, corruption_level=corruption_level)
cost_0 = mean_square_cost(out_0[-1], images) + L2_regularization(
    model_0.params, 0.005)
updates_0 = gd_updates(cost=cost_0,
                       params=model_0.params,
                       method="sgd",
                       learning_rate=0.1)

## append a max-pooling layer

model_trans = FeedForward(
    layers=[layer_0_en, MaxPooling(pool_size=(2, 2))])
out_trans = model_trans.fprop(images)

model_1 = ConvAutoEncoder(layers=[layer_1_en, layer_1_de])
out_1 = model_1.fprop(out_trans[-1], corruption_level=corruption_level)
cost_1 = mean_square_cost(out_1[-1], out_trans[-1]) + L2_regularization(
    model_1.params, 0.005)
updates_1 = gd_updates(cost=cost_1,
                       params=model_1.params,
                       method="sgd",
                       learning_rate=0.1)

# model_2=ConvAutoEncoder(layers=[layer_2_en, layer_2_de])
# out_2=model_2.fprop(out_1[0], corruption_level=corruption_level)
# cost_2=mean_square_cost(out_2[-1], out_1[0])+L2_regularization(model_2.params, 0.005)
# updates_2=gd_updates(cost=cost_2, params=model_2.params, method="sgd", learning_rate=0.1)
Example #6
layer_1 = LCNLayer(filter_size=(5, 5),
                   num_filters=32,
                   num_channels=64,
                   fm_size=(16, 16),
                   batch_size=batch_size,
                   border_mode="full")

pool_1 = MaxPooling(pool_size=(2, 2))

flattener = Flattener()

layer_2 = ReLULayer(in_dim=32 * 64, out_dim=800)

layer_3 = SoftmaxLayer(in_dim=800, out_dim=10)

model = FeedForward(
    layers=[layer_0, pool_0, layer_1, pool_1, flattener, layer_2, layer_3])

out = model.fprop(images)
cost = categorical_cross_entropy_cost(out[-1], y)
updates = gd_updates(cost=cost,
                     params=model.params,
                     method="sgd",
                     learning_rate=0.01,
                     momentum=0.9)

extract = theano.function(
    inputs=[idx],
    outputs=layer_0.apply(images),
    givens={X: train_set_x[idx * batch_size:(idx + 1) * batch_size]})
print extract(1).shape
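
The matching training step compiles cost and updates in the same way (a sketch; train_set_y and the label variable y come from the surrounding script, and Example #7 shows the same pattern):

train = theano.function(
    inputs=[idx],
    outputs=cost,
    updates=updates,
    givens={
        X: train_set_x[idx * batch_size:(idx + 1) * batch_size],
        y: train_set_y[idx * batch_size:(idx + 1) * batch_size]
    })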
Example #7
                      num_channels=64,
                      fm_size=(16,16),
                      batch_size=batch_size,
                      border_mode="full")

pool_1=MaxPooling(pool_size=(2,2))

flattener=Flattener()

layer_2=ReLULayer(in_dim=32*64,
                  out_dim=800)

layer_3=SoftmaxLayer(in_dim=800,
                     out_dim=10)

model=FeedForward(layers=[layer_0, pool_0, layer_1, pool_1, flattener, layer_2, layer_3])

out=model.fprop(images)
cost=categorical_cross_entropy_cost(out[-1], y)
updates=gd_updates(cost=cost, params=model.params, method="sgd", learning_rate=0.01, momentum=0.9)

extract=theano.function(inputs=[idx],
                        outputs=layer_0.apply(images),
                        givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size]})
print extract(1).shape


train=theano.function(inputs=[idx],
                      outputs=cost,
                      updates=updates,
                      givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size],
Example #8
        corr_best[1][0]), min_cost[1], max_iter[1]
    print '                        ', np.mean(c_2), str(
        corr_best[2][0]), min_cost[2], max_iter[2]
    print '                        ', np.mean(c_3), str(
        corr_best[3][0]), min_cost[3], max_iter[3]

print "[MESSAGE] The model is trained"

################################## BUILD SUPERVISED MODEL #######################################

flattener = Flattener()
layer_5 = ReLULayer(in_dim=50 * 16 * 16, out_dim=1000)
layer_6 = SoftmaxLayer(in_dim=1000, out_dim=10)

model_sup = FeedForward(layers=[
    layer_0_en, layer_1_en, layer_2_en, layer_3_en, flattener, layer_5, layer_6
])

out_sup = model_sup.fprop(images)
cost_sup = categorical_cross_entropy_cost(out_sup[-1], y)
updates = gd_updates(cost=cost_sup,
                     params=model_sup.params,
                     method="sgd",
                     learning_rate=0.1)

train_sup = theano.function(
    inputs=[idx],
    outputs=cost_sup,
    updates=updates,
    givens={
        X: train_set_x[idx * batch_size:(idx + 1) * batch_size],