Example #1
import theano.tensor as T

import telaugesa.datasets as ds  # assumed module path for the `ds` alias used below
from telaugesa.convnet import ReLUConvLayer
from telaugesa.cost import mean_square_cost

n_epochs = 200
batch_size = 200
nkerns = 64

Xtr, Ytr, Xte, Yte = ds.load_CIFAR10_Processed("../data/CIFAR10/train.npy",
                                               "../data/CIFAR10/train.pkl",
                                               "../data/CIFAR10/test.npy",
                                               "../data/CIFAR10/test.pkl")
Xtr = Xtr.reshape(50000, 3, 32, 32).transpose(0, 2, 3, 1).mean(3)
Xte = Xte.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).mean(3)
Xtrain = Xtr.reshape(Xtr.shape[0], Xtr.shape[1] * Xtr.shape[2])
Xtest = Xte.reshape(Xte.shape[0], Xte.shape[1] * Xte.shape[2])

train_set_x, train_set_y = ds.shared_dataset((Xtrain, Ytr))
test_set_x, test_set_y = ds.shared_dataset((Xtest, Yte))

n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size

print "[MESSAGE] The data is loaded"

X = T.matrix("data")
y = T.ivector("label")
idx = T.lscalar()

images = X.reshape((batch_size, 1, 32, 32))
layer_0 = ReLUConvLayer(filter_size=(5, 5),
                        num_filters=nkerns,
                        num_channels=1,
                        fm_size=(32, 32),       # trailing arguments assumed from
                        batch_size=batch_size,  # the complete call in Example #2
                        border_mode="same")
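
The excerpt stops at the first layer. Scripts in this style typically go on to compile a minibatch training function that slices the shared dataset through the idx symbol with givens. Below is a self-contained sketch of that standard Theano pattern, with a toy softmax classifier standing in for the real network; it is illustrative only, not telaugesa code.

import numpy as np
import theano
import theano.tensor as T

# Toy stand-in data: 1000 flattened 32x32 "images", 10 integer labels.
rng = np.random.RandomState(42)
data_x = theano.shared(np.asarray(rng.rand(1000, 1024),
                                  dtype=theano.config.floatX), borrow=True)
data_y = theano.shared(np.asarray(rng.randint(0, 10, 1000),
                                  dtype="int32"), borrow=True)

batch_size = 100
n_batches = data_x.get_value(borrow=True).shape[0] // batch_size

X = T.matrix("data")
y = T.ivector("label")
idx = T.lscalar()

# Toy softmax classifier in place of the conv network.
W = theano.shared(np.zeros((1024, 10), dtype=theano.config.floatX))
b = theano.shared(np.zeros(10, dtype=theano.config.floatX))
p_y = T.nnet.softmax(T.dot(X, W) + b)
cost = -T.mean(T.log(p_y)[T.arange(y.shape[0]), y])

grads = T.grad(cost, [W, b])
updates = [(p, p - 0.1 * g) for p, g in zip([W, b], grads)]

# `givens` swaps the symbolic inputs for slices of the shared dataset,
# indexed by the minibatch number.
train = theano.function(
    inputs=[idx],
    outputs=cost,
    updates=updates,
    givens={X: data_x[idx * batch_size:(idx + 1) * batch_size],
            y: data_y[idx * batch_size:(idx + 1) * batch_size]})

for epoch in range(5):
    for i in range(n_batches):
        train(i)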
Example #2
import theano.tensor as T

import telaugesa.datasets as ds  # assumed module path for the `ds` alias used below
from telaugesa.convnet import ReLUConvLayer
from telaugesa.convnet import IdentityConvLayer
from telaugesa.convnet import MaxPoolingSameSize
from telaugesa.model import ConvAutoEncoder
from telaugesa.optimize import gd_updates
from telaugesa.cost import mean_square_cost
from telaugesa.cost import L2_regularization

n_epochs = 200
batch_size = 200
nkerns = 64

Xtr, ytr = ds.load_fer_2013("../data/fer2013/fer2013.csv")

Xtr /= 255.0

train_set_x, _ = ds.shared_dataset((Xtr, ytr))
n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size

print "[MESSAGE] The data is loaded"
 
X=T.matrix("data");
y=T.ivector("label");
idx=T.lscalar();
 
images = X.reshape((batch_size, 1, 48, 48))
layer_0 = ReLUConvLayer(filter_size=(7, 7),
                        num_filters=nkerns,
                        num_channels=1,
                        fm_size=(48, 48),
                        batch_size=batch_size,
                        border_mode="same")
Example #3

import numpy as np
import theano.tensor as T

import telaugesa.datasets as ds  # assumed module path for the `ds` alias used below
from telaugesa.convnet import ReLUConvLayer
from telaugesa.cost import mean_square_cost
from telaugesa.cost import categorical_cross_entropy_cost
from telaugesa.cost import L2_regularization

n_epochs = 50
batch_size = 100
nkerns = 100

Xtr, Ytr, Xte, Yte = ds.load_CIFAR10("../data/CIFAR10")

Xtr = np.mean(Xtr, 3)
Xte = np.mean(Xte, 3)
Xtrain = Xtr.reshape(Xtr.shape[0], Xtr.shape[1] * Xtr.shape[2]) / 255.0
Xtest = Xte.reshape(Xte.shape[0], Xte.shape[1] * Xte.shape[2]) / 255.0

train_set_x, train_set_y = ds.shared_dataset((Xtrain, Ytr))
test_set_x, test_set_y = ds.shared_dataset((Xtest, Yte))

n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size

print "[MESSAGE] The data is loaded"

################################## FIRST LAYER #######################################

X=T.matrix("data");
y=T.ivector("label");
idx=T.lscalar();

images = X.reshape((batch_size, 1, 32, 32))
layer_0_en = ReLUConvLayer(filter_size=(7, 7),
                           num_filters=nkerns,     # trailing arguments assumed from
                           num_channels=1,         # the complete call in Example #2
                           fm_size=(32, 32),
                           batch_size=batch_size,
                           border_mode="same")
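
The preprocessing above collapses CIFAR-10's RGB channels by averaging over the last axis before flattening each image into a row for T.matrix. A quick standalone numpy check of the shapes involved (illustrative only):

import numpy as np

# (N, 32, 32, 3) RGB -> (N, 32, 32) grayscale -> (N, 1024) rows for T.matrix.
fake = np.zeros((8, 32, 32, 3))
gray = np.mean(fake, 3)
flat = gray.reshape(gray.shape[0], gray.shape[1] * gray.shape[2])
print(gray.shape)   # (8, 32, 32)
print(flat.shape)   # (8, 1024)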
Example #4
import theano.tensor as T

import telaugesa.datasets as ds  # assumed module path for the `ds` alias used below
from telaugesa.convnet import ReLUConvLayer
from telaugesa.convnet import IdentityConvLayer
from telaugesa.convnet import MaxPoolingSameSize
from telaugesa.model import ConvAutoEncoder
from telaugesa.optimize import gd_updates
from telaugesa.cost import mean_square_cost
from telaugesa.cost import L2_regularization

n_epochs = 200
batch_size = 200
nkerns = 64

Xtr, ytr = ds.load_fer_2013("../data/fer2013/fer2013.csv")

Xtr /= 255.0

train_set_x, _ = ds.shared_dataset((Xtr, ytr))
n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size

print "[MESSAGE] The data is loaded"

X = T.matrix("data")
y = T.ivector("label")
idx = T.lscalar()

images = X.reshape((batch_size, 1, 48, 48))
layer_0 = ReLUConvLayer(filter_size=(7, 7),
                        num_filters=nkerns,
                        num_channels=1,
                        fm_size=(48, 48),
                        batch_size=batch_size,
                        border_mode="same")