import numpy as np
import deeppy as dp
from sklearn.datasets import make_classification


def test_classification():
    # Make dataset
    n_classes = 2
    n_samples = 1000
    n_features = 48
    x, y = make_classification(n_samples=n_samples, n_features=n_features,
                               n_classes=n_classes, n_informative=n_classes * 2,
                               random_state=1)
    x = x.astype(dp.float_)
    y = y.astype(dp.int_)
    n_train = int(0.8 * n_samples)
    x_train = x[:n_train]
    y_train = y[:n_train]
    x_test = x[n_train:]
    y_test = y[n_train:]
    scaler = dp.StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # Setup feeds
    batch_size = 16
    train_feed = dp.SupervisedFeed(x_train, y_train, batch_size=batch_size)
    test_feed = dp.Feed(x_test)

    # Setup neural network
    weight_decay = 1e-03
    net = dp.NeuralNetwork(
        layers=[
            dp.Affine(
                n_out=32,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=64,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=n_classes,
                weights=dp.Parameter(dp.AutoFiller()),
            ),
        ],
        loss=dp.SoftmaxCrossEntropy(),
    )

    # Train neural network
    learn_rule = dp.Momentum(learn_rate=0.01 / batch_size, momentum=0.9)
    trainer = dp.GradientDescent(net, train_feed, learn_rule)
    trainer.train_epochs(n_epochs=10)

    # Evaluate on test data
    error = np.mean(net.predict(test_feed) != y_test)
    print('Test error rate: %.4f' % error)
    assert error < 0.2
def run():
    # Prepare data
    dataset = dp.datasets.MNIST()
    x, y = dataset.data(flat=True)
    x = x.astype(dp.float_)
    y = y.astype(dp.int_)
    train_idx, test_idx = dataset.split()
    x_train = x[train_idx]
    y_train = y[train_idx]
    x_test = x[test_idx]
    y_test = y[test_idx]
    scaler = dp.UniformScaler(high=255.)
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    batch_size = 128
    train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size)
    test_input = dp.SupervisedInput(x_test, y_test)

    # Setup neural network
    net = dp.NeuralNetwork(
        layers=[
            dp.FullyConnected(
                n_output=800,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.0001),
            ),
            dp.Activation('relu'),
            dp.FullyConnected(
                n_output=800,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.0001),
            ),
            dp.Activation('relu'),
            dp.FullyConnected(
                n_output=dataset.n_classes,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.0001),
            ),
            dp.MultinomialLogReg(),
        ],
    )

    # Train neural network
    def val_error():
        return net.error(test_input)
    trainer = dp.StochasticGradientDescent(
        max_epochs=25,
        learn_rule=dp.Momentum(learn_rate=0.1, momentum=0.9),
    )
    trainer.train(net, train_input, val_error)

    # Visualize weights from first layer
    W = next(np.array(layer.params()[0].array) for layer in net.layers
             if isinstance(layer, dp.FullyConnected))
    W = np.reshape(W.T, (-1, 28, 28))
    filepath = os.path.join('mnist', 'mlp_weights.png')
    dp.misc.img_save(dp.misc.img_tile(dp.misc.img_stretch(W)), filepath)

    # Evaluate on test data
    error = net.error(test_input)
    print('Test error rate: %.4f' % error)
def run():
    # Prepare data
    dataset = dp.datasets.MNIST()
    x, y = dataset.data(flat=True)
    x = x.astype(dp.float_) / 255.0
    y = y.astype(dp.int_)
    train_idx, test_idx = dataset.split()
    x_train = x[train_idx]
    y_train = y[train_idx]
    x_test = x[test_idx]
    y_test = y[test_idx]
    train_input = dp.SupervisedInput(x_train, y_train, batch_size=128)
    test_input = dp.SupervisedInput(x_test, y_test)

    # Setup neural network
    nn = dp.NeuralNetwork(
        layers=[
            dp.Dropout(0.2),
            dp.DropoutFullyConnected(
                n_output=800,
                dropout=0.5,
                weights=dp.Parameter(dp.NormalFiller(sigma=0.01),
                                     penalty=('l2', 0.00001), monitor=True),
            ),
            dp.Activation('relu'),
            dp.DropoutFullyConnected(
                n_output=800,
                dropout=0.5,
                weights=dp.Parameter(dp.NormalFiller(sigma=0.01),
                                     penalty=('l2', 0.00001), monitor=True),
            ),
            dp.Activation('relu'),
            dp.DropoutFullyConnected(
                n_output=dataset.n_classes,
                weights=dp.Parameter(dp.NormalFiller(sigma=0.01),
                                     penalty=('l2', 0.00001), monitor=True),
            ),
            dp.MultinomialLogReg(),
        ],
    )

    # Train neural network
    def valid_error():
        return nn.error(test_input)
    trainer = dp.StochasticGradientDescent(
        max_epochs=50,
        learn_rule=dp.Momentum(learn_rate=0.1, momentum=0.9),
    )
    trainer.train(nn, train_input, valid_error)

    # Visualize weights from first layer
    W = next(np.array(layer.params()[0].values) for layer in nn.layers
             if isinstance(layer, dp.FullyConnected))
    W = np.reshape(W.T, (-1, 28, 28))
    dp.misc.img_save(dp.misc.img_tile(dp.misc.img_stretch(W)),
                     os.path.join('mnist', 'mlp_dropout_weights.png'))

    # Evaluate on test data
    error = nn.error(test_input)
    print('Test error rate: %.4f' % error)
def affine(n_out, gain, wdecay=0.0, bias=0.0):
    if bias is None:
        return ex.nnet.Linear(
            n_out=n_out,
            weights=dp.Parameter(dp.AutoFiller(gain), weight_decay=wdecay),
        )
    else:
        return ex.nnet.Affine(
            n_out=n_out,
            bias=bias,
            weights=dp.Parameter(dp.AutoFiller(gain), weight_decay=wdecay),
        )
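# A minimal usage sketch for the affine() helper above (layer sizes and gain
# values here are illustrative, not from the original example): bias=None
# selects the bias-free Linear variant, anything else yields an Affine layer
# with that initial bias value.
hidden = affine(n_out=256, gain=1.0, wdecay=1e-4)   # Affine layer, bias initialized to 0.0
output = affine(n_out=64, gain=1.0, bias=None)      # Linear layer, no bias term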
def conv_layer(n_filters):
    return dp.Convolution(
        n_filters=n_filters,
        filter_shape=(5, 5),
        border_mode='valid',
        weights=dp.Parameter(dp.AutoFiller(gain=1.39), weight_decay=0.0005),
    )
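# Usage sketch: conv_layer() is meant to be interleaved with activations and
# pooling when declaring a network, as the convnet definitions later in these
# examples do (the filter counts below are illustrative).
layers = [
    conv_layer(32),
    dp.ReLU(),
    conv_layer(64),
    dp.ReLU(),
]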
def conv_layer(n_filters):
    # Pass the argument through; the original hardcoded n_filters=32,
    # ignoring the parameter.
    return dp.Convolution(
        n_filters=n_filters,
        filter_shape=(5, 5),
        border_mode='full',
        weights=dp.Parameter(dp.AutoFiller(gain=1.25), weight_decay=0.003),
    )
def backconv(n_filters, filter_size, stride=2, gain=1.0, wdecay=0.0, bias=0.0):
    return ex.nnet.BackwardConvolution(
        n_filters=n_filters,
        strides=(stride, stride),
        weights=dp.Parameter(dp.AutoFiller(gain), weight_decay=wdecay),
        bias=bias,
        filter_shape=(filter_size, filter_size),
        border_mode='same',
    )
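# Usage sketch (hypothetical filter counts; assumes BackwardConvolution acts
# as a strided transposed convolution): chaining backconv() layers grows the
# spatial resolution by the stride per layer, e.g. in a decoder or generator
# stack.
upsampling_stack = [
    backconv(n_filters=128, filter_size=5),
    backconv(n_filters=64, filter_size=5),
]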
def __init__(self, game_name, run_id):
    self.number_of_actions = len(action_dict[game_name])
    valid_actions = action_dict[game_name]
    net.layers[-2] = dp.FullyConnected(
        n_output=self.number_of_actions,
        weights=dp.Parameter(dp.NormalFiller(sigma=0.1),
                             weight_decay=0.004, monitor=False))
    self.memory = MemoryD(self.memory_size)
    self.ale = ALE(valid_actions, run_id, display_screen="false",
                   skip_frames=4,
                   game_ROM='ale/roms/' + game_name + '.bin')
    self.nnet = net
    self.q_values = []
    self.test_game_scores = []
import deeppy as dp

# Setup neural network
pool_kwargs = {
    'win_shape': (3, 3),
    'strides': (2, 2),
    'border_mode': 'same',
    'method': 'max',
}
net = dp.NeuralNetwork(layers=[
    dp.Convolutional(
        n_filters=32,
        filter_shape=(5, 5),
        border_mode='same',
        weights=dp.Parameter(dp.NormalFiller(sigma=0.0001),
                             weight_decay=0.004, monitor=True),
    ),
    dp.Activation('relu'),
    dp.Pool(**pool_kwargs),
    dp.Convolutional(
        n_filters=32,
        filter_shape=(5, 5),
        border_mode='same',
        weights=dp.Parameter(dp.NormalFiller(sigma=0.01),
                             weight_decay=0.004, monitor=True),
    ),
    dp.Activation('relu'),
    dp.Pool(**pool_kwargs),
    dp.Convolutional(
        # The snippet breaks off at this layer; the third block is assumed
        # from the parallel CIFAR-10 convnet examples in this collection.
        n_filters=64,
        filter_shape=(5, 5),
        border_mode='same',
        weights=dp.Parameter(dp.NormalFiller(sigma=0.01),
                             weight_decay=0.004, monitor=True),
    ),
    dp.Activation('relu'),
    dp.Pool(**pool_kwargs),
])
def run():
    # Prepare data
    batch_size = 128
    dataset = dp.datasets.CIFAR10()
    x, y = dataset.data()
    y = y.astype(dp.int_)
    train_idx, test_idx = dataset.split()
    x_train = preprocess_imgs(x[train_idx])
    y_train = y[train_idx]
    train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size)

    # Setup neural network
    pool_kwargs = {
        'win_shape': (3, 3),
        'strides': (2, 2),
        'border_mode': 'same',
        'method': 'max',
    }
    nn = dp.NeuralNetwork(layers=[
        dp.Convolutional(
            n_filters=32,
            filter_shape=(5, 5),
            border_mode='same',
            weights=dp.Parameter(dp.NormalFiller(sigma=0.0001),
                                 penalty=('l2', 0.004), monitor=True),
        ),
        dp.Activation('relu'),
        dp.Pool(**pool_kwargs),
        dp.Convolutional(
            n_filters=32,
            filter_shape=(5, 5),
            border_mode='same',
            weights=dp.Parameter(dp.NormalFiller(sigma=0.01),
                                 penalty=('l2', 0.004), monitor=True),
        ),
        dp.Activation('relu'),
        dp.Pool(**pool_kwargs),
        dp.Convolutional(
            n_filters=64,
            filter_shape=(5, 5),
            border_mode='same',
            weights=dp.Parameter(dp.NormalFiller(sigma=0.01),
                                 penalty=('l2', 0.004), monitor=True),
        ),
        dp.Activation('relu'),
        dp.Pool(**pool_kwargs),
        dp.Flatten(),
        dp.FullyConnected(
            n_output=64,
            weights=dp.Parameter(dp.NormalFiller(sigma=0.1),
                                 penalty=('l2', 0.03)),
        ),
        dp.Activation('relu'),
        dp.FullyConnected(
            n_output=dataset.n_classes,
            weights=dp.Parameter(dp.NormalFiller(sigma=0.1),
                                 penalty=('l2', 0.03)),
        ),
        dp.MultinomialLogReg(),
    ],
    )
    dp.misc.profile(nn, train_input)
def test_classification():
    # Make dataset
    n_classes = 2
    n_samples = 1000
    n_features = 48
    x, y = make_classification(
        n_samples=n_samples,
        n_features=n_features,
        n_classes=n_classes,
        n_informative=n_classes*2,
        random_state=1,
    )
    n_train = int(0.8 * n_samples)
    n_val = int(0.5 * (n_samples - n_train))
    x_train = x[:n_train]
    y_train = y[:n_train]
    x_val = x[n_train:n_train+n_val]
    y_val = y[n_train:n_train+n_val]
    x_test = x[n_train+n_val:]
    y_test = y[n_train+n_val:]
    scaler = dp.StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_val = scaler.transform(x_val)
    x_test = scaler.transform(x_test)

    # Setup input
    batch_size = 16
    train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size)
    val_input = dp.Input(x_val)
    test_input = dp.Input(x_test)

    # Setup neural network
    weight_decay = 1e-03
    net = dp.NeuralNetwork(
        layers=[
            dp.Affine(
                n_out=32,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=64,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=n_classes,
                weights=dp.Parameter(dp.AutoFiller()),
            ),
        ],
        loss=dp.SoftmaxCrossEntropy(),
    )

    # Train neural network
    def val_error():
        return np.mean(net.predict(val_input) != y_val)
    trainer = dp.GradientDescent(
        min_epochs=10,
        learn_rule=dp.Momentum(learn_rate=0.01, momentum=0.9),
    )
    trainer.train(net, train_input, val_error)

    # Evaluate on test data
    error = np.mean(net.predict(test_input) != y_test)
    print('Test error rate: %.4f' % error)
    assert error < 0.2
weight_gain_fc = 1.84
weight_decay_fc = 0.002
net = dp.NeuralNetwork(
    layers=[
        conv_layer(32),
        dp.Activation('relu'),
        pool_layer(),
        conv_layer(64),
        dp.Activation('relu'),
        pool_layer(),
        dp.Flatten(),
        dp.DropoutFullyConnected(
            n_out=512,
            dropout=0.5,
            weights=dp.Parameter(dp.AutoFiller(weight_gain_fc),
                                 weight_decay=weight_decay_fc),
        ),
        dp.Activation('relu'),
        dp.FullyConnected(
            n_out=dataset.n_classes,
            weights=dp.Parameter(dp.AutoFiller(weight_gain_fc)),
        ),
    ],
    loss=dp.SoftmaxCrossEntropy(),
)

# Train network
n_epochs = [50, 15, 15]
learn_rate = 0.05
momentum = 0.88
for i, epochs in enumerate(n_epochs):
    method='max',
)
net = dp.NeuralNetwork(
    layers=[
        conv_layer(32),
        dp.ReLU(),
        pool_layer(),
        conv_layer(32),
        dp.ReLU(),
        pool_layer(),
        conv_layer(64),
        dp.ReLU(),
        pool_layer(),
        dp.Flatten(),
        dp.Dropout(),
        dp.Affine(
            n_out=64,
            weights=dp.Parameter(dp.AutoFiller(gain=1.25), weight_decay=0.03),
        ),
        dp.ReLU(),
        dp.Affine(
            n_out=dataset.n_classes,
            weights=dp.Parameter(dp.AutoFiller(gain=1.25)),
        ),
    ],
    loss=dp.SoftmaxCrossEntropy(),
)
profile(net, train_input)
def run():
    # Prepare data
    dataset = dp.datasets.MNIST()
    x, y = dataset.data()
    x = x[:, np.newaxis, :, :].astype(dp.float_) / 255.0 - 0.5
    y = y.astype(dp.int_)
    train_idx, test_idx = dataset.split()
    x_train = x[train_idx]
    y_train = y[train_idx]
    x_test = x[test_idx]
    y_test = y[test_idx]
    train_input = dp.SupervisedInput(x_train, y_train, batch_size=128)
    test_input = dp.SupervisedInput(x_test, y_test)

    # Setup neural network
    nn = dp.NeuralNetwork(layers=[
        dp.Convolutional(
            n_filters=20,
            filter_shape=(5, 5),
            weights=dp.Parameter(dp.NormalFiller(sigma=0.1),
                                 penalty=('l2', 0.00001)),
        ),
        dp.Activation('relu'),
        dp.Pool(
            win_shape=(2, 2),
            strides=(2, 2),
            method='max',
        ),
        dp.Convolutional(
            n_filters=50,
            filter_shape=(5, 5),
            weights=dp.Parameter(dp.NormalFiller(sigma=0.1),
                                 penalty=('l2', 0.00001)),
        ),
        dp.Activation('relu'),
        dp.Pool(
            win_shape=(2, 2),
            strides=(2, 2),
            method='max',
        ),
        dp.Flatten(),
        dp.FullyConnected(
            n_output=500,
            weights=dp.NormalFiller(sigma=0.01),
        ),
        dp.FullyConnected(
            n_output=dataset.n_classes,
            weights=dp.NormalFiller(sigma=0.01),
        ),
        dp.MultinomialLogReg(),
    ],
    )

    # Train neural network
    def valid_error():
        return nn.error(test_input)
    trainer = dp.StochasticGradientDescent(
        max_epochs=15,
        learn_rule=dp.Momentum(learn_rate=0.1, momentum=0.9),
    )
    trainer.train(nn, train_input, valid_error)

    # Visualize convolutional filters to disk
    for layer_idx, layer in enumerate(nn.layers):
        if not isinstance(layer, dp.Convolutional):
            continue
        W = np.array(layer.params()[0].values)
        dp.misc.img_save(
            dp.misc.conv_filter_tile(W),
            os.path.join('mnist', 'convnet_layer_%i.png' % layer_idx))

    # Evaluate on test data
    error = nn.error(test_input)
    print('Test error rate: %.4f' % error)
def run():
    # Prepare data
    dataset = dp.datasets.MNIST()
    x, y = dataset.data()
    x = x.astype(dp.float_)[:, np.newaxis, :, :]
    y = y.astype(dp.int_)
    train_idx, test_idx = dataset.split()
    x_train = x[train_idx]
    y_train = y[train_idx]
    x_test = x[test_idx]
    y_test = y[test_idx]
    scaler = dp.UniformScaler(high=255.)
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    batch_size = 128
    train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size)
    test_input = dp.SupervisedInput(x_test, y_test)

    # Setup neural network
    net = dp.NeuralNetwork(layers=[
        dp.Convolutional(
            n_filters=32,
            filter_shape=(5, 5),
            weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.0001),
        ),
        dp.Activation('relu'),
        dp.Pool(
            win_shape=(3, 3),
            strides=(2, 2),
            method='max',
        ),
        dp.Convolutional(
            n_filters=64,
            filter_shape=(5, 5),
            weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.0001),
        ),
        dp.Activation('relu'),
        dp.Pool(
            win_shape=(3, 3),
            strides=(2, 2),
            method='max',
        ),
        dp.Flatten(),
        dp.FullyConnected(
            n_output=128,
            weights=dp.Parameter(dp.AutoFiller()),
        ),
        dp.FullyConnected(
            n_output=dataset.n_classes,
            weights=dp.Parameter(dp.AutoFiller()),
        ),
        dp.MultinomialLogReg(),
    ],
    )

    # Train neural network
    def val_error():
        return net.error(test_input)
    trainer = dp.StochasticGradientDescent(
        max_epochs=15,
        learn_rule=dp.Momentum(learn_rate=0.01, momentum=0.9),
    )
    trainer.train(net, train_input, val_error)

    # Visualize convolutional filters to disk
    for l, layer in enumerate(net.layers):
        if not isinstance(layer, dp.Convolutional):
            continue
        W = np.array(layer.params()[0].array)
        filepath = os.path.join('mnist', 'conv_layer_%i.png' % l)
        dp.misc.img_save(dp.misc.conv_filter_tile(W), filepath)

    # Evaluate on test data
    error = net.error(test_input)
    print('Test error rate: %.4f' % error)
])
for i in range(numberOfModels):
    cor[i] = np.corrcoef(cnn_rep[:, i], pred_train[:, i])[0, 1]
svm_model = OneVsRestClassifier(LinearSVC(random_state=0))
svm_model.fit(cnn_rep[:, 0:numberOfModels], cnn_targets)
prediction = svm_model.predict(pred)
acc = np.sum(prediction == y_test) / float(np.size(y_test))

# Setup neural network using the stacked autoencoder layers
net = dp.NeuralNetwork(
    [
        dp.FullyConnected(
            n_out=100,  # neuronNum[-1]
            weights=dp.Parameter(dp.AutoFiller()),
        ),
        dp.Sigmoid(),
        dp.FullyConnected(
            n_out=10,
            weights=dp.Parameter(dp.AutoFiller()),
        ),
    ],
    loss=dp.loss.MeanSquaredError(),
)

# Fine-tune neural network
train_input = dp.SupervisedInput(brain_rep, cnn_rep, batch_size=batch_size)
test_input = dp.Input(x_test)
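# A plausible fine-tuning step (assumed; the original snippet stops after
# preparing the inputs). It follows the trainer pattern used in the other
# examples here; the epoch count and learning rate are illustrative.
trainer = dp.StochasticGradientDescent(
    max_epochs=20,
    learn_rule=dp.Momentum(learn_rate=0.01, momentum=0.9),
)
trainer.train(net, train_input)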
def run():
    # Prepare data
    batch_size = 128
    dataset = dp.datasets.CIFAR10()
    x, y = dataset.data()
    y = y.astype(dp.int_)
    train_idx, test_idx = dataset.split()
    x_train = preprocess_imgs(x[train_idx])
    y_train = y[train_idx]
    x_test = preprocess_imgs(x[test_idx])
    y_test = y[test_idx]
    train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size)
    test_input = dp.SupervisedInput(x_test, y_test, batch_size=batch_size)

    # Setup neural network
    pool_kwargs = {
        'win_shape': (3, 3),
        'strides': (2, 2),
        'border_mode': 'same',
        'method': 'max',
    }
    nn = dp.NeuralNetwork(layers=[
        dp.Convolutional(
            n_filters=32,
            filter_shape=(5, 5),
            border_mode='same',
            weights=dp.Parameter(dp.NormalFiller(sigma=0.0001),
                                 penalty=('l2', 0.004), monitor=True),
        ),
        dp.Activation('relu'),
        dp.Pool(**pool_kwargs),
        dp.Convolutional(
            n_filters=32,
            filter_shape=(5, 5),
            border_mode='same',
            weights=dp.Parameter(dp.NormalFiller(sigma=0.01),
                                 penalty=('l2', 0.004), monitor=True),
        ),
        dp.Activation('relu'),
        dp.Pool(**pool_kwargs),
        dp.Convolutional(
            n_filters=64,
            filter_shape=(5, 5),
            border_mode='same',
            weights=dp.Parameter(dp.NormalFiller(sigma=0.01),
                                 penalty=('l2', 0.004), monitor=True),
        ),
        dp.Activation('relu'),
        dp.Pool(**pool_kwargs),
        dp.Flatten(),
        dp.FullyConnected(
            n_output=64,
            weights=dp.Parameter(dp.NormalFiller(sigma=0.1),
                                 penalty=('l2', 0.03)),
        ),
        dp.Activation('relu'),
        dp.FullyConnected(
            n_output=dataset.n_classes,
            weights=dp.Parameter(dp.NormalFiller(sigma=0.1),
                                 penalty=('l2', 0.03)),
        ),
        dp.MultinomialLogReg(),
    ],
    )

    # Train neural network
    n_epochs = [8, 8]
    learn_rate = 0.001

    def valid_error():
        return nn.error(test_input)
    for i, max_epochs in enumerate(n_epochs):
        lr = learn_rate / 10**i
        trainer = dp.StochasticGradientDescent(
            max_epochs=max_epochs,
            learn_rule=dp.Momentum(learn_rate=lr, momentum=0.9),
        )
        trainer.train(nn, train_input, valid_error)

    # Visualize convolutional filters to disk
    for l, layer in enumerate(nn.layers):
        if not isinstance(layer, dp.Convolutional):
            continue
        W = np.array(layer.params()[0].values)
        dp.misc.img_save(dp.misc.conv_filter_tile(W),
                         os.path.join('cifar10', 'convnet_layer_%i.png' % l))

    # Evaluate on test data
    error = nn.error(test_input)
    print('Test error rate: %.4f' % error)
def run():
    # Prepare MNIST data
    dataset = dp.datasets.MNIST()
    x, y = dataset.data(flat=True)
    x = x.astype(dp.float_)
    y = y.astype(dp.int_)
    train_idx, test_idx = dataset.split()
    x_train = x[train_idx]
    y_train = y[train_idx]
    x_test = x[test_idx]
    y_test = y[test_idx]
    scaler = dp.UniformScaler(high=255.)
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # Generate image pairs
    n_pairs = 100000
    x1 = np.empty((n_pairs, 28 * 28), dtype=dp.float_)
    x2 = np.empty_like(x1, dtype=dp.float_)
    y = np.empty(n_pairs, dtype=dp.int_)
    n_imgs = x_train.shape[0]
    n = 0
    while n < n_pairs:
        i = random.randint(0, n_imgs - 1)
        j = random.randint(0, n_imgs - 1)
        if i == j:
            continue
        x1[n, ...] = x_train[i]
        x2[n, ...] = x_train[j]
        if y_train[i] == y_train[j]:
            y[n] = 1
        else:
            y[n] = 0
        n += 1

    # Input to network
    train_input = dp.SupervisedSiameseInput(x1, x2, y, batch_size=128)
    test_input = dp.SupervisedInput(x_test, y_test)

    # Setup network
    net = dp.SiameseNetwork(
        siamese_layers=[
            dp.Dropout(),
            dp.FullyConnected(
                n_output=800,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.00001),
            ),
            dp.Activation('relu'),
            dp.FullyConnected(
                n_output=800,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.00001),
            ),
            dp.Activation('relu'),
            dp.FullyConnected(
                n_output=2,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.00001),
            ),
        ],
        loss_layer=dp.ContrastiveLoss(margin=0.5),
    )

    # Train network
    trainer = dp.StochasticGradientDescent(
        max_epochs=10,
        learn_rule=dp.RMSProp(learn_rate=0.001),
    )
    trainer.train(net, train_input)

    # Visualize feature space
    feat = net.features(test_input)
    colors = ['tomato', 'lawngreen', 'royalblue', 'gold', 'saddlebrown',
              'violet', 'turquoise', 'mediumpurple', 'darkorange', 'darkgray']
    plt.figure()
    for i in range(10):
        plt.scatter(feat[y_test == i, 0], feat[y_test == i, 1], s=3,
                    c=colors[i], linewidths=0)
    plt.legend([str(i) for i in range(10)], scatterpoints=1, markerscale=4)
    if not os.path.exists('mnist'):
        os.makedirs('mnist')
    plt.savefig(os.path.join('mnist', 'siamese_dists.png'), dpi=200)
    else:
        y[n] = 0
    n += 1

# Prepare network feeds
batch_size = 128
train_feed = dp.SupervisedSiameseFeed(x1, x2, y, batch_size=batch_size)

# Setup network
w_gain = 1.5
w_decay = 1e-4
net = dp.SiameseNetwork(
    siamese_layers=[
        dp.Affine(
            n_out=1024,
            weights=dp.Parameter(dp.AutoFiller(w_gain), weight_decay=w_decay),
        ),
        dp.ReLU(),
        dp.Affine(
            n_out=1024,
            weights=dp.Parameter(dp.AutoFiller(w_gain), weight_decay=w_decay),
        ),
        dp.ReLU(),
        dp.Affine(
            n_out=2,
            weights=dp.Parameter(dp.AutoFiller(w_gain)),
        ),
    ],
    loss=dp.ContrastiveLoss(margin=1.0),
)
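# A plausible training step (assumed; the original snippet stops after the
# network definition). It mirrors the feed-based GradientDescent pattern from
# the classification test above; the learning rate and epoch count are
# illustrative.
learn_rule = dp.Momentum(learn_rate=0.01 / batch_size, momentum=0.9)
trainer = dp.GradientDescent(net, train_feed, learn_rule)
trainer.train_epochs(n_epochs=10)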
import numpy as np
import deeppy as dp

net = dp.NeuralNetwork(layers=[
    dp.Convolutional(
        n_filters=32,
        filter_shape=(5, 5),
        weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.0001),
    ),
    dp.Activation('relu'),
    dp.Pool(
        win_shape=(3, 3),
        strides=(2, 2),
        method='max',
    ),
    dp.Convolutional(
        n_filters=64,
        filter_shape=(5, 5),
        weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.0001),
    ),
    dp.Activation('relu'),
    dp.Pool(
        win_shape=(3, 3),
        strides=(2, 2),
        method='max',
    ),
    dp.Flatten(),
    dp.FullyConnected(
        n_output=128,
        weights=dp.Parameter(dp.AutoFiller()),
    ),
    # The snippet breaks off here; the closing layers are assumed from the
    # matching MNIST convnet example above (10 output classes assumed).
    dp.FullyConnected(
        n_output=10,
        weights=dp.Parameter(dp.AutoFiller()),
    ),
    dp.MultinomialLogReg(),
])