def gridsearch_alpha(self, learning_rate, index, params=None):
    """Grid-search candidate learning rates for a fixed 1-hidden-layer net.

    For every rate in `learning_rate` a fresh NeuralNet is trained on
    self.X_training / self.y_training, its parameters are saved to disk,
    and its score on (self.n_sample2, self.n_test2) is recorded.

    :param learning_rate: 1-D array of learning rates to try.
    :param index: identifier used in the saved-parameter file names.
    :param params: optional weight matrix; its transpose is handed to the
        input layer as W (NOTE(review): lasagne's InputLayer takes no
        weights — confirm this kwarg is actually honoured).
    :return: np.ndarray of scores, aligned positionally with `learning_rate`.
    """
    self.l_in = ls.layers.InputLayer(shape=(None, n_input),
                                     input_var=None,
                                     W=params.T)
    self.l_hidden = ls.layers.DenseLayer(
        self.l_in, num_units=15, nonlinearity=ls.nonlinearities.rectify)
    self.network = l_out = ls.layers.DenseLayer(self.l_hidden, num_units=1)
    # BUG FIX: np.array([n]) created a single-element array containing n;
    # we need one result slot per candidate learning rate.
    list_results = np.zeros(learning_rate.shape[0], dtype=np.float64)
    # BUG FIX: store each score by position; the original indexed the array
    # with the float learning-rate value itself.
    for pos, item in enumerate(learning_rate):
        # Init neural net for this candidate rate
        net1 = NeuralNet(
            layers=self.network,
            update=nesterov_momentum,  # optimization method
            update_learning_rate=item,
            update_momentum=0.9,
            regression=True,  # regression problem, not classification
            max_epochs=800,   # we want to train this many epochs
            eval_size=0.4)
        net1.fit(self.X_training, self.y_training)
        self.pred = net1.predict(self.n_sample2)
        name_file = "GeneticParams/saveNeuralNetwork_%s_%s.tdn" % (item,
                                                                   index)
        net1.save_params_to(name_file)
        score_nn = net1.score(self.n_sample2, self.n_test2)
        list_results[pos] = score_nn
        print("index=%s,item=%f,score=%f" % (index, item, score_nn))
    return list_results
    def gridsearch_alpha(self, learning_rate, index, params=None):
        """Grid-search candidate learning rates for a fixed 1-hidden-layer net.

        Same contract as the sibling implementation above: trains one fresh
        NeuralNet per rate, saves each model's parameters under Params/, and
        returns the scores aligned positionally with `learning_rate`.

        :param learning_rate: 1-D array of learning rates to try.
        :param index: identifier used in the saved-parameter file names.
        :param params: unused here; kept for interface compatibility.
        :return: np.ndarray of scores, one per candidate rate.
        """
        self.l_in = ls.layers.InputLayer(shape=(None, n_input), input_var=None)
        self.l_hidden = ls.layers.DenseLayer(
            self.l_in, num_units=15, nonlinearity=ls.nonlinearities.rectify)
        self.network = l_out = ls.layers.DenseLayer(self.l_hidden, num_units=1)
        # BUG FIX: np.array([n]) created a single-element array containing n;
        # allocate one result slot per candidate rate instead.  The unused
        # `hidden_unit` local was also dropped.
        list_results = np.zeros(learning_rate.shape[0], dtype=np.float64)
        # BUG FIX: store each score by position; the original indexed the
        # array with the float learning-rate value itself.
        for pos, item in enumerate(learning_rate):
            # Init neural net for this candidate rate
            net1 = NeuralNet(
                layers=self.network,
                update=nesterov_momentum,  # optimization method
                update_learning_rate=item,
                update_momentum=0.9,
                regression=True,  # regression problem, not classification
                max_epochs=800,   # we want to train this many epochs
                eval_size=0.4)
            net1.fit(self.X_training, self.y_training)
            self.pred = net1.predict(self.n_sample2)
            name_file = "Params/saveNeuralNetwork_%s_%s.tdn" % (item, index)
            net1.save_params_to(name_file)
            score_nn = net1.score(self.n_sample2, self.n_test2)
            list_results[pos] = score_nn
            print("index=%f,item=%f,score=%f" % (index, item, score_nn))
        return list_results
Beispiel #3
0
class network(object):
    """Feed-forward regression net (input -> dense hidden -> 1 output).

    Training happens immediately in the constructor; the fitted nolearn
    NeuralNet is kept in self.net.
    """

    def __init__(self,X_train, Y_train):
        """Build the net sized from X_train's feature count and fit it.

        Hidden-layer width follows the rule of thumb
        ceil(2 * (n_features + 1) / 3).
        """
        #self.__hidden=0

        self.__hidden=int(math.ceil((2*(X_train.shape[1]+ 1))/3))
        self.net= NeuralNet(
            layers=[
                ('input', layers.InputLayer),
                ('hidden', layers.DenseLayer),
                ('output', layers.DenseLayer)
            ],
            input_shape=( None, X_train.shape[1] ),
            hidden_num_units=self.__hidden,
            #hidden_nonlinearity=nonlinearities.tanh,
            output_nonlinearity=None,
            batch_iterator_train=BatchIterator(batch_size=256),
            output_num_units=1,

            # stop when validation loss hasn't improved for 50 epochs
            on_epoch_finished=[EarlyStopping(patience=50)],
            update=momentum,
            update_learning_rate=theano.shared(np.float32(0.03)),
            update_momentum=theano.shared(np.float32(0.8)),
            regression=True,
            max_epochs=1000,
            verbose=1,
        )

        self.net.fit(X_train,Y_train)

    def predict(self,X):
        """Return the fitted net's predictions for X."""
        return self.net.predict(X)

    def showMetrics(self):
        """Plot training vs. validation loss per epoch on a log scale."""
        train_loss = np.array([i["train_loss"] for i in self.net.train_history_])
        valid_loss = np.array([i["valid_loss"] for i in self.net.train_history_])
        pyplot.plot(train_loss, linewidth=3, label="training")
        pyplot.plot(valid_loss, linewidth=3, label="validation")
        pyplot.grid()
        pyplot.legend()
        pyplot.xlabel("epoch")
        pyplot.ylabel("loss")
        # pyplot.ylim(1e-3, 1e-2)
        pyplot.yscale("log")
        pyplot.show()

    def saveNet(self,fname):
        """Persist the learned parameters to fname."""
        self.net.save_params_to(fname)

    def loadNet(self,fname):
        """Restore parameters previously written by saveNet."""
        self.net.load_params_from(fname)
Beispiel #4
0
class network(object):
    """Feed-forward regression net (input -> dense hidden -> 1 output).

    The constructor both builds and trains the underlying nolearn
    NeuralNet, which is then available as self.net.
    """

    def __init__(self, X_train, Y_train):
        """Size the hidden layer from X_train's feature count, then fit."""
        #self.__hidden=0

        # Rule of thumb: hidden units = ceil(2 * (n_features + 1) / 3).
        n_features = X_train.shape[1]
        self.__hidden = int(math.ceil((2 * (n_features + 1)) / 3))
        self.net = NeuralNet(
            layers=[('input', layers.InputLayer),
                    ('hidden', layers.DenseLayer),
                    ('output', layers.DenseLayer)],
            input_shape=(None, n_features),
            hidden_num_units=self.__hidden,
            output_num_units=1,
            #hidden_nonlinearity=nonlinearities.tanh,
            output_nonlinearity=None,
            batch_iterator_train=BatchIterator(batch_size=256),
            # stop when validation loss hasn't improved for 50 epochs
            on_epoch_finished=[EarlyStopping(patience=50)],
            update=momentum,
            update_learning_rate=theano.shared(np.float32(0.03)),
            update_momentum=theano.shared(np.float32(0.8)),
            regression=True,
            max_epochs=1000,
            verbose=1,
        )

        self.net.fit(X_train, Y_train)

    def predict(self, X):
        """Return the fitted net's predictions for X."""
        return self.net.predict(X)

    def showMetrics(self):
        """Plot training vs. validation loss per epoch on a log scale."""
        history = self.net.train_history_
        train_loss = np.array([epoch["train_loss"] for epoch in history])
        valid_loss = np.array([epoch["valid_loss"] for epoch in history])
        pyplot.plot(train_loss, linewidth=3, label="training")
        pyplot.plot(valid_loss, linewidth=3, label="validation")
        pyplot.grid()
        pyplot.legend()
        pyplot.xlabel("epoch")
        pyplot.ylabel("loss")
        # pyplot.ylim(1e-3, 1e-2)
        pyplot.yscale("log")
        pyplot.show()

    def saveNet(self, fname):
        """Persist the learned parameters to fname."""
        self.net.save_params_to(fname)

    def loadNet(self, fname):
        """Restore parameters previously written by saveNet."""
        self.net.load_params_from(fname)
Beispiel #5
0
def main(resume=None):
    """Train the ubiquitous-sequence CNN, optionally resuming a checkpoint.

    Loads ./data/ubiquitous_train.hkl, shuffles it, trains the conv net
    with Adam and a decaying learning rate, and saves the final parameters
    to ./models/net_params.pkl.

    :param resume: path to a previously saved parameter file, or None to
        train from scratch.
    """
    l = 300  # length of one input sequence
    dataset = './data/ubiquitous_train.hkl'
    print('Loading dataset {}...'.format(dataset))
    X_train, y_train = hkl.load(dataset)
    # (n, 4, 1, l) "image" layout expected by Conv2DLayer
    # (presumably 4 channels = one-hot DNA bases — TODO confirm)
    X_train = X_train.reshape(-1, 4, 1, l).astype(floatX)
    y_train = np.array(y_train, dtype='int32')
    # shuffle samples and labels with the same permutation
    indice = np.arange(X_train.shape[0])
    np.random.shuffle(indice)
    X_train = X_train[indice]
    y_train = y_train[indice]
    print('X_train shape: {}, y_train shape: {}'.format(X_train.shape, y_train.shape))

    layers = [
            (InputLayer, {'shape': (None, 4, 1, l)}),
            (Conv2DLayer, {'num_filters': 64, 'filter_size': (1, 4)}),
            (Conv2DLayer, {'num_filters': 64, 'filter_size': (1, 3)}),
            (Conv2DLayer, {'num_filters': 64, 'filter_size': (1, 3)}),
            (MaxPool2DLayer, {'pool_size': (1, 2)}),
            (Conv2DLayer, {'num_filters': 64, 'filter_size': (1, 2)}),
            (Conv2DLayer, {'num_filters': 64, 'filter_size': (1, 2)}),
            (Conv2DLayer, {'num_filters': 64, 'filter_size': (1, 2)}),
            (MaxPool2DLayer, {'pool_size': (1, 2)}),
            (DenseLayer, {'num_units': 64}),
            (DropoutLayer, {}),
            (DenseLayer, {'num_units': 64}),
            (DenseLayer, {'num_units': 2, 'nonlinearity': softmax})]

    # shared variable so AdjustVariable can decay the rate during training
    lr = theano.shared(np.float32(1e-4))

    net = NeuralNet(
            layers=layers,
            max_epochs=100,
            update=adam,
            update_learning_rate=lr,
            train_split=TrainSplit(eval_size=0.1),
            on_epoch_finished=[
                AdjustVariable(lr, target=1e-8, half_life=20)],
            verbose=4)

    # BUG FIX (idiom): compare to the None singleton with `is not`,
    # not `!=` (PEP 8).
    if resume is not None:
        net.load_params_from(resume)

    net.fit(X_train, y_train)

    net.save_params_to('./models/net_params.pkl')
Beispiel #6
0
class EmotionClassifier:
    """Convolutional network for 8-way emotion classification of face images.

    Wraps a nolearn NeuralNet: 8 conv layers interleaved with 4 max-pool
    layers, two dense hidden layers, and a softmax output over 8 classes.
    Input is a single-channel square image of side `face_size`.
    """

    def __init__(self,
                 face_size=192,
                 epochs=100,
                 learning_rate=theano.shared(np.cast['float32'](0.1))):
        # NOTE(review): `learning_rate` is accepted but never used below
        # (adadelta is configured without an explicit rate).  Also note the
        # default is evaluated once at def time, so all calls share one
        # theano shared variable.
        self.network = NeuralNet(
            layers=[('input', InputLayer), ('conv1', Conv2DLayer),
                    ('conv2', Conv2DLayer), ('pool1', MaxPool2DLayer),
                    ('conv3', Conv2DLayer), ('conv4', Conv2DLayer),
                    ('pool2', MaxPool2DLayer), ('conv5', Conv2DLayer),
                    ('conv6', Conv2DLayer), ('pool3', MaxPool2DLayer),
                    ('conv7', Conv2DLayer), ('conv8', Conv2DLayer),
                    ('pool4', MaxPool2DLayer), ('hidden1', DenseLayer),
                    ('hidden2', DenseLayer), ('output', DenseLayer)],
            # single-channel square input
            input_shape=(None, 1, face_size, face_size),
            conv1_num_filters=32,
            conv1_filter_size=(3, 3),
            conv1_nonlinearity=lasagne.nonlinearities.rectify,
            conv1_W=lasagne.init.GlorotUniform(),
            conv2_num_filters=32,
            conv2_filter_size=(3, 3),
            conv2_nonlinearity=lasagne.nonlinearities.rectify,
            conv2_W=lasagne.init.GlorotUniform(),
            pool1_pool_size=(2, 2),
            conv3_num_filters=32,
            conv3_filter_size=(3, 3),
            conv3_nonlinearity=lasagne.nonlinearities.rectify,
            conv3_W=lasagne.init.GlorotUniform(),
            conv4_num_filters=32,
            conv4_filter_size=(3, 3),
            conv4_nonlinearity=lasagne.nonlinearities.rectify,
            conv4_W=lasagne.init.GlorotUniform(),
            pool2_pool_size=(2, 2),
            conv5_num_filters=64,
            conv5_filter_size=(3, 3),
            conv5_nonlinearity=lasagne.nonlinearities.rectify,
            conv5_W=lasagne.init.GlorotUniform(),
            conv6_num_filters=32,
            conv6_filter_size=(3, 3),
            conv6_nonlinearity=lasagne.nonlinearities.rectify,
            conv6_W=lasagne.init.GlorotUniform(),
            pool3_pool_size=(2, 2),
            conv7_num_filters=32,
            conv7_filter_size=(3, 3),
            conv7_nonlinearity=lasagne.nonlinearities.rectify,
            conv7_W=lasagne.init.GlorotUniform(),
            conv8_num_filters=32,
            conv8_filter_size=(3, 3),
            conv8_nonlinearity=lasagne.nonlinearities.rectify,
            conv8_W=lasagne.init.GlorotUniform(),
            pool4_pool_size=(2, 2),
            hidden1_num_units=4096,
            hidden1_nonlinearity=lasagne.nonlinearities.rectify,
            hidden2_num_units=2048,
            output_nonlinearity=lasagne.nonlinearities.softmax,
            output_num_units=8,  # 8 output classes
            regression=False,
            update=adadelta,
            # update_momentum=theano.shared(np.cast['float32'](0.9)),
            # on_epoch_finished=[
            #     EarlyStopping(patience=20)
            #     AdjustVariable('update_learning_rate', start=learning_start, stop=learning_end),
            #     AdjustVariable('update_momentum', start=0.9, stop=0.999),
            # ],
            # batch_iterator_train=ShufflingBatchIteratorMixin,
            # batch_iterator_train=BatchIterator(251, shuffle=True),
            max_epochs=epochs,
            verbose=2)

    def train(self, x_train, y_train, epoch=0):
        """
        Fits training data to the Convolutional Neural Network
        :param epoch: number of epochs (0 means use the configured max_epochs)
        :param x_train: Training x values
        :param y_train: Training y values
        """
        if epoch == 0:
            self.network.fit(x_train, y_train)
        else:
            self.network.fit(x_train, y_train, epoch)

    def predict(self, image):
        """Return the network's class predictions for `image`."""
        return self.network.predict(image)

    def save_network_state(self, paramsname="params.npz"):
        """Write the network's learned parameters to `paramsname`."""
        self.network.save_params_to(paramsname)

    def load_network_state(self, paramsname="params.npz"):
        """Load parameters previously written by save_network_state."""
        self.network.load_params_from(paramsname)
Beispiel #7
0
# Iterative hard-negative-style training loop: each round mixes the current
# "stack" with N random coordinate samples, trains for EPOCHS epochs with a
# checkpoint/rollback scheme, then regrows the stack and the schedule.
# NOTE(review): relies on names defined elsewhere (N, K, KGROWTH, EPOCHS,
# EGROWTH, DECAY, nn, coords, create_stack, update_stack, shuffle,
# suppress_stdout, proba_before, proba_after, overlap) — confirm against
# the full script.
normal_stack = create_stack(N)

print("Made stack!")

for k in range(0, 1000):
    # sentinel far above any real loss so the first epoch always saves
    saved_accuracy = 10011.0
    data = np.array(normal_stack + random.sample(coords, N))
    # labels: first N samples are class 0, the N random ones class 1
    val = np.append(np.zeros(N), np.ones(N))
    data, val = shuffle(data, val)
    for i in range(0, int(EPOCHS)):
        nn.fit(data, val)
        # despite the name, this tracks validation *loss*, lower is better
        cur_accuracy = nn.train_history_[-1]['valid_loss']
        if cur_accuracy - 0.004 > saved_accuracy:
            # loss jumped: roll back to the last checkpoint
            print("Test Loss Jump! Loading previous network!")
            with suppress_stdout():
                nn.load_params_from("cachedgooglenn2.params")
        else:
            # loss acceptable: checkpoint the current weights
            nn.save_params_to('cachedgooglenn2.params')
            saved_accuracy = cur_accuracy
        nn.update_learning_rate *= DECAY

    normal_stack = update_stack(normal_stack, int(K*N), nn)

    print("Data Report: K={3:.2f}, Prob Before={0}, Prob After={1}, Overlap={2}".format(proba_before, proba_after, overlap, K))
    K += KGROWTH
    EPOCHS *= EGROWTH
    # reset recorded train losses so the next round's rollback check
    # starts from the sentinel again
    for r in range(len(nn.train_history_)):
        nn.train_history_[r]['train_loss'] = 10011.0

nn.save_params_to('googlenn2.params')
    verbose=1,
    )
# Train the autoencoder, persist it, and post-process its reconstructions.
# NOTE(review): Python 2 syntax (bare `print` statement); `ae`, `X_train`,
# `X_out`, `sigma`, `mu`, `X` come from earlier in the original script.
ae.fit(X_train, X_out)
# bare print = blank line (Python 2)
print
###  expect training / val error of about 0.087 with these parameters
###  if your GPU not fast enough, reduce the number of filters in the conv/deconv step



import pickle
import sys
# deep nolearn objects exceed the default pickle recursion limit
sys.setrecursionlimit(10000)

# NOTE(review): text-mode 'w' works for pickle only on Python 2; the file
# handle is also never closed explicitly.
pickle.dump(ae, open('mnist/conv_ae.pkl','w'))
#ae = pickle.load(open('mnist/conv_ae.pkl','r'))
ae.save_params_to('mnist/conv_ae.np')



# undo the input normalization, then quantize reconstructions to uint8 pixels
X_train_pred = ae.predict(X_train).reshape(-1, 28, 28) * sigma + mu
X_pred = np.rint(X_train_pred).astype(int)
X_pred = np.clip(X_pred, a_min = 0, a_max = 255)
X_pred = X_pred.astype('uint8')
print X_pred.shape , X.shape



###  show random inputs / outputs side by side
def get_picture_array(X, index):
    array = X[index].reshape(28,28)
Beispiel #9
0
def main():
    data = load_av_letters('data/allData_mouthROIs.mat')

    # create the necessary variable mappings
    data_matrix = data['dataMatrix']
    data_matrix_len = data_matrix.shape[0]
    targets_vec = data['targetsVec']
    vid_len_vec = data['videoLengthVec']
    iter_vec = data['iterVec']

    indexes = create_split_index(data_matrix_len, vid_len_vec, iter_vec)

    # split the data
    train_data = data_matrix[indexes == True]
    train_targets = targets_vec[indexes == True]
    test_data = data_matrix[indexes == False]
    test_targets = targets_vec[indexes == False]

    idx = [i for i, elem in enumerate(test_targets) if elem == 20]

    print(train_data.shape)
    print(test_data.shape)
    print(sum([train_data.shape[0], test_data.shape[0]]))

    # resize the input data to 40 x 30
    train_data_resized = resize_images(train_data).astype(np.float32)

    # normalize the inputs [0 - 1]
    train_data_resized = normalize_input(train_data_resized, centralize=True)

    test_data_resized = resize_images(test_data).astype(np.float32)
    test_data_resized = normalize_input(test_data_resized, centralize=True)

    dic = {}
    dic['trainDataResized'] = train_data_resized
    dic['testDataResized'] = test_data_resized

    """second experiment: overcomplete sigmoid encoder/decoder, squared loss"""
    encode_size = 2500
    sigma = 0.5

    # to get tied weights in the encoder/decoder, create this shared weightMatrix
    # 1200 x 2000
    w1, layer1 = build_encoder_layers(1200, 2500, sigma)

    ae1 = NeuralNet(
        layers=layer1,
        max_epochs=50,
        objective_loss_function=squared_error,
        update=adadelta,
        regression=True,
        verbose=1
    )

    load = True
    save = False
    if load:
        print('[LOAD] layer 1...')
        ae1.load_params_from('layer1.dat')
    else:
        print('[TRAIN] layer 1...')
        ae1.fit(train_data_resized, train_data_resized)

    # save params
    if save:
        print('[SAVE] layer 1...')
        ae1.save_params_to('layer1.dat')

    train_encoded1 = ae1.get_output('encoder', train_data_resized)  # 12293 x 2000

    w2, layer2 = build_encoder_layers(2500, 1250)
    ae2 = NeuralNet(
        layers=layer2,
        max_epochs=50,
        objective_loss_function=squared_error,
        update=adadelta,
        regression=True,
        verbose=1
    )

    load2 = True
    if load2:
        print('[LOAD] layer 2...')
        ae2.load_params_from('layer2.dat')
    else:
        print('[TRAIN] layer 2...')
        ae2.fit(train_encoded1, train_encoded1)

    save2 = False
    if save2:
        print('[SAVE] layer 2...')
        ae2.save_params_to('layer2.dat')

    train_encoded2 = ae2.get_output('encoder', train_encoded1)  # 12293 x 1250

    w3, layer3 = build_encoder_layers(1250, 600)
    ae3 = NeuralNet(
        layers=layer3,
        max_epochs=100,
        objective_loss_function=squared_error,
        update=adadelta,
        regression=True,
        verbose=1
    )

    load3 = True
    if load3:
        print('[LOAD] layer 3...')
        ae3.load_params_from('layer3.dat')
    else:
        ae3.fit(train_encoded2, train_encoded2)

    save3 = False
    if save3:
        print('[SAVE] layer 3...')
        ae3.save_params_to('layer3.dat')

    train_encoded3 = ae3.get_output('encoder', train_encoded2)  # 12293 x 1250

    w4, layer4 = build_bottleneck_layer(600, 100)
    ae4 = NeuralNet(
        layers=layer4,
        max_epochs=100,
        objective_loss_function=squared_error,
        update=adadelta,
        regression=True,
        verbose=1
    )

    load4 = False
    if load4:
        print('[LOAD] layer 4...')
        ae4.load_params_from('layer4.dat')
    else:
        print('[TRAIN] layer 4...')
        ae4.fit(train_encoded3, train_encoded3)

    save4 = True
    if save4:
        print('[SAVE] layer 4...')
        ae4.save_params_to('layer4.dat')

    test_enc1 = ae1.get_output('encoder', test_data_resized)
    test_enc2 = ae2.get_output('encoder', test_enc1)
    test_enc3 = ae3.get_output('encoder', test_enc2)
    test_enc4 = ae4.get_output('encoder', test_enc3)

    decoder4 = create_decoder(100, 600, w4.T)
    decoder4.initialize()
    decoder3 = create_decoder(600, 1250, w3.T)
    decoder3.initialize()
    decoder2 = create_decoder(1250, 2500, w2.T)
    decoder2.initialize()  # initialize the net
    decoder1 = create_decoder(2500, 1200, w1.T)
    decoder1.initialize()

    test_dec3 = decoder4.predict(test_enc4)
    test_dec2 = decoder3.predict(test_dec3)
    test_dec1 = decoder2.predict(test_dec2)
    X_pred = decoder1.predict(test_dec1)

    # plot_loss(ae3)
    # plot_loss(ae2)
    # plot_loss(ae1)
    tile_raster_images(X_pred[4625:4650, :], (30, 40), (5, 5), tile_spacing=(1, 1))
    plt.title('reconstructed')
    tile_raster_images(test_data_resized[4625:4650, :], (30, 40), (5, 5), tile_spacing=(1, 1))
    plt.title('original')
    plt.show()

    """
    accuracy = correct/float(len(y_test))
    print 'Correctly predicted: %f\n\n'%accuracy

    #confusion matrix to visualize the predictions
    cm = confusion_matrix(preds,y_test)
    plt.matshow(cm)
    plt.title('Confusion matrix')
    plt.colorbar()

    #plt.clim(0) to have the lower range of 0 (else darkblue will be minimum value)
    #plt.clim(0)
    plt.ylabel('Predicted label')
    plt.xlabel('True label')

    #save the weights
    net1.save_params_to('../data/saved_moedel')

    #visualize the weights and the confusion matrix
    #visualize.plot_conv_weights(net1.layers_['conv2d2'])
    plt.show()

    #show the "shifted" distribution of the specific feature
    cancer = []
    noncancer = []
    for point,result in zip(X_test,y_test):
        if result == 1:
            cancer.append(point[0][0][3])
            #plt.imshow(point[0], interpolation='nearest')
            #plt.title('Cancer')
        else:
            noncancer.append(point[0][0][3])
else:
    cnn_train = time.time();
    # training a new model
    for epoch in range(Epochs):
        # for every epoch
        for batch in patches_extract_all(Train):
            # for every batch
            inputs, targets = batch;
            # data augmentation
            inputs, targets = data_aug(inputs, targets);
            # run cnn.fit for 1 iteration
            cnn_fit = time.time();
            cnn.fit(inputs, targets.reshape((-1, 1 * 32 * 32)));
        # print 'fitting cnn took: ', time.time()-cnn_fit, 'sec.';
        # for every 10 epoch, print testing accuracy
        '''if epoch % 10 == 0:
			for batch in patches_extract_all(Test):
				inputs, targets = batch;
				predicts = cnn.predict(inputs);
				T = targets.reshape((-1, 1 * 32 * 32)).flatten().astype(np.int32);
				P = (predicts.flatten() > 0.5).astype(np.int32);
				print("======================= {:.4f} =======================".format(accuracy_score(T, P)));'''
    print 'training cnn took: ', time.time() - cnn_train, 'sec.';
    # save the trained model
    cnn.save_params_to('./data_cache/cnn_' + CNNCode + '-' + TrainCode + '.pkl');

# save predicted shadow mask on the test set
cnn_test = time.time();
cnn_pred_mask(cnn, Test);
print 'testing cnn took: ', time.time() - cnn_test, 'sec.';
Beispiel #12
0
        output_num_units=len(z),  # 30 target values
        #dropout1_p=0.1,
        #dropout2_p=0.1,
        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.001,
        update_momentum=0.3,

        regression=False,  # flag to indicate we're dealing with regression problem
        max_epochs=1000,  # we want to train this many epochs
        verbose=1,
        )

    net1.fit(X, y)

    with open('net1.pickle', 'wb') as f:
        pickle.dump(net1, f, -1)

    net1.save_params_to("net1_params.pkl")

    train_loss = np.array([i["train_loss"] for i in net1.train_history_])
    valid_loss = np.array([i["valid_loss"] for i in net1.train_history_])
    pyplot.plot(train_loss, linewidth=3, label="train")
    pyplot.plot(valid_loss, linewidth=3, label="valid")
    pyplot.grid()
    pyplot.legend()
    pyplot.xlabel("epoch")
    pyplot.ylabel("loss")
    #pyplot.ylim(1e-3, 1e-2)
    pyplot.yscale("log")
    pyplot.show()

"""Loading data and training Lasagne network using nolearn"""

trainVal2 = trainVal2
print trainImg2.shape

print "Ratio: " + str(1.0 - float(sum(trainVal2)) / float(len(trainVal2)))

best_accuracy = 0.0
print "Training Classifier: 80/20 split"
for i in [1, 2, 3, 4, 6, 8, 10, 40, 100, 250]:
    saved_accuracy = 0.0
    print "Size: " + str(i*2000)
    for epoch in range(0, 25):
        nn = nn.fit(trainImg2[0:2000*i], trainVal2[0:2000*i])
        cur_accuracy = nn.train_history_[-1]['valid_accuracy']
        best_accuracy = max(cur_accuracy, best_accuracy)
        #print "Current Accuracy: " + str(cur_accuracy)
        #print "Saved Accuracy: " + str(saved_accuracy)
        if cur_accuracy + 0.04 < saved_accuracy or cur_accuracy + 0.12 < best_accuracy:
            print "Accuracy Drop! Loading previous network!"
            nn.load_params_from("cachednn.params")
        else:
            nn.save_params_to('cachednn.params')
            saved_accuracy = cur_accuracy

nn.save_params_to('nn_stage2.params')

#pickle.dump(nn, open( "nn_stage2.pkl", "wb" ))
Beispiel #14
0
    def createCSAE(input_height, input_width, X_train, X_out):
        """Build, train and persist a convolutional (denoising) autoencoder.

        Corrupts X_train with binomial dropout, trains the conv/unpool
        autoencoder against the clean target X_out, pickles the model,
        then writes side-by-side original/prediction/noisy PNGs for 10
        random samples.

        NOTE(review): depends on many module-level settings defined
        elsewhere in this file (layers_size, filter_1..filter_6,
        activation, last_layer_activation, dropout_percent, learning_rate,
        update_momentum, batch_size, flip_batch, epochs, train_valid_split,
        folder_path, CONV_AE_* constants) — confirm against the full file.

        :param input_height: image height in pixels.
        :param input_width: image width in pixels.
        :param X_train: input images; corrupted in place (mutated!).
        :param X_out: clean reconstruction targets.
        :return: the fitted NeuralNet instance.
        """
        # denoising-AE corruption: randomly zero a fraction of input pixels
        X_train *= np.random.binomial(1, 1-dropout_percent, size=X_train.shape)

        cnn = NeuralNet(layers=[
            ('input', layers.InputLayer),
            ('conv1', layers.Conv2DLayer),
            ('conv11', layers.Conv2DLayer),
            ('conv12', layers.Conv2DLayer),
            ('pool1', layers.MaxPool2DLayer),
            ('conv2', layers.Conv2DLayer),
            ('conv21', layers.Conv2DLayer),
            ('conv22', layers.Conv2DLayer),
            ('pool2', layers.MaxPool2DLayer),
            ('conv3', layers.Conv2DLayer),
            ('conv31', layers.Conv2DLayer),
            ('conv32', layers.Conv2DLayer),
            ('unpool1', Unpool2DLayer),
            ('conv4', layers.Conv2DLayer),
            ('conv41', layers.Conv2DLayer),
            ('conv42', layers.Conv2DLayer),
            ('unpool2', Unpool2DLayer),
            ('conv5', layers.Conv2DLayer),
            ('conv51', layers.Conv2DLayer),
            ('conv52', layers.Conv2DLayer),
            ('conv6', layers.Conv2DLayer),
            ('output_layer', ReshapeLayer),
        ],

            input_shape=(None, 1, input_width, input_height),
            # Layer current size - 1x300x140

            conv1_num_filters=layers_size[0], conv1_filter_size=filter_1, conv1_nonlinearity=activation,
            # conv1_border_mode="same",
            conv1_pad="same",
            conv11_num_filters=layers_size[0], conv11_filter_size=filter_1, conv11_nonlinearity=activation,
            # conv11_border_mode="same",
            conv11_pad="same",
            conv12_num_filters=layers_size[0], conv12_filter_size=filter_1, conv12_nonlinearity=activation,
            # conv12_border_mode="same",
            conv12_pad="same",

            pool1_pool_size=(2, 2),

            conv2_num_filters=layers_size[1], conv2_filter_size=filter_2, conv2_nonlinearity=activation,
            # conv2_border_mode="same",
            conv2_pad="same",
            conv21_num_filters=layers_size[1], conv21_filter_size=filter_2, conv21_nonlinearity=activation,
            # conv21_border_mode="same",
            conv21_pad="same",
            conv22_num_filters=layers_size[1], conv22_filter_size=filter_2, conv22_nonlinearity=activation,
            # conv22_border_mode="same",
            conv22_pad="same",

            pool2_pool_size=(2, 2),

            conv3_num_filters=layers_size[2], conv3_filter_size=filter_3, conv3_nonlinearity=activation,
            # conv3_border_mode="same",
            conv3_pad="same",
            conv31_num_filters=layers_size[2], conv31_filter_size=filter_3, conv31_nonlinearity=activation,
            # conv31_border_mode="same",
            conv31_pad="same",
            conv32_num_filters=1, conv32_filter_size=filter_3, conv32_nonlinearity=activation,
            # conv32_border_mode="same",
            conv32_pad="same",

            unpool1_ds=(2, 2),

            conv4_num_filters=layers_size[3], conv4_filter_size=filter_4, conv4_nonlinearity=activation,
            # conv4_border_mode="same",
            conv4_pad="same",
            conv41_num_filters=layers_size[3], conv41_filter_size=filter_4, conv41_nonlinearity=activation,
            # conv41_border_mode="same",
            conv41_pad="same",
            conv42_num_filters=layers_size[3], conv42_filter_size=filter_4, conv42_nonlinearity=activation,
            # conv42_border_mode="same",
            conv42_pad="same",

            unpool2_ds=(2, 2),

            conv5_num_filters=layers_size[4], conv5_filter_size=filter_5, conv5_nonlinearity=activation,
            # conv5_border_mode="same",
            conv5_pad="same",
            conv51_num_filters=layers_size[4], conv51_filter_size=filter_5, conv51_nonlinearity=activation,
            # conv51_border_mode="same",
            conv51_pad="same",
            conv52_num_filters=layers_size[4], conv52_filter_size=filter_5, conv52_nonlinearity=activation,
            # conv52_border_mode="same",
            conv52_pad="same",

            conv6_num_filters=1, conv6_filter_size=filter_6, conv6_nonlinearity=last_layer_activation,
            # conv6_border_mode="same",
            conv6_pad="same",

            output_layer_shape=(([0], -1)),

            update_learning_rate=learning_rate,
            update_momentum=update_momentum,
            update=nesterov_momentum,
            train_split=TrainSplit(eval_size=train_valid_split),
            batch_iterator_train=FlipBatchIterator(batch_size=batch_size) if flip_batch else BatchIterator(batch_size=batch_size),
            regression=True,
            max_epochs=epochs,
            verbose=1,
            hiddenLayer_to_output=-11)

        cnn.fit(X_train, X_out)

        try:
            # NOTE(review): text-mode 'w' pickle is Python-2 specific
            pickle.dump(cnn, open(folder_path + CONV_AE_PKL, 'w'))
            # cnn = pickle.load(open(folder_path + CONV_AE_PKL,'r'))
            # cnn.save_weights_to(folder_path + CONV_AE_NP)
            cnn.save_params_to(folder_path + CONV_AE_PARAMS_PKL)
        except:  # NOTE(review): bare except hides all failure causes
            print ("Could not pickle cnn")

        X_pred = cnn.predict(X_train).reshape(-1, input_height, input_width)  # * sigma + mu
        # # X_pred = np.rint(X_pred).astype(int)
        # # X_pred = np.clip(X_pred, a_min=0, a_max=255)
        # # X_pred = X_pred.astype('uint8')
        #
        # try:
        #     trian_last_hiddenLayer = cnn.output_hiddenLayer(X_train)
        #     # test_last_hiddenLayer = cnn.output_hiddenLayer(test_x)
        #     pickle.dump(trian_last_hiddenLayer, open(folder_path + 'encode.pkl', 'w'))
        # except:
        #     print "Could not save encoded images"

        print ("Saving some images....")
        for i in range(10):
            index = np.random.randint(X_train.shape[0])
            print (index)

            def get_picture_array(X, index):
                # scale [0,1] floats to 0-255, clip, and upsample 4x for viewing
                # NOTE(review): np.int is removed in modern NumPy (use int)
                array = np.rint(X[index] * 256).astype(np.int).reshape(input_height, input_width)
                array = np.clip(array, a_min=0, a_max=255)
                return array.repeat(4, axis=0).repeat(4, axis=1).astype(np.uint8())

            original_image = Image.fromarray(get_picture_array(X_out, index))
            # original_image.save(folder_path + 'original' + str(index) + '.png', format="PNG")
            #
            # array = np.rint(trian_last_hiddenLayer[index] * 256).astype(np.int).reshape(input_height/2, input_width/2)
            # array = np.clip(array, a_min=0, a_max=255)
            # encode_image = Image.fromarray(array.repeat(4, axis=0).repeat(4, axis=1).astype(np.uint8()))
            # encode_image.save(folder_path + 'encode' + str(index) + '.png', format="PNG")

            # montage: [original | prediction | noisy input] side by side
            new_size = (original_image.size[0] * 3, original_image.size[1])
            new_im = Image.new('L', new_size)
            new_im.paste(original_image, (0, 0))
            pred_image = Image.fromarray(get_picture_array(X_pred, index))
            # pred_image.save(folder_path + 'pred' + str(index) + '.png', format="PNG")
            new_im.paste(pred_image, (original_image.size[0], 0))

            noise_image = Image.fromarray(get_picture_array(X_train, index))
            new_im.paste(noise_image, (original_image.size[0]*2, 0))
            new_im.save(folder_path+'origin_prediction_noise-'+str(index)+'.png', format="PNG")

            # diff = ImageChops.difference(original_image, pred_image)
            # diff = diff.convert('L')
            # diff.save(folder_path + 'diff' + str(index) + '.png', format="PNG")

            # plt.imshow(new_im)
            # new_size = (original_image.size[0] * 2, original_image.size[1])
            # new_im = Image.new('L', new_size)
            # new_im.paste(original_image, (0, 0))
            # pred_image = Image.fromarray(get_picture_array(X_train, index))
            # # pred_image.save(folder_path + 'noisyInput' + str(index) + '.png', format="PNG")
            # new_im.paste(pred_image, (original_image.size[0], 0))
            # new_im.save(folder_path+'origin_VS_noise-'+str(index)+'.png', format="PNG")
            # plt.imshow(new_im)

        return cnn
                 'flip_filters':False,
                 'W':layer_w_b['conv5'][0],
                 'b':layer_w_b['conv5'][1]}),
    (PoolLayer, {'name':'pool5', 'pool_size': 3, 'stride':3, 'ignore_border':False}), 
    (DenseLayer, {'name':'fc6',
                  'num_units': 4096, 
                  'W': layer_w_b['fc6'][0],
                  'b': layer_w_b['fc6'][1] }),
    (DropoutLayer, {'name': 'drop6', 'p': 0.5 }),
    (DenseLayer, {'name':'fc7',
                  'num_units': 4096, 
                  'W': layer_w_b['fc7'][0],
                  'b': layer_w_b['fc7'][1] })
]

# Wrap the pre-built layer list (with pretrained W/b) in a nolearn net,
# initialize it without training, and dump the parameters for reuse.
net0 = NeuralNet(
    layers=layers0,
    update=nesterov_momentum,
    update_learning_rate=0.01,
    update_momentum=0.9,
  #  regression=True,  # flag to indicate we're dealing with regression problem
  #  max_epochs=400,  # we want to train this many epochs
    verbose=1,
)

#initialize nolearn net
net0.initialize()

#save weights and biases to the file for future use
net0.save_params_to('nolearn_with_w_b.pkl')
        ('hidden', layers.DenseLayer),
        ('output', layers.DenseLayer),
        ],
    input_shape = (None, 1, 20, 20),
    conv_num_filters = 32, conv_filter_size = (3, 3), 
    pool_pool_size = (2, 2),
	hidden_num_units = 50,
    output_num_units = 2, output_nonlinearity = softmax,

    update_learning_rate=0.01,
    update_momentum = 0.9,

    regression = False,
    max_epochs = 60,
    verbose = 1,
    )

# Train the CNN and checkpoint the learned parameters.
net.fit(train_x, train_y)
net.save_params_to(CNN_Weights)

# Gather the per-epoch losses recorded by nolearn during training and
# render both learning curves on a logarithmic y-axis.
train_loss = np.array([epoch["train_loss"] for epoch in net.train_history_])
valid_loss = np.array([epoch["valid_loss"] for epoch in net.train_history_])
for curve, tag in ((train_loss, "train"), (valid_loss, "valid")):
    pyplot.plot(curve, linewidth=3, label=tag)
pyplot.grid()
pyplot.legend()
pyplot.xlabel("epoch")
pyplot.ylabel("loss")
pyplot.ylim(1e-3, 1.5e-1)
pyplot.yscale("log")
pyplot.show()
Beispiel #17
0
        'W': layer_w_b['fc6'][0],
        'b': layer_w_b['fc6'][1]
    }),
    (DropoutLayer, {
        'name': 'drop6',
        'p': 0.5
    }),
    (DenseLayer, {
        'name': 'fc7',
        'num_units': 4096,
        'W': layer_w_b['fc7'][0],
        'b': layer_w_b['fc7'][1]
    })
]

# Build the nolearn wrapper around the pre-seeded layer stack.  No fitting
# happens here; the net is initialized only so its (pretrained) parameters
# can be dumped to disk for later reuse.
net0 = NeuralNet(
    layers=layers0,
    verbose=1,
    update=nesterov_momentum,
    update_momentum=0.9,
    update_learning_rate=0.01,
    #  regression=True,  # flag to indicate we're dealing with regression problem
    #  max_epochs=400,  # we want to train this many epochs
)

# Allocate the underlying graph and parameter arrays.
net0.initialize()

# Persist the weights and biases for future use.
net0.save_params_to('nolearn_with_w_b.pkl')
Beispiel #18
0
def train_tupac(params_dict):
    """Train a mitosis-detection CNN on the TUPAC data with hard-negative mining.

    ``params_dict`` supplies every hyper-parameter (image/patch size, stack
    sizes ``N``/``MN``, mining ratio ``K``, epoch/decay/growth schedules,
    normalization flag, network name).  The routine reads the training images
    and their annotation CSVs from ``data/train_tupac/``, builds a
    GoogLeNet-based nolearn classifier, then alternates between fitting on a
    mix of positive (mitotic) patches and a "normal stack" of mined negatives,
    and refreshing that stack with the hardest negatives under the current
    network.  Parameters are checkpointed under ``data/`` and ``checkpoints/``.

    NOTE(review): the integer divisions (``PATCH_SIZE / 2``, ``VALID / 2``,
    ``len(row) / 2``) and ``open(..., 'rb')`` for csv assume Python 2
    semantics -- confirm before porting to Python 3.
    """
    global lookup, proba_before, proba_after, overlap

    ### CONSTANTS ###

    SIZE = params_dict['size']
    PATCH_SIZE = params_dict['patch_size']
    PATCH_GAP = int(PATCH_SIZE / 2)
    RADIUS = params_dict['radius']
    normalization = params_dict['normalization']

    net_name = params_dict['net_name']

    print("PATCH_SIZE: ", PATCH_SIZE)
    print("Network name: ", net_name)

    N = params_dict['N']
    MN = params_dict['MN']
    K = params_dict['K']
    EPOCHS = params_dict['epochs']
    DECAY = params_dict['decay']
    KGROWTH = params_dict['kgrowth']
    EGROWTH = params_dict['egrowth']

    # Number of held-out validation patches (half mitotic, half normal).
    VALID = 4000

    def inbounds(x, y):
        # True when (x, y) is far enough from the image border to cut a patch.
        return x < SIZE - PATCH_SIZE and x > PATCH_SIZE and y < SIZE - PATCH_SIZE and y > PATCH_SIZE

    def totuple(a):
        # Recursively convert a (possibly nested) sequence to nested tuples.
        try:
            return tuple(totuple(i) for i in a)
        except TypeError:
            return a

    def tolist(a):
        # Outer level becomes a list, inner levels tuples (via totuple).
        # NOTE(review): recursing through totuple rather than tolist looks
        # odd, but it keeps each coordinate triple a hashable tuple (matching
        # the entries in ``lookup``/``coords``) -- confirm before "fixing".
        try:
            return list(totuple(i) for i in a)
        except TypeError:
            return a

    ### TODO 1: READ ALL DATA ###
    ## Final result: coords contains indexed coords of all mitotic cells ##

    print("\n\nData Reading.")

    img = []
    img_aux = []
    coords = []

    total_coords = []

    cnt = 0

    print("\nReading in original image files:")
    for imgfile in glob.iglob("data/train_tupac/*.jpg"):
        print("\n" + imgfile, end="")
        annotfile = imgfile[:-3] + "csv"
        img_vals = plt.imread(imgfile)

        if normalization:
            # NOTE(review): passes the accumulated list ``img`` rather than
            # the freshly read ``img_vals`` -- looks like a bug, confirm.
            cntr = Controller(img)
            img_norm, _, __, __ = macenko(cntr)
            img.append(img_norm)
        else:
            img.append(img_vals)

        csvReader = csv.reader(open(annotfile, 'rb'))
        for row in csvReader:
            minx, miny, maxx, maxy = (SIZE, SIZE, 0, 0)
            random_coords = []
            # Rows are flat x,y pairs; keep only coordinates that leave a
            # full-patch margin inside the image.
            for i in range(0, len(row) / 2):
                xv, yv = (int(row[2 * i]), int(row[2 * i + 1]))
                if xv > PATCH_SIZE / 2 + 1 and yv > PATCH_SIZE / 2 + 1 and xv < SIZE - PATCH_SIZE / 2 - 1 and yv < SIZE - PATCH_SIZE / 2 - 1:
                    coords.append((yv, xv, cnt))
                    total_coords.append((yv, xv, cnt))

        cnt += 1

    print("\n")
    print('Num images: ', len(img))
    print(len(coords))

    print('not synthesizing image through reflection')

    def get_patches(coords, patchsize=PATCH_SIZE):
        # Cut (patchsize, patchsize, 3) windows centered on each
        # (x, y, img_num) triple and scale pixel values into [0, 1].
        # NOTE(review): ``img[img_num, a:b, c:d, :]`` tuple-indexes ``img``,
        # which above is built as a Python *list* of arrays; tuple indexing
        # requires a numpy array -- verify this code path actually runs.
        patches = np.zeros((len(coords), patchsize, patchsize, 3))
        i = 0
        for (x, y, img_num) in coords:
            #print x, y
            #print (x - patchsize/2), (x + patchsize/2 + 1), (y - patchsize/2), (y + patchsize/2 + 1)
            patches[i] = img[img_num,
                             (x - patchsize / 2):(x + patchsize / 2 + 1),
                             (y - patchsize / 2):(y + patchsize / 2 + 1), :]
            patches[i] = np.divide(patches[i], 255.0)
            i += 1
        return patches

    ### TODO 2: CREATE AND DESIGN CNN ####
    ## Final result: nn contains desired CNN ##

    print("\n\nCreating and Designing CNN.")

    def roc_robust(y_true, y_proba):
        # ROC AUC that returns 0.0 instead of raising when only one class is
        # present in y_true.
        if sum(y_true) == 0 or sum(y_true) == len(y_true):
            return 0.0
        else:
            return roc_auc_score(y_true, y_proba)

    print("Building Image Perturbation Models/Callbacks:")

    train_iterator_mixins = [
        RandomFlipBatchIteratorMixin,
        AffineTransformBatchIteratorMixin,
        #MeanSubtractBatchiteratorMixin
    ]
    TrainIterator = make_iterator('TrainIterator', train_iterator_mixins)

    test_iterator_mixins = [
        RandomFlipBatchIteratorMixin,
        #MeanSubtractBatchiteratorMixin
    ]
    TestIterator = make_iterator('TestIterator', test_iterator_mixins)

    mean_value = np.mean(np.mean(np.mean(img)))

    train_iterator_kwargs = {
        'batch_size': 20,
        'affine_p': 0.5,
        'affine_scale_choices': np.linspace(start=0.85, stop=1.6, num=10),
        'flip_horizontal_p': 0.5,
        'flip_vertical_p': 0.5,
        'affine_translation_choices': np.arange(-5, 6, 1),
        'affine_rotation_choices': np.linspace(start=-30.0, stop=30.0, num=20),
        #'mean': mean_value,
    }
    train_iterator_tmp = TrainIterator(**train_iterator_kwargs)

    test_iterator_kwargs = {
        'batch_size': 20,
        'flip_horizontal_p': 0.5,
        'flip_vertical_p': 0.5,
        #'mean': mean_value,
    }
    test_iterator_tmp = TestIterator(**test_iterator_kwargs)

    def color_transform(image):
        # Independently jitter each channel by up to +/-5% with probability
        # 0.15, then clamp to [-1, 1] as float32.
        if random.uniform(0.0, 1.0) < 0.15:
            image[0] = np.multiply(image[0], random.uniform(0.95, 1.05))
        if random.uniform(0.0, 1.0) < 0.15:
            image[1] = np.multiply(image[1], random.uniform(0.95, 1.05))
        if random.uniform(0.0, 1.0) < 0.15:
            image[2] = np.multiply(image[2], random.uniform(0.95, 1.05))
        return np.clip(image, -1.0, 1.0).astype(np.float32)

    # Boolean mask of pixels outside the inscribed circle; those corners get
    # zeroed in every patch below.
    radius = PATCH_GAP
    kernel = np.zeros((2 * radius + 1, 2 * radius + 1))
    y, x = np.ogrid[-radius:radius + 1, -radius:radius + 1]
    mask = x**2 + y**2 >= radius**2

    class CustomBatchIterator(BatchIterator):
        # Adapts coordinate-triple batches into image batches: cuts patches,
        # applies color jitter and the circular mask, then delegates further
        # augmentation to the wrapped iterator.
        def __init__(self, batch_size, built_iterator):
            super(CustomBatchIterator, self).__init__(batch_size=batch_size)
            self.iter = built_iterator

        def transform(self, Xb, yb):
            Xb = get_patches(Xb)
            Xb = Xb.astype(np.float32).swapaxes(1, 3)
            for i in range(0, len(yb)):
                Xb[i] = color_transform(Xb[i])
                for c in range(0, 3):
                    Xb[i, c][mask] = 0.0
            yb = yb.astype(np.uint8)
            Xb, yb = self.iter.transform(Xb, yb)
            #for i in range(0, len(yb)):
            #    plt.imsave("img" + str(yb[i]) + "num" + str(i) + ".png", Xb[i].swapaxes(0, 2))
            return Xb, yb

    train_iterator = CustomBatchIterator(batch_size=20,
                                         built_iterator=train_iterator_tmp)
    test_iterator = CustomBatchIterator(batch_size=20,
                                        built_iterator=test_iterator_tmp)

    # Model Specifications
    net = phf.build_GoogLeNet(PATCH_SIZE, PATCH_SIZE)

    ### TODO 3: DEFINE METHODS TO WORK WITH NORMAL_STACKS ###
    ## Final result: update_stack(stack, times) ###

    proba_before = 0.0
    proba_after = 0.0
    overlap = 0.0

    def prob(coord, net):
        # Mean mitosis probability of a patch and its transpose (cheap
        # test-time augmentation), evaluated with the raw image iterator
        # (the patch is already cut, so the patch-cutting iterator is
        # temporarily swapped out).
        patch = get_patches([coord]).swapaxes(1, 3).astype(np.float32)
        patch2 = patch.swapaxes(2, 3)
        saved_iter = net.batch_iterator_test
        net.batch_iterator_test = test_iterator_tmp
        prob = net.predict_proba(patch)[0, 1]
        prob2 = net.predict_proba(patch2)[0, 1]
        net.batch_iterator_test = saved_iter
        return (prob + prob2) / 2.0

    def create_stack(length):
        # Sample ``length`` fresh random coordinate triples; ``lookup``
        # guards against duplicates and known mitotic positives.
        global lookup
        normal_stack = []
        while len(normal_stack) < length:
            triple = (random.randint(PATCH_SIZE / 2,
                                     SIZE - PATCH_SIZE / 2 - 1),
                      random.randint(PATCH_SIZE / 2,
                                     SIZE - PATCH_SIZE / 2 - 1),
                      random.randint(-len(img_aux),
                                     len(img) - 1))
            if triple not in lookup:
                normal_stack.append(triple)
                lookup.add(triple)
        return normal_stack

    def update_stack(normal_stack, iters, net):
        # Hard-negative mining: add up to ``iters`` random candidates, score
        # everything with the current net, and keep the highest-probability
        # (hardest) negatives.  Side effects: records mean probability
        # before/after and the surviving fraction of the old stack in the
        # module globals for the caller's progress report.
        global lookup, proba_before, proba_after, overlap
        init_len = len(normal_stack)
        probs = []
        for i in range(0, len(normal_stack)):
            probs.append(prob(normal_stack[i], net))
        proba_before = np.mean(probs)

        for i in range(0, iters):
            triple = (random.randint(PATCH_SIZE / 2,
                                     SIZE - PATCH_SIZE / 2 - 1),
                      random.randint(PATCH_SIZE / 2,
                                     SIZE - PATCH_SIZE / 2 - 1),
                      random.randint(-len(img_aux),
                                     len(img) - 1))
            if triple not in lookup:
                normal_stack.append(triple)
                probs.append(prob(triple, net))
                lookup.add(triple)

        # Keep the init_len hardest negatives (descending probability).
        sort_idx = np.argsort(probs)[::-1]
        normal_stack = np.array(normal_stack)[sort_idx, :]
        normal_stack = normal_stack[0:init_len]
        normal_stack = tolist(normal_stack)

        probs = np.array(probs)
        probs = probs[sort_idx]
        probs = probs[0:init_len]

        proba_after = np.mean(probs)

        # Fraction of the previous stack that survived the re-ranking.
        overlap = 0.0
        for i in sort_idx[0:init_len]:
            if i < init_len:
                overlap += 1.0

        overlap /= init_len

        return normal_stack

    lookup = set(total_coords)

    # Carve out a balanced validation set: VALID/2 mitotic + VALID/2 normal.
    coords = shuffle(coords)
    valid_sample_mitosis = coords[0:(VALID / 2)]
    coords = coords[(VALID / 2):(len(coords))]
    valid_sample_normal = create_stack(VALID / 2)

    valid_sample = valid_sample_mitosis + valid_sample_normal

    valid_sample_y = np.append(np.ones(VALID / 2), np.zeros(VALID / 2))

    # NOTE(review): np.append flattens its inputs, so this rebuilds
    # ``lookup`` from scalar values rather than coordinate triples --
    # verify the intent (membership tests above use triples).
    lookup = set(np.append(total_coords, valid_sample_normal))

    def get_validation(train_X, train_y, net):
        # Fixed train/valid split handed to nolearn instead of a random one.
        return train_X, valid_sample, train_y, valid_sample_y

    nn = NeuralNet(
        net['softmax'],
        max_epochs=1,
        update=adam,
        update_learning_rate=.0001,  #start with a really low learning rate
        #objective_l2=0.0001,

        # batch iteration params
        batch_iterator_train=train_iterator,
        batch_iterator_test=test_iterator,
        train_split=get_validation,
        verbose=3,
    )

    ### TODO 4: Train network on normal stacks ###
    ## Final result: done! ###

    #print('\nLoading Data from Previous Network')

    #nn.load_params_from("cachedgooglenn2.params")

    print("\n\nTraining Network!")

    normal_stack = create_stack(N)

    print("Made stack!")

    for k in range(0, 1000):
        # Sentinel loss: any real validation loss will be lower.
        saved_accuracy = 10011.0
        print("Length of coords:", len(coords), "length of sample", MN)
        data = np.array(normal_stack + random.sample(coords, MN))
        val = np.append(np.zeros(N), np.ones(MN))
        data, val = shuffle(data, val)
        for i in range(0, int(EPOCHS)):
            nn.fit(data, val)
            cur_accuracy = nn.train_history_[-1]['valid_loss']
            # Roll back to the last good checkpoint if validation loss jumps.
            if cur_accuracy - 0.004 > saved_accuracy:
                print("Test Loss Jump! Loading previous network!")
                with suppress_stdout():
                    nn.load_params_from("data/" + str(net_name) + ".params")
            else:
                nn.save_params_to("data/" + net_name + ".params")
                saved_accuracy = cur_accuracy
            nn.update_learning_rate *= DECAY

        normal_stack = update_stack(normal_stack, int(K * N), nn)

        print(
            "Data Report: K={3:.2f}, Prob Before={0}, Prob After={1}, Overlap={2}"
            .format(proba_before, proba_after, overlap, K))

        accuracy = nn.train_history_[-1]['valid_accuracy']
        nn.save_params_to('checkpoints/' + str(net_name) + '-checkpoint' +
                          str(k) + '-validacc' + str(accuracy) + '.params')

        # Mine more aggressively and train longer as rounds progress; reset
        # recorded train losses so stale values don't trigger the rollback.
        K += KGROWTH
        EPOCHS *= EGROWTH
        for r in range(len(nn.train_history_)):
            nn.train_history_[r]['train_loss'] = 10011.0

    nn.save_params_to("data/" + str(net_name) + ".params")
def main():
    """Train a convolutional autoencoder on MNIST and visualize its output.

    Downloads mnist.pkl.gz on first use (Python 2 urllib API), z-normalizes
    the images, fits a conv/pool -> dense bottleneck -> unpool/deconv
    autoencoder, pickles the trained net, and writes side-by-side
    input/reconstruction images via get_random_images.

    NOTE(review): everything after the first ``return`` (the encode/decode
    experiment) is unreachable as written -- presumably disabled on purpose.
    """
    # load data set
    fname = 'mnist/mnist.pkl.gz'
    if not os.path.isfile(fname):
        testfile = urllib.URLopener()
        testfile.retrieve("http://deeplearning.net/data/mnist/mnist.pkl.gz", fname)
    f = gzip.open(fname, 'rb')
    train_set, valid_set, test_set = cPickle.load(f)
    f.close()
    X, y = train_set
    X = np.rint(X * 256).astype(np.int).reshape((-1, 1, 28, 28))  # convert to (0,255) int range (we'll do our own scaling)
    mu, sigma = np.mean(X.flatten()), np.std(X.flatten())

    # z-score the inputs with the global mean/std computed above
    X_train = X.astype(np.float64)
    X_train = (X_train - mu) / sigma
    X_train = X_train.astype(np.float32)

    # we need our target to be 1 dimensional
    X_out = X_train.reshape((X_train.shape[0], -1))

    conv_filters = 32
    deconv_filters = 32
    filter_size = 7
    epochs = 20
    encode_size = 40
    layerParam= [
        (layers.InputLayer, {'name': 'input_layer', 'shape': (None, 1, 28, 28)}),
        (layers.Conv2DLayer, {'name': 'conv', 'num_filters': conv_filters, 
            'filter_size': (filter_size, filter_size), 'nonlinearity': None}),
        (layers.MaxPool2DLayer, {'name': 'pool', 'pool_size': (2, 2)}),
        (layers.ReshapeLayer, {'name': 'flatten', 'shape': (([0], -1))}),
        (layers.DenseLayer, {'name': 'encode_layer', 'num_units': encode_size}),
        (layers.DenseLayer, {'name': 'hidden', 
            'num_units': deconv_filters * (28 +filter_size - 1)**2 /4}),
        (layers.ReshapeLayer, {'name': 'unflatten', 
            'shape': (([0], deconv_filters, (28 + filter_size - 1) / 2, (28 + filter_size - 1) / 2 ))}),
        (Unpool2DLayer, {'name': 'unpool', 'ds': (2, 2)}),
        (layers.Conv2DLayer, {'name': 'deconv', 'num_filters': 1,
            'filter_size': (filter_size, filter_size), 'nonlinearity': None}),
        (layers.ReshapeLayer, {'name': 'output_layer', 'shape': (([0], -1))})
    ]

    ae = NeuralNet(
        layers=layerParam,
        update_learning_rate = 0.01,
        update_momentum = 0.975,
        batch_iterator_train=FlipBatchIterator(batch_size=128),
        regression=True,
        max_epochs= epochs,
        verbose=1,
        )
    ae.fit(X_train, X_out)
    print '---------------train end'
    print
    ###  expect training / val error of about 0.087 with these parameters
    ###  if your GPU not fast enough, reduce the number of filters in the conv/deconv step

    # handle the default limitation of pickle
    sys.setrecursionlimit(10000)
    pickle.dump(ae, open('mnist/conv_ae.pkl','w'))
    # ae = pickle.load(open('mnist/conv_ae.pkl','r'))
    ae.save_params_to('mnist/conv_ae.np')


    # De-normalize predictions back to the 0..255 uint8 range for image I/O.
    X_train_pred = ae.predict(X_train).reshape(-1, 28, 28) * sigma + mu
    X_pred = np.rint(X_train_pred).astype(int)
    X_pred = np.clip(X_pred, a_min = 0, a_max = 255)
    X_pred = X_pred.astype('uint8')
    print X_pred.shape , X.shape


    ###  show random inputs / outputs side by side

    for i in range(0, 10):
        get_random_images(X, X_pred, i)

    return

    ## we find the encode layer from our ae, and use it to define an encoding function
    ## (unreachable below this point -- see docstring)

    encode_layer_index = map(lambda pair : pair[0], ae.layers).index('encode_layer')
    print '----------encode_layer_index:', encode_layer_index
    encode_layer = ae.get_all_layers()[encode_layer_index]

    def get_output_from_nn(last_layer, X):
        # Evaluate the net up to last_layer in batches of 128 to bound memory.
        indices = np.arange(128, X.shape[0], 128)
        sys.stdout.flush()

        # not splitting into batches can cause a memory error
        X_batches = np.split(X, indices)
        out = []
        for count, X_batch in enumerate(X_batches):
            out.append(layers.get_output(last_layer, X_batch).eval())
            sys.stdout.flush()
        return np.vstack(out)

    def encode_input(X):
        return get_output_from_nn(encode_layer, X)
    X_encoded = encode_input(X_train)

    next_layer = ae.get_all_layers()[encode_layer_index + 1]
    final_layer = ae.get_all_layers()[-1]
    new_layer = layers.InputLayer(shape = (None, encode_layer.num_units))

    # N.B after we do this, we won't be able to use the original autoencoder , as the layers are broken up
    next_layer.input_layer = new_layer

    def decode_encoded_input(X):
        return get_output_from_nn(final_layer, X)

    X_decoded = decode_encoded_input(X_encoded) * sigma + mu

    X_decoded = np.rint(X_decoded ).astype(int)
    X_decoded = np.clip(X_decoded, a_min = 0, a_max = 255)
    X_decoded  = X_decoded.astype('uint8')
    print X_decoded.shape

    ### check it worked :

    for i in range(10):
        pic_array = get_picture_array(X_decoded, np.random.randint(len(X_decoded)))
        image = Image.fromarray(pic_array)
        image.save('data/t_' + str(i) + '.png', format="PNG")


    return
Beispiel #20
0
def main():
    """Greedily pre-train a 4-level stacked autoencoder on mouth-ROI videos.

    Loads data/allData_mouthROIs.mat, splits it into train/test with the
    precomputed split index, resizes/normalizes the frames, then trains (or
    loads, depending on the per-layer flags) four autoencoders layer by layer
    (1200 -> 2500 -> 1250 -> 600 -> 100).  Finally it chains the encoders,
    mirrors them with tied-weight decoders (transposed weight matrices) and
    plots reconstructions next to the originals.
    """
    data = load_av_letters('data/allData_mouthROIs.mat')

    # create the necessary variable mappings
    data_matrix = data['dataMatrix']
    data_matrix_len = data_matrix.shape[0]
    targets_vec = data['targetsVec']
    vid_len_vec = data['videoLengthVec']
    iter_vec = data['iterVec']

    indexes = create_split_index(data_matrix_len, vid_len_vec, iter_vec)

    # split the data
    train_data = data_matrix[indexes == True]
    train_targets = targets_vec[indexes == True]
    test_data = data_matrix[indexes == False]
    test_targets = targets_vec[indexes == False]

    # NOTE(review): idx is computed but never used below.
    idx = [i for i, elem in enumerate(test_targets) if elem == 20]

    print(train_data.shape)
    print(test_data.shape)
    print(sum([train_data.shape[0], test_data.shape[0]]))

    # resize the input data to 40 x 30
    train_data_resized = resize_images(train_data).astype(np.float32)

    # normalize the inputs [0 - 1]
    train_data_resized = normalize_input(train_data_resized, centralize=True)

    test_data_resized = resize_images(test_data).astype(np.float32)
    test_data_resized = normalize_input(test_data_resized, centralize=True)

    dic = {}
    dic['trainDataResized'] = train_data_resized
    dic['testDataResized'] = test_data_resized
    """second experiment: overcomplete sigmoid encoder/decoder, squared loss"""
    encode_size = 2500
    sigma = 0.5

    # to get tied weights in the encoder/decoder, create this shared weightMatrix
    # 1200 x 2000
    w1, layer1 = build_encoder_layers(1200, 2500, sigma)

    ae1 = NeuralNet(layers=layer1,
                    max_epochs=50,
                    objective_loss_function=squared_error,
                    update=adadelta,
                    regression=True,
                    verbose=1)

    # Per-layer load/save flags decide whether each autoencoder is trained
    # from scratch or restored from its .dat checkpoint.
    load = True
    save = False
    if load:
        print('[LOAD] layer 1...')
        ae1.load_params_from('layer1.dat')
    else:
        print('[TRAIN] layer 1...')
        ae1.fit(train_data_resized, train_data_resized)

    # save params
    if save:
        print('[SAVE] layer 1...')
        ae1.save_params_to('layer1.dat')

    train_encoded1 = ae1.get_output('encoder',
                                    train_data_resized)  # 12293 x 2000

    w2, layer2 = build_encoder_layers(2500, 1250)
    ae2 = NeuralNet(layers=layer2,
                    max_epochs=50,
                    objective_loss_function=squared_error,
                    update=adadelta,
                    regression=True,
                    verbose=1)

    load2 = True
    if load2:
        print('[LOAD] layer 2...')
        ae2.load_params_from('layer2.dat')
    else:
        print('[TRAIN] layer 2...')
        ae2.fit(train_encoded1, train_encoded1)

    save2 = False
    if save2:
        print('[SAVE] layer 2...')
        ae2.save_params_to('layer2.dat')

    train_encoded2 = ae2.get_output('encoder', train_encoded1)  # 12293 x 1250

    w3, layer3 = build_encoder_layers(1250, 600)
    ae3 = NeuralNet(layers=layer3,
                    max_epochs=100,
                    objective_loss_function=squared_error,
                    update=adadelta,
                    regression=True,
                    verbose=1)

    load3 = True
    if load3:
        print('[LOAD] layer 3...')
        ae3.load_params_from('layer3.dat')
    else:
        ae3.fit(train_encoded2, train_encoded2)

    save3 = False
    if save3:
        print('[SAVE] layer 3...')
        ae3.save_params_to('layer3.dat')

    train_encoded3 = ae3.get_output('encoder', train_encoded2)  # 12293 x 1250

    w4, layer4 = build_bottleneck_layer(600, 100)
    ae4 = NeuralNet(layers=layer4,
                    max_epochs=100,
                    objective_loss_function=squared_error,
                    update=adadelta,
                    regression=True,
                    verbose=1)

    load4 = False
    if load4:
        print('[LOAD] layer 4...')
        ae4.load_params_from('layer4.dat')
    else:
        print('[TRAIN] layer 4...')
        ae4.fit(train_encoded3, train_encoded3)

    save4 = True
    if save4:
        print('[SAVE] layer 4...')
        ae4.save_params_to('layer4.dat')

    # Encode the test set through the full 4-level encoder chain.
    test_enc1 = ae1.get_output('encoder', test_data_resized)
    test_enc2 = ae2.get_output('encoder', test_enc1)
    test_enc3 = ae3.get_output('encoder', test_enc2)
    test_enc4 = ae4.get_output('encoder', test_enc3)

    # Mirror decoders share the (transposed) encoder weight matrices.
    decoder4 = create_decoder(100, 600, w4.T)
    decoder4.initialize()
    decoder3 = create_decoder(600, 1250, w3.T)
    decoder3.initialize()
    decoder2 = create_decoder(1250, 2500, w2.T)
    decoder2.initialize()  # initialize the net
    decoder1 = create_decoder(2500, 1200, w1.T)
    decoder1.initialize()

    test_dec3 = decoder4.predict(test_enc4)
    test_dec2 = decoder3.predict(test_dec3)
    test_dec1 = decoder2.predict(test_dec2)
    X_pred = decoder1.predict(test_dec1)

    # plot_loss(ae3)
    # plot_loss(ae2)
    # plot_loss(ae1)
    tile_raster_images(X_pred[4625:4650, :], (30, 40), (5, 5),
                       tile_spacing=(1, 1))
    plt.title('reconstructed')
    tile_raster_images(test_data_resized[4625:4650, :], (30, 40), (5, 5),
                       tile_spacing=(1, 1))
    plt.title('original')
    plt.show()
    """
Beispiel #21
0
# Unwrap the loaded arrays; ``[()]`` indexing presumably extracts the payload
# from 0-d object arrays produced by np.load -- confirm against the loader.
X = X_compressed[()]
y = y_compressed[()]
print('Loaded Data')

# Model Specifications
net = phf.build_GoogLeNet(img_width, img_height)
# Load the BVLC GoogLeNet caffe weights, dropping the final classifier pair.
# NOTE(review): the path literal begins with '\m' (an unescaped backslash in
# a non-raw string) -- verify the intended path on this platform.
values = pickle.load(open('\models\\blvc_googlenet.pkl', 'rb'))['param values'][:-2]
lasagne.layers.set_all_param_values(net['pool5/7x7_s1'], values)

# Shift image array to BGR for pretrained caffe models
X = X[:, ::-1, :, :]


# Fine-tune the pretrained net end-to-end with a conservative learning rate,
# holding out 20% of the data for validation.
net0 = NeuralNet(
    net['softmax'],
    max_epochs=300,
    update=adam,
    update_learning_rate=.00001, #start with a really low learning rate
    #objective_l2=0.0001, 
    
    batch_iterator_train = BatchIterator(batch_size=32),
    batch_iterator_test = BatchIterator(batch_size=32),

    train_split=TrainSplit(eval_size=0.2),
    verbose=3,
)


net0.fit(X, y)
net0.save_params_to('ModelWeights')
        # for every epoch
        for batch in patches_extract_all(Train):
            # for every batch
            inputs, targets = batch
            # data augmentation
            inputs, targets = data_aug(inputs, targets)
            # run cnn.fit for 1 iteration
            #cnn_fit = time.time();
            cnn.fit(inputs, targets.reshape((-1, 1 * 32 * 32)))
        # print 'fitting cnn took: ', time.time()-cnn_fit, 'sec.';
        # for every 10 epoch, print testing accuracy
        '''if epoch % 10 == 0:
			for batch in patches_extract_all(Test):
				inputs, targets = batch;
				predicts = cnn.predict(inputs);
				T = targets.reshape((-1, 1 * 32 * 32)).flatten().astype(np.int32);
				P = (predicts.flatten() > 0.5).astype(np.int32);
				print("======================= {:.4f} =======================".format(accuracy_score(T, P)));'''
    print('training cnn took: ',
          time.time() - cnn_train, 'sec.')
    # save the trained model
    cnn.save_params_to('./data_cache/combined_dataset/cnn_' + CNNCode + '-' +
                       TrainCode + '.pkl')

# save predicted shadow mask on the test set
# (``cnn`` and ``Test`` come from the training section earlier in this file)
cnn_test = time.time()
print('testing cnn started at: ', cnn_test, '...')
cnn_pred_mask(cnn, Test)
print('testing cnn took: ',
      time.time() - cnn_test, 'sec.')
def main():
  """Train a small VGG-style CNN on pre-augmented retinal image patches.

  Loads a pickled data set and its labels, reshapes everything to NCHW
  float32, then repeatedly fits an L2-regularized nolearn CNN 5 epochs at a
  time (100 rounds), checkpointing parameters and printing test accuracy
  after every round.
  """
  pickle_file = '/mnt/Data/uniformsample_04_1k_mirror_rot_128x128_norm.cpickle'
  labels_csvfile = '/mnt/Data/trainLabels.csv'

  train_data, train_labels, test_data, test_labels = make_train_and_test_sets(pickle_file, labels_csvfile)

  train_data = train_data.reshape(-1, 3, IMAGE_SIZE, IMAGE_SIZE)
  train_data = train_data.astype('float32')
  # NOTE(review): uses module-level ``imageWidth`` here but ``IMAGE_SIZE``
  # above -- confirm both exist and agree.
  test_data = test_data.reshape(-1, 3, imageWidth, imageWidth)
  test_data = test_data.astype('float32')

  numFeatures = train_data[1].size
  numTrainExamples = train_data.shape[0]
  print 'Features = %d' %(numFeatures)
  print 'Train set = %d' %(numTrainExamples)

  print "training data shape: ", train_data.shape
  print "training labels shape: ", train_labels.shape

  # NOTE(review): the input shape reads module-level ``X`` rather than
  # ``train_data`` -- looks unintended; confirm ``X`` is defined and matches.
  layers0 = [
             (InputLayer, {'shape': (None, X.shape[1], X.shape[2], X.shape[3])}),
           
             (Conv2DLayer, {'num_filters': 32, 'filter_size': 3}),
             (Conv2DLayer, {'num_filters': 32, 'filter_size': 3}),
             (Conv2DLayer, {'num_filters': 32, 'filter_size': 3}),
             (MaxPool2DLayer, {'pool_size': 2}),
           
             (Conv2DLayer, {'num_filters': 64, 'filter_size': 3}),
             (Conv2DLayer, {'num_filters': 64, 'filter_size': 3}),
             (MaxPool2DLayer, {'pool_size': 2}),
           
             (Conv2DLayer, {'num_filters': 128, 'filter_size': 3}),
             (Conv2DLayer, {'num_filters': 128, 'filter_size': 3}),
             (MaxPool2DLayer, {'pool_size': 2}),
           
             (DenseLayer, {'num_units': 600}),
             (DropoutLayer, {}),
             (DenseLayer, {'num_units': 600}),
           
             (DenseLayer, {'num_units': 2, 'nonlinearity': softmax}),
             ]

  def regularization_objective(layers, lambda1=0., lambda2=0., *args, **kwargs):
    ''' from nolearn MNIST CNN tutorial'''
    # default loss
    losses = objective(layers, *args, **kwargs)
    # get the layers' weights, but only those that should be regularized
    # (i.e. not the biases)
    weights = get_all_params(layers[-1], regularizable=True)
    # sum of absolute weights for L1
    sum_abs_weights = sum([abs(w).sum() for w in weights])
    # sum of squared weights for L2
    sum_squared_weights = sum([(w ** 2).sum() for w in weights])
    # add weights to regular loss
    losses += lambda1 * sum_abs_weights + lambda2 * sum_squared_weights
    return losses

  clf = NeuralNet(
                  layers=layers0,
                  max_epochs=5,
                 
                  # optimization method
                  update=nesterov_momentum,
                  update_momentum=0.9,
                  update_learning_rate=0.0002,
                 
                  objective=regularization_objective,
                  objective_lambda2=0.0025,
                 
                  train_split=TrainSplit(eval_size=0.1),
                  verbose=1,
                
                  )

  # load parameters from pickle file to continue training from previous epochs or smaller network
  #clf.load_params_from('params1.pickle')
  #clf.initialize()

  # Alternate fit / checkpoint / evaluate for 100 rounds of 5 epochs each.
  for i in range(100):
    print '******************************  ',i,'  ******************************'

    clf.fit(train_data, train_labels)

    clf.save_params_to('params2.pickle')

    preds = clf.predict(test_data)
    #print sum(preds)
    print "Test data accuracy: ", 1.0*sum(preds==test_labels)/test_labels.shape[0]
def main():
    """Train a convolutional autoencoder on 1000 imagenet images.

    Loads a pickled image set, fits a conv/pool -> dense bottleneck ->
    unpool/deconv autoencoder, pickles the trained net, and renders random
    input/reconstruction pairs.

    NOTE(review): ``sigma`` and ``mu`` are only defined inside the
    commented-out normalization block, yet the prediction is de-normalized
    with them below -- that line raises NameError unless both exist as
    module globals; confirm.
    """
    # load data set
    fname = 'imagenet.pkl'
    train_set = pickle.load(open(fname, 'r'))
    X = train_set[0:1000]
    # X = X.astype(np.int).reshape((-1, 3, 256, 256))  # convert to (0,255) int range (we'll do our own scaling)
    # sigma = np.std(X.flatten())
    # mu = np.mean(X.flatten())

    # print np.shape(X[0])
    # print mu
    # print sigma
    # return
    # # <codecell>
    # X_train = X.astype(np.float64)
    # X_train = (X_train - mu) / sigma
    # X_train = X_train.astype(np.float32)
    X_train = X.astype(np.float32)

    # we need our target to be 1 dimensional
    X_out = X_train.reshape((X_train.shape[0], -1))

    # <codecell>
    conv_filters = 32
    deconv_filters = 32
    filter_sizes = 7
    epochs = 20
    encode_size = 40
    ae = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv', layers.Conv2DLayer),
            ('pool', layers.MaxPool2DLayer),
            ('flatten', layers.ReshapeLayer),  # output_dense
            ('encode_layer', layers.DenseLayer),
            ('hidden', layers.DenseLayer),  # output_dense
            ('unflatten', layers.ReshapeLayer),
            ('unpool', Unpool2DLayer),
            ('deconv', layers.Conv2DLayer),
            ('output_layer', layers.ReshapeLayer),
            ],
        input_shape=(None, 3, 256, 256),
        conv_num_filters=conv_filters, conv_filter_size = (filter_sizes, filter_sizes),
        conv_nonlinearity=None,
        pool_pool_size=(2, 2),
        flatten_shape=(([0], -1)), # not sure if necessary?
        encode_layer_num_units = encode_size,
        hidden_num_units= deconv_filters * (256 + filter_sizes - 1) ** 2 / 4,
        unflatten_shape=(([0], deconv_filters, (256 + filter_sizes - 1) / 2, (256 + filter_sizes - 1) / 2 )),
        unpool_ds=(2, 2),
        deconv_num_filters=1, deconv_filter_size = (filter_sizes, filter_sizes),
        deconv_nonlinearity=None,
        output_layer_shape = (([0], -1)),
        update_learning_rate = 0.01,
        update_momentum = 0.975,
        batch_iterator_train=FlipBatchIterator(batch_size=128),
        regression=True,
        max_epochs= epochs,
        verbose=1,
        )
    ae.fit(X_train, X_out)
    print '---------------train end'
    print
    ###  expect training / val error of about 0.087 with these parameters
    ###  if your GPU not fast enough, reduce the number of filters in the conv/deconv step

    # <codecell>


    pickle.dump(ae, open('mnist/my_conv_ae.pkl','w'))
    # ae = pickle.load(open('mnist/my_conv_ae.pkl','r'))
    ae.save_params_to('mnist/my_conv_ae.np')

    # <codecell>

    # De-normalize predictions back to 0..255 uint8 (see NOTE in docstring).
    X_train_pred = ae.predict(X_train).reshape(-1, 256, 256) * sigma + mu
    X_pred = np.rint(X_train_pred).astype(int)
    X_pred = np.clip(X_pred, a_min = 0, a_max = 255)
    X_pred = X_pred.astype('uint8')
    print X_pred.shape , X.shape

    # <codecell>

    ###  show random inputs / outputs side by side

    for i in range(0, 10):
        get_random_images(X, X_pred, i)

    return


    # NOTE(review): duplicated, unreachable return kept as-is.
    return
Beispiel #25
0
    conv2d3_filter_size=(3, 3),
    conv2d3_nonlinearity=lasagne.nonlinearities.rectify,
    maxpool3_pool_size=(2, 2),

    # conv2d4_num_filters = 16,
    # conv2d4_filter_size = (2,2),
    # conv2d4_nonlinearity = lasagne.nonlinearities.rectify,

    # maxpool4_pool_size = (2,2),
    dropout1_p=0.5,

    # dropout2_p = 0.5,
    dense_num_units=16,
    dense_nonlinearity=lasagne.nonlinearities.rectify,

    # dense2_num_units = 16,
    # dense2_nonlinearity = lasagne.nonlinearities.rectify,
    output_nonlinearity=lasagne.nonlinearities.softmax,
    output_num_units=2,
    update=nesterov_momentum,
    update_learning_rate=0.003,
    update_momentum=0.9,
    max_epochs=1000,
    verbose=1,
)

# NOTE(review): fit() here presumably returns the net itself (nolearn
# convention), so ``nn`` is just an alias for ``net1`` -- confirm usage.
nn = net1.fit(X_train, y_train)  # Train CNN

net1.save_params_to(
    "/Users/Pedro/PycharmProjects/bidhu/docs/train.txt")  # Save CNN parameters
Beispiel #26
0
class EmotionClassifier:
    """Classify facial emotions in the CK+ data-set with a convolutional net.

    Faces are located and their 68 landmarks extracted with dlib, then the
    cropped grey-scale face is fed to a lasagne/nolearn CNN.  Emotion codes
    are: 0=neutral, 1=anger, 2=contempt, 3=disgust, 4=fear, 5=happy,
    6=sadness, 7=surprise.
    """

    def __init__(self,
                 data_directory="/home/nicholai/Documents/Emotion Files/",
                 face_data="../FaceData/landmarks.dat",
                 show_image=False,
                 epochs=10,
                 dropout_1=0.5,
                 dropout_2=0.5):
        """Build the dlib detector/predictor and the (untrained) CNN.

        :param data_directory: root of the CK+ data-set; must contain the
            "cohn-kanade-images/", "FACS/" and "Emotion/" sub-directories
        :param face_data: path to dlib's 68-landmark shape-predictor model
        :param show_image: when True, display each processed face in a
            dlib image window (debugging aid)
        :param epochs: maximum number of CNN training epochs
        :param dropout_1: dropout probability after the conv/pool stack
        :param dropout_2: currently unused; kept for interface stability
        """
        self.data_dir = data_directory
        self.picture_dir = self.data_dir + "cohn-kanade-images/"
        self.FACS_dir = self.data_dir + "FACS/"
        self.Emotion_dir = self.data_dir + "Emotion/"
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(face_data)
        # Every face is cropped and resized to face_sz x face_sz pixels.
        self.face_sz = 200
        self.extra_face_space = 0
        self.face_sz += self.extra_face_space
        self.width = self.face_sz
        self.height = self.face_sz
        self.show_img = show_image
        # Architecture: conv -> pool -> conv -> pool -> dropout ->
        # dense(1024) -> dense(512) -> softmax over the 8 emotion codes.
        self.network = NeuralNet(
            layers=[
                ('input', layers.InputLayer),
                ('conv2d1', layers.Conv2DLayer),
                ('maxpool1', layers.MaxPool2DLayer),
                ('conv2d4', layers.Conv2DLayer),
                ('maxpool2', layers.MaxPool2DLayer),
                ('dropout1', layers.DropoutLayer),
                ('learningLayer', layers.DenseLayer),
                ('learningLayer1', layers.DenseLayer),
                ('output', layers.DenseLayer),
            ],
            # input layer: a single grey-scale channel
            input_shape=(None, 1, self.face_sz, self.face_sz),
            # layer conv2d1
            conv2d1_num_filters=32,
            conv2d1_filter_size=(5, 5),
            conv2d1_nonlinearity=lasagne.nonlinearities.rectify,
            conv2d1_W=lasagne.init.GlorotUniform(),
            # layer maxpool1
            maxpool1_pool_size=(5, 5),
            # layer conv2d4
            conv2d4_num_filters=32,
            conv2d4_filter_size=(5, 5),
            conv2d4_nonlinearity=lasagne.nonlinearities.rectify,
            conv2d4_W=lasagne.init.GlorotUniform(),
            # layer maxpool2
            maxpool2_pool_size=(5, 5),
            # dropout between the conv stack and the dense layers
            dropout1_p=dropout_1,
            # dense layers
            learningLayer_num_units=1024,
            learningLayer_nonlinearity=lasagne.nonlinearities.rectify,
            learningLayer1_num_units=512,
            learningLayer1_nonlinearity=lasagne.nonlinearities.rectify,
            # output: one unit per emotion code (0-7)
            output_nonlinearity=lasagne.nonlinearities.softmax,
            output_num_units=8,
            # optimization method params; shared variables let
            # AdjustVariable anneal them between epochs
            regression=False,
            update=nesterov_momentum,
            update_learning_rate=theano.shared(np.cast['float32'](0.05)),
            update_momentum=theano.shared(np.cast['float32'](0.9)),
            on_epoch_finished=[
                AdjustVariable('update_learning_rate', start=0.05, stop=0.01),
                AdjustVariable('update_momentum', start=0.9, stop=0.999),
            ],
            max_epochs=epochs,
            verbose=2,
        )
        if self.show_img:
            self.win = dlib.image_window()

    def load_training_set(self):
        """
        Loads the CK+ data-set of images, processes the facial key-points of each face, and returns the emotion codes
        of each participant 0-7 (i.e. 0=neutral, 1=anger, 2=contempt, 3=disgust, 4=fear, 5=happy, 6=sadness, 7=surprise)
        :return: Training X (X_Train) and Y (y_train) Data as well as testing X (X_test) and Y (y_test) Data
        """
        # 382 is the known number of usable CK+ samples for this split.
        x_train = np.zeros((382, self.width, self.height), dtype='float32')
        y_train = np.zeros(382, dtype='int32')
        i = 0
        for root, _dirs, files in os.walk(self.picture_dir):
            png_files = [fname for fname in files if fname.endswith(".png")]
            if not png_files:
                continue
            # Frames are ordered by name; the last frame shows the peak emotion.
            fs = sorted(png_files, key=lambda x: x[:-4])
            emotion = self.get_emotion(fs[-1])
            if emotion != -1:
                # Every 7th accepted sample also contributes its neutral
                # first frame, to keep class 0 represented.
                if i % 7 == 0:
                    x_train[i] = self.get_face_image(os.path.join(
                        root, fs[0]))  # add the neutral (first) face
                    y_train[i] = 0  # emotion code of a neutral face
                    i += 1
                x_train[i] = self.get_face_image(os.path.join(root, fs[-1]))
                y_train[i] = emotion
                i += 1
            print(i)  # progress indicator
        return x_train.astype(np.float32).reshape(-1, 1, self.face_sz,
                                                  self.face_sz), y_train

    def load_keypoint_training_set(self):
        """Load (x, y) landmark coordinates instead of face images.

        For every annotated sequence both the neutral first frame and the
        expressed last frame are added.

        :return: x_train of shape (-1, 1, 2, 68) and the emotion codes y_train
        """
        x_train = np.zeros((655, 2, 68), dtype='int16')
        y_train = np.zeros(655, dtype='int16')
        i = 0
        for root, _dirs, files in os.walk(self.picture_dir):
            png_files = [fname for fname in files if fname.endswith(".png")]
            if not png_files:
                continue
            fs = sorted(png_files, key=lambda x: x[:-4])
            emotion = self.get_emotion(fs[-1])
            if emotion != -1:
                x_train[i] = self.get_keypoints(os.path.join(
                    root, fs[0]))  # add the key-points of a neutral face
                y_train[i] = 0  # emotion code of a neutral face
                i += 1
                x_train[i] = self.get_keypoints(os.path.join(
                    root, fs[-1]))  # add the key-points of an expressed face
                y_train[i] = emotion
                i += 1
            print(i)  # progress indicator
        return x_train.astype(np.float32).reshape(-1, 1, 2, 68), y_train

    def get_keypoints(self, image_file):
        """
        Returns the key-point data from the facial recognition process
        :param image_file: a full file path to an image containing a face
        :return: a (2, 68) int16 array of landmark x/y coordinates; zeros
                 when no face is detected
        """
        img = imageio.imread(image_file)
        details = self.detector(img, 1)
        landmarks = np.zeros((2, 68), dtype='int16')
        if self.show_img:
            self.win.set_image(img)
        # If several faces are found, the last one's landmarks win.
        for _idx, det in enumerate(details):
            shape = self.predictor(img, det)
            if self.show_img:
                self.win.add_overlay(shape)
            for k in range(0, 68):
                part = shape.part(k)
                landmarks[0][k] = part.x
                landmarks[1][k] = part.y
        if self.show_img:
            self.win.add_overlay(details)
        return landmarks

    def get_face_image(self, filename):
        """Load *filename*, burn its 68 landmarks into the pixels, and return
        the cropped grey-scale face scaled to face_sz x face_sz in [0, 1].

        :param filename: full path to an image containing a face
        :raises ValueError: when dlib finds no face in the image
        """
        img = imageio.imread(filename)
        details = self.detector(img, 1)
        if len(details) == 0:
            # The original code crashed with an opaque NameError here
            # (loop variable never bound); fail with a clear message instead.
            raise ValueError("no face detected in %s" % filename)
        for _idx, det in enumerate(details):
            shape = self.predictor(img, det)
            for k in range(0, 68):
                part = shape.part(k)
                img[part.y][part.x] = 255  # mark landmark pixel white
        # Crop to the last detected face and resize to the network input size.
        img = resize(img[det.top():det.bottom(),
                         det.left():det.right()],
                     output_shape=(self.face_sz, self.face_sz),
                     preserve_range=True)
        if len(img.shape) == 3:
            img = rgb2gray(img)
        img = np.asarray(img, dtype='float32') / 255
        return img

    def show_faces(self, filename):
        """Debug helper: display the last detected face with its landmarks
        marked, in the dlib window (only when show_image was requested)."""
        img = imageio.imread(filename)
        details = self.detector(img, 1)
        if len(details) == 0:
            return  # nothing to show (the original raised NameError here)
        for _idx, det in enumerate(details):
            shape = self.predictor(img, det)
            for k in range(0, 68):
                part = shape.part(k)
                img[part.y][part.x] = 255
        if self.show_img:
            self.win.set_image(img[det.top():det.bottom(),
                                   det.left():det.right()])

    def get_full_image(self, filename):
        """Return the full image (no face crop), grey-loaded, scaled to
        [0, 1] and clipped to width x height pixels."""
        img = imageio.imread(filename, True)
        img = np.asarray(img, dtype='float32') / 255
        return img[0:self.width, 0:self.height]

    def get_facs(self, filename):
        """
        Basically Take a filename that is formatted like so 'S114_005_00000022.png'
        and turn that into a directory structure which contains a FACS text file
        named 'S114_005_00000022.txt' in ./FACS/S114/005/
        NOTE(review): the lookup is under FACS_dir but uses the "_emotion.txt"
        suffix -- confirm which annotation file is actually intended.
        :param filename: Should be the name of the file (only) of the CK+ test picture
        :return: Returns the FACS codes and Emotion code as FACS, Emotion
        """
        fn = filename[:-4].split("_")  # Strip filename
        filepath = os.path.join(self.FACS_dir, fn[0], fn[1],
                                filename[:-4] + "_emotion.txt")
        # 'with' ensures the handle is closed (the original leaked it).
        with open(filepath) as facs_file:
            lines = [line.split('\n') for line in facs_file]
        return lines

    def get_emotion(self, filename):
        """Return the CK+ emotion code (0-7) annotated for *filename*,
        or -1 when the sequence carries no emotion annotation."""
        fn = filename[:-4].split("_")
        filepath = os.path.join(self.Emotion_dir, fn[0], fn[1],
                                filename[:-4] + "_emotion.txt")
        if os.path.isfile(filepath):
            # The annotation file holds a single float (e.g. " 3.0000000e+00").
            # 'with' ensures the handle is closed (the original leaked it).
            with open(filepath) as emotion_file:
                codes = [
                    int(float(text.strip(' ').strip('\n')))
                    for text in emotion_file
                ]
            return codes[0]
        return -1

    def train(self, x_train, y_train, epoch=0):
        """
        Fits training data to the Convolutional Neural Network
        :param epoch: number of epochs (0 means use the net's max_epochs)
        :param x_train: Training x values
        :param y_train: Training y values
        """
        if epoch == 0:
            self.network.fit(x_train, y_train)
        else:
            self.network.fit(x_train, y_train, epoch)

    def predict(self, image):
        """Return the network's predicted emotion code(s) for *image*."""
        return self.network.predict(image)

    def save_network_state(self, paramsname="params.npz"):
        """Persist the learned network weights to *paramsname*."""
        self.network.save_params_to(paramsname)

    def load_network_state(self, paramsname="params.npz"):
        """Restore network weights previously saved with save_network_state."""
        self.network.load_params_from(paramsname)
Beispiel #27
0
    # conv2d4_num_filters = 16,
    # conv2d4_filter_size = (2,2),
    # conv2d4_nonlinearity = lasagne.nonlinearities.rectify,

    # maxpool4_pool_size = (2,2),

    dropout1_p=0.5,

    # dropout2_p = 0.5,

    dense_num_units=16,
    dense_nonlinearity=lasagne.nonlinearities.rectify,

    # dense2_num_units = 16,
    # dense2_nonlinearity = lasagne.nonlinearities.rectify,

    output_nonlinearity=lasagne.nonlinearities.softmax,
    output_num_units=2,

    update=nesterov_momentum,
    update_learning_rate=0.003,
    update_momentum=0.9,
    max_epochs=1000,
    verbose=1,
)

nn = net1.fit(X_train, y_train)  # Train CNN

net1.save_params_to("/Users/Pedro/PycharmProjects/bidhu/docs/train.txt")  # Save CNN parameters
Beispiel #28
0
    input_shape=(None, num_features),
    dense_num_units=64,
    narrow_num_units=48,
    denseReverse1_num_units=64,
    denseReverse2_num_units=128,
    output_num_units=128,

    #input_nonlinearity = None, #nonlinearities.sigmoid,
    #dense_nonlinearity = nonlinearities.tanh,
    narrow_nonlinearity=nonlinearities.softplus,
    #denseReverse1_nonlinearity = nonlinearities.tanh,
    denseReverse2_nonlinearity=nonlinearities.softplus,
    output_nonlinearity=nonlinearities.linear,  #nonlinearities.softmax,

    #dropout0_p=0.1,
    dropout1_p=0.01,
    dropout2_p=0.001,
    regression=True,
    verbose=1)

# Build the network graph and print a per-layer summary before training.
ae.initialize()
PrintLayerInfo()(ae)

# Autoencoder training: the target equals the input (reconstruct Z from Z).
maybe_this_is_a_history = ae.fit(Z, Z)

#learned_parameters = ae.get_all_params_values()
#np.save("task4/learned_parameter.npy", learned_parameters)

#SaveWeights(path='task4/koebi_train_history_AE')(ae, maybe_this_is_a_history)
# Persist the trained weights for later reloading.
ae.save_params_to('task4/koebi_train_history_AE2')
    print "X_training shape must match y_training shape"
# --- Python 2 script fragment: build and train a small MLP regressor ---
print "Generate X_test and y_test"
# NOTE(review): n_input is 11 but the input layer below is built with 10
# units -- confirm which value is correct.
n_input = 11
print "X_test..."

print "Multi Layer Perceptron..."
# Build layers for the MLP: 10 inputs -> 15 sigmoid units -> 1 linear output
l_in = ls.layers.InputLayer(shape=(None,10),input_var=None)
l_hidden = ls.layers.DenseLayer(l_in,num_units=15,nonlinearity=ls.nonlinearities.sigmoid)
network = l_out = ls.layers.DenseLayer(l_hidden,num_units=1)
print "Neural network initialize"
# Wrap the lasagne network in nolearn's NeuralNet trainer
net1 = NeuralNet(
    layers=network,
    # optimization method: SGD with Nesterov momentum
    update=nesterov_momentum,
    update_learning_rate=0.001,
    update_momentum=0.9,
    regression=True,  # flag to indicate we're dealing with regression problem
    max_epochs=400,  # we want to train this many epochs
    verbose=1,
)
# Train, persist the weights, then report the score and a sample prediction
print "Training time!!!!!....."
net1.fit(X_training,y_training)
net1.save_params_to("saveNeuralNetwork.tdn")
print "Score rate = "
print net1.score(n_sample2,n_test2)
print net1.predict(n_sample2)[0:2]

Beispiel #30
0
# --- Fine-tune a pretrained GoogLeNet on the poster data-set ---
# Load the full data-set from HDF5 into memory; dataset[()] reads the
# entire h5py dataset as a NumPy array.
f = h5py.File("PosterData.hdf5", "r")
X_compressed = f['X']
y_compressed = f['y']
X = X_compressed[()]
y = y_compressed[()]
print('Loaded Data')

# Model Specifications
net = phf.build_GoogLeNet(img_width, img_height)
# Load the BVLC GoogLeNet weights, dropping the final classifier layers
# ([:-2]) so only the feature-extraction stack is initialised.
# NOTE(review): '\models\\blvc_googlenet.pkl' mixes single and double
# backslashes -- confirm this path resolves on the target platform.
values = pickle.load(open('\models\\blvc_googlenet.pkl',
                          'rb'))['param values'][:-2]
lasagne.layers.set_all_param_values(net['pool5/7x7_s1'], values)

# Shift image array to BGR for pretrained caffe models
X = X[:, ::-1, :, :]

# Wrap the network for training; fine-tuning only, hence the tiny rate.
net0 = NeuralNet(
    net['softmax'],
    max_epochs=300,
    update=adam,
    update_learning_rate=.00001,  #start with a really low learning rate
    #objective_l2=0.0001,
    batch_iterator_train=BatchIterator(batch_size=32),
    batch_iterator_test=BatchIterator(batch_size=32),
    train_split=TrainSplit(eval_size=0.2),  # hold out 20% for validation
    verbose=3,
)

net0.fit(X, y)
# Persist the fine-tuned weights.
net0.save_params_to('ModelWeights')