Example #1
def init_cnn():
    net = NeuralNet(
        layers=[
            # input layer
            (layers.InputLayer, {
                'shape': (None, bastdm5.classification.settings.CHANNELS,
                          bastdm5.classification.settings.MINI_SEGMENT_LENGTH,
                          bastdm5.classification.settings.MEL_DATA_POINTS)
            }),

            # convolution layers 1
            (layers.Conv2DLayer, {
                'num_filters': 32,
                'filter_size': (8, 1)
            }),
            (layers.MaxPool2DLayer, {
                'pool_size': (4, 1),
                'stride': (2, 1)
            }),

            # convolution layers 2
            (layers.Conv2DLayer, {
                'num_filters': 32,
                'filter_size': (8, 1)
            }),
            (layers.MaxPool2DLayer, {
                'pool_size': (4, 1),
                'stride': (2, 1)
            }),

            # dense layer
            (layers.DenseLayer, {
                'num_units': 100
            }),
            (layers.DropoutLayer, {}),
            (layers.DenseLayer, {
                'num_units': 50
            }),

            # output layer
            (layers.DenseLayer, {
                'num_units': 6,
                'nonlinearity': nonlinearities.softmax
            })
        ],

        # learning rate parameters
        update_learning_rate=0.001,
        update_momentum=0.9,
        regression=False,
        max_epochs=999,
        verbose=1,
    )

    net.batch_iterator_test = TestSegmentBatchIterator(
        batch_size=bastdm5.classification.settings.MINI_BATCH_SIZE)
    y_mapping = utils.load_from_pickle(CNN_Y_MAPPING_PATH)
    net.load_params_from(CNN_WEIGHTS_PATH)

    return net, y_mapping
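
A minimal usage sketch for the loader above (hypothetical: it assumes y_mapping maps the predicted class values back to label names and that X is an array matching the InputLayer shape):

net, y_mapping = init_cnn()
predicted_classes = net.predict(X)                  # one of the 6 softmax classes per sample
labels = [y_mapping[c] for c in predicted_classes]  # assumption: index-to-label mapping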
Example #2
	def __call__(self, nn, train_history):
	    current_valid = train_history[-1]['valid_loss']
	    current_epoch = train_history[-1]['epoch']
	    if current_valid < self.best_valid:
	        self.best_valid = current_valid
	        self.best_valid_epoch = current_epoch
	        self.best_weights = nn.get_all_params_values()
	    elif self.best_valid_epoch + self.patience < current_epoch:
	        print("Early stopping.")
	        print("Best valid loss was {:.6f} at epoch {}.".format(
	            self.best_valid, self.best_valid_epoch))
	        nn.load_params_from(self.best_weights)
	        raise StopIteration()
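
The snippet above is only the __call__ method of an early-stopping callback; the attributes it reads (patience, best_valid, best_valid_epoch, best_weights) are normally initialised in __init__. A sketch of the surrounding class, following the common nolearn early-stopping pattern (the constructor is an assumption, not part of the original snippet):

import numpy as np

class EarlyStopping(object):
    def __init__(self, patience=100):
        self.patience = patience            # epochs to wait beyond the best validation epoch
        self.best_valid = np.inf            # best validation loss seen so far
        self.best_valid_epoch = 0           # epoch at which it occurred
        self.best_weights = None            # parameter snapshot taken at that epoch

    # __call__ exactly as shown in Example #2 above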
Example #3
class network(object):
    def __init__(self,X_train, Y_train):
        #self.__hidden=0

        self.__hidden=int(math.ceil((2*(X_train.shape[1]+ 1))/3))
        self.net= NeuralNet(
            layers=[
                ('input', layers.InputLayer),
                ('hidden', layers.DenseLayer),
                ('output', layers.DenseLayer)
            ],
            input_shape=( None, X_train.shape[1] ),
            hidden_num_units=self.__hidden,
            #hidden_nonlinearity=nonlinearities.tanh,
            output_nonlinearity=None,
            batch_iterator_train=BatchIterator(batch_size=256),
            output_num_units=1,

            on_epoch_finished=[EarlyStopping(patience=50)],
            update=momentum,
            update_learning_rate=theano.shared(np.float32(0.03)),
            update_momentum=theano.shared(np.float32(0.8)),
            regression=True,
            max_epochs=1000,
            verbose=1,
        )

        self.net.fit(X_train,Y_train)

    def predict(self,X):
        return self.net.predict(X)

    def showMetrics(self):
        train_loss = np.array([i["train_loss"] for i in self.net.train_history_])
        valid_loss = np.array([i["valid_loss"] for i in self.net.train_history_])
        pyplot.plot(train_loss, linewidth=3, label="training")
        pyplot.plot(valid_loss, linewidth=3, label="validation")
        pyplot.grid()
        pyplot.legend()
        pyplot.xlabel("epoch")
        pyplot.ylabel("loss")
        # pyplot.ylim(1e-3, 1e-2)
        pyplot.yscale("log")
        pyplot.show()

    def saveNet(self,fname):
        self.net.save_params_to(fname)

    def loadNet(self,fname):
        self.net.load_params_from(fname)
Example #4
class network(object):
    def __init__(self, X_train, Y_train):
        #self.__hidden=0

        self.__hidden = int(math.ceil((2 * (X_train.shape[1] + 1)) / 3))
        self.net = NeuralNet(
            layers=[('input', layers.InputLayer),
                    ('hidden', layers.DenseLayer),
                    ('output', layers.DenseLayer)],
            input_shape=(None, X_train.shape[1]),
            hidden_num_units=self.__hidden,
            #hidden_nonlinearity=nonlinearities.tanh,
            output_nonlinearity=None,
            batch_iterator_train=BatchIterator(batch_size=256),
            output_num_units=1,
            on_epoch_finished=[EarlyStopping(patience=50)],
            update=momentum,
            update_learning_rate=theano.shared(np.float32(0.03)),
            update_momentum=theano.shared(np.float32(0.8)),
            regression=True,
            max_epochs=1000,
            verbose=1,
        )

        self.net.fit(X_train, Y_train)

    def predict(self, X):
        return self.net.predict(X)

    def showMetrics(self):
        train_loss = np.array(
            [i["train_loss"] for i in self.net.train_history_])
        valid_loss = np.array(
            [i["valid_loss"] for i in self.net.train_history_])
        pyplot.plot(train_loss, linewidth=3, label="training")
        pyplot.plot(valid_loss, linewidth=3, label="validation")
        pyplot.grid()
        pyplot.legend()
        pyplot.xlabel("epoch")
        pyplot.ylabel("loss")
        # pyplot.ylim(1e-3, 1e-2)
        pyplot.yscale("log")
        pyplot.show()

    def saveNet(self, fname):
        self.net.save_params_to(fname)

    def loadNet(self, fname):
        self.net.load_params_from(fname)
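
A minimal usage sketch for the network wrapper defined in Examples #3 and #4 (hypothetical variable names; note that the constructor already fits the net on the data it is given):

import numpy as np

X_train = X_train.astype(np.float32)        # Theano/Lasagne expect float32 inputs
Y_train = Y_train.astype(np.float32)

model = network(X_train, Y_train)           # builds the net and trains it
predictions = model.predict(X_test)         # X_test needs X_train.shape[1] features
model.showMetrics()                         # plots train/valid loss on a log scale
model.saveNet('regression_net.params')      # weights can be restored later via loadNet()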
Example #5
def loadNet2(netName):
    net = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv1', layers.Conv2DLayer),
            ('pool1', layers.MaxPool2DLayer),
            ('dropout1', layers.DropoutLayer),  # !
            ('conv2', layers.Conv2DLayer),
            ('pool2', layers.MaxPool2DLayer),
            ('dropout2', layers.DropoutLayer),  # !
            ('conv3', layers.Conv2DLayer),
            ('pool3', layers.MaxPool2DLayer),
            ('dropout3', layers.DropoutLayer),  # !
            ('hidden4', layers.DenseLayer),
            ('dropout4', layers.DropoutLayer),  # !
            ('hidden5', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        input_shape=(None, 1, 96, 96),
        conv1_num_filters=32, conv1_filter_size=(3, 3), pool1_pool_size=(2, 2),
        dropout1_p=0.1,  # !
        conv2_num_filters=64, conv2_filter_size=(2, 2), pool2_pool_size=(2, 2),
        dropout2_p=0.2,  # !
        conv3_num_filters=128, conv3_filter_size=(2, 2), pool3_pool_size=(2, 2),
        dropout3_p=0.3,  # !
        hidden4_num_units=1000,  # !
        dropout4_p=0.5,
        hidden5_num_units=1000,  # !
        output_num_units=30, output_nonlinearity=None,

        update_learning_rate=theano.shared(float32(0.03)),
        update_momentum=theano.shared(float32(0.9)),

        regression=True,
        batch_iterator_train=FlipBatchIterator(batch_size=128),
        on_epoch_finished=[
            AdjustVariable('update_learning_rate', start=0.03, stop=0.0001),
            AdjustVariable('update_momentum', start=0.9, stop=0.999),
            EarlyStopping(patience=200),
            backupCNN,
        ],
        max_epochs=10000,
        verbose=1,
    )

    net.load_params_from(netName)

    return net
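
Example #5 calls float32 and AdjustVariable, which are not defined in the snippet. A sketch of helper definitions that match the call signature used above, based on the widely used nolearn/Lasagne tutorial pattern (an assumption, not code from this repository):

import numpy as np

def float32(k):
    return np.cast['float32'](k)

class AdjustVariable(object):
    """Linearly anneal a shared hyperparameter (e.g. the learning rate) over max_epochs."""
    def __init__(self, name, start=0.03, stop=0.001):
        self.name = name
        self.start, self.stop = start, stop
        self.ls = None

    def __call__(self, nn, train_history):
        if self.ls is None:
            self.ls = np.linspace(self.start, self.stop, nn.max_epochs)
        epoch = train_history[-1]['epoch']
        getattr(nn, self.name).set_value(float32(self.ls[epoch - 1]))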
Example #6
def main(resume=None):
    l = 300
    dataset = './data/ubiquitous_train.hkl'
    print('Loading dataset {}...'.format(dataset))
    X_train, y_train = hkl.load(dataset)
    X_train = X_train.reshape(-1, 4, 1, l).astype(floatX)
    y_train = np.array(y_train, dtype='int32')
    indice = np.arange(X_train.shape[0])
    np.random.shuffle(indice)
    X_train = X_train[indice]
    y_train = y_train[indice]
    print('X_train shape: {}, y_train shape: {}'.format(X_train.shape, y_train.shape))

    layers = [
            (InputLayer, {'shape': (None, 4, 1, l)}),
            (Conv2DLayer, {'num_filters': 64, 'filter_size': (1, 4)}),
            (Conv2DLayer, {'num_filters': 64, 'filter_size': (1, 3)}),
            (Conv2DLayer, {'num_filters': 64, 'filter_size': (1, 3)}),
            (MaxPool2DLayer, {'pool_size': (1, 2)}),
            (Conv2DLayer, {'num_filters': 64, 'filter_size': (1, 2)}),
            (Conv2DLayer, {'num_filters': 64, 'filter_size': (1, 2)}),
            (Conv2DLayer, {'num_filters': 64, 'filter_size': (1, 2)}),
            (MaxPool2DLayer, {'pool_size': (1, 2)}),
            (DenseLayer, {'num_units': 64}),
            (DropoutLayer, {}),
            (DenseLayer, {'num_units': 64}),
            (DenseLayer, {'num_units': 2, 'nonlinearity': softmax})]

    lr = theano.shared(np.float32(1e-4))

    net = NeuralNet(
            layers=layers,
            max_epochs=100,
            update=adam,
            update_learning_rate=lr,
            train_split=TrainSplit(eval_size=0.1),
            on_epoch_finished=[
                AdjustVariable(lr, target=1e-8, half_life=20)],
            verbose=4)

    if resume != None:
        net.load_params_from(resume)

    net.fit(X_train, y_train)

    net.save_params_to('./models/net_params.pkl')
Example #7
def load_finetuned_dbn(path):
    """
    Load a fine tuned Deep Belief Net from file
    :param path: path to deep belief net parameters
    :return: deep belief net
    """
    dbn = NeuralNet(layers=[('input', las.layers.InputLayer),
                            ('l1', las.layers.DenseLayer),
                            ('l2', las.layers.DenseLayer),
                            ('l3', las.layers.DenseLayer),
                            ('l4', las.layers.DenseLayer),
                            ('l5', las.layers.DenseLayer),
                            ('l6', las.layers.DenseLayer),
                            ('l7', las.layers.DenseLayer),
                            ('output', las.layers.DenseLayer)],
                    input_shape=(None, 1200),
                    l1_num_units=2000,
                    l1_nonlinearity=sigmoid,
                    l2_num_units=1000,
                    l2_nonlinearity=sigmoid,
                    l3_num_units=500,
                    l3_nonlinearity=sigmoid,
                    l4_num_units=50,
                    l4_nonlinearity=linear,
                    l5_num_units=500,
                    l5_nonlinearity=sigmoid,
                    l6_num_units=1000,
                    l6_nonlinearity=sigmoid,
                    l7_num_units=2000,
                    l7_nonlinearity=sigmoid,
                    output_num_units=1200,
                    output_nonlinearity=linear,
                    update=nesterov_momentum,
                    update_learning_rate=0.001,
                    update_momentum=0.5,
                    objective_l2=0.005,
                    verbose=1,
                    regression=True)
    with open(path, 'rb') as f:
        pretrained_nn = pickle.load(f)
    if pretrained_nn is not None:
        dbn.load_params_from(path)
    return dbn
Example #8
class CNN(object):
	__metaclass__ = Singleton
	channels = 3
	image_size = [64,64]
	layers = [ 
		# layer dealing with the input data
		(InputLayer, {'shape': (None, channels, image_size[0], image_size[1])}),
		# first stage of our convolutional layers 
		(Conv2DLayer, {'num_filters': 32, 'filter_size': 9}),
		(Conv2DLayer, {'num_filters': 32, 'filter_size': 5}),
		(MaxPool2DLayer, {'pool_size': 2}),
		# second stage of our convolutional layers
		(Conv2DLayer, {'num_filters': 32, 'filter_size': 5}),
		(Conv2DLayer, {'num_filters': 32, 'filter_size': 3}),
		(MaxPool2DLayer, {'pool_size': 2}),
		# two dense layers with dropout
		(DenseLayer, {'num_units': 256}),
		(DropoutLayer, {}),
		(DenseLayer, {'num_units': 256}),
		# the output layer
		(DenseLayer, {'num_units': 2, 'nonlinearity': softmax}),
	]
	def __init__(self):
		logger = logging.getLogger(__name__)
		logger.info("Initializing neural net...")
		self.net = NeuralNet(layers=self.layers, update_learning_rate=0.0002 )
		self.net.load_params_from("conv_params")
		logger.info("Finished loading parameters")
	
	def resize(self, infile):
		try:
			im = Image.open(infile)
			resized_im = np.array(ImageOps.fit(im, (self.image_size[0], self.image_size[1]), Image.ANTIALIAS), dtype=np.uint8)
			rgb = np.array([resized_im[:,:,0], resized_im[:,:,1], resized_im[:,:,2]])
			return rgb.reshape(1,self.channels,self.image_size[0],self.image_size[1])

		except IOError:
			return "cannot create thumbnail for '%s'" % infile

	def predict(self, X):
		is_positive = self.net.predict(X)[0] == 1
		return "true" if is_positive else "false"
Example #9
def model_train(X_train, y_train, learning_rate=1e-4, epochs=50):
    network = create_network()
    lr = theano.shared(np.float32(learning_rate))
    net = NeuralNet(
        network,
        max_epochs=epochs,
        update=adam,
        update_learning_rate=lr,
        train_split=TrainSplit(eval_size=0.1),
        batch_iterator_train=BatchIterator(batch_size=32),
        batch_iterator_test=BatchIterator(batch_size=64),
        #on_training_started=[LoadBestParam(iteration=val_acc.argmax())],
        on_epoch_finished=[EarlyStopping(patience=5)],
        verbose=1)
    print 'Loading pre-training weights...'
    net.load_params_from(params[val_acc.argmax()])
    print 'Continue to train...'
    net.fit(X_train, y_train)
    print 'Model training finished.'
    return net
Example #10
def operate(data):
    # data = [0.0592330098, 0.140761971, 0.0757750273, 0.119381011, 0.0651519895, 0.120247006, 0.0454769731]

    batch_size = len(data)

    thres = 0.4

    net = NeuralNet(
        layers=[('input', layers.InputLayer),
                ('hidden', layers.DenseLayer),
                ('output', layers.DenseLayer),
                ],
        # layer parameters:
        input_shape=(None, batch_size),
        hidden_num_units=batch_size,  # number of units in 'hidden' layer
        hidden_nonlinearity=lasagne.nonlinearities.sigmoid,
        output_nonlinearity=lasagne.nonlinearities.elu,
        output_num_units=batch_size,  # 10 target values for the digits 0, 1, 2, ..., 9

        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.1,
        update_momentum=0.9,

        max_epochs=2000,
        verbose=1,

        regression=True,
        objective_loss_function=lasagne.objectives.squared_error
        # custom_score=("validation score", lambda x, y: np.mean(np.abs(x - y)))
    )
    net.load_params_from("/home/loringit/Bulat/neuron/bulik_nn")

    net_answer = net.predict([data])
    result = np.linalg.norm(data - net_answer)
    # return result < thres
    if result < thres:
        return "true"
    else:
        return "false"
Example #11
def main(deepL, alignments):
    S, X = make_xy(alignments, deepL['features'])
    h = DictVectorizer(sparse=False)

    X = h.fit_transform(X)
    X = X.astype(floatX)

    # normalize feature matrix
    min_max_scaler = preprocessing.MinMaxScaler()
    X = min_max_scaler.fit_transform(X)

    clf = NeuralNet(
        layers=model(deepL['input_nodes'], deepL['output_nodes']),
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        regression=False,
        max_epochs=100,
        verbose=2,
    )

    clf.load_params_from(opt.path + "/model/model.pkl")

    proba = clf.predict_proba(X)

    preds = []
    probs = []
    deepL['Y_rev'].update({10000: "unclassified"})

    for ipx, p in enumerate(proba):
        px = np.argmax(p)
        if p[px] > min_prob[deepL['Y_rev'][px]]:
            preds.append(px)
            probs.append(p[px])
        else:
            preds.append(10000)
            probs.append(0)

    return [[S[ix], deepL['Y_rev'][i], probs[ix]]
            for ix, i in enumerate(preds)]
Example #12
def load_finetuned_dbn(path):
    """
    Load a fine tuned Deep Belief Net from file
    :param path: path to deep belief net parameters
    :return: deep belief net
    """
    dbn = NeuralNet(
        layers=[
            ('input', las.layers.InputLayer),
            ('l1', las.layers.DenseLayer),
            ('l2', las.layers.DenseLayer),
            ('l3', las.layers.DenseLayer),
            ('l4', las.layers.DenseLayer),
            ('l5', las.layers.DenseLayer),
            ('l6', las.layers.DenseLayer),
            ('l7', las.layers.DenseLayer),
            ('output', las.layers.DenseLayer)
        ],
        input_shape=(None, 1200),
        l1_num_units=2000, l1_nonlinearity=sigmoid,
        l2_num_units=1000, l2_nonlinearity=sigmoid,
        l3_num_units=500, l3_nonlinearity=sigmoid,
        l4_num_units=50, l4_nonlinearity=linear,
        l5_num_units=500, l5_nonlinearity=sigmoid,
        l6_num_units=1000, l6_nonlinearity=sigmoid,
        l7_num_units=2000, l7_nonlinearity=sigmoid,
        output_num_units=1200, output_nonlinearity=linear,
        update=nesterov_momentum,
        update_learning_rate=0.001,
        update_momentum=0.5,
        objective_l2=0.005,
        verbose=1,
        regression=True
    )
    with open(path, 'rb') as f:
        pretrained_nn = pickle.load(f)
    if pretrained_nn is not None:
        dbn.load_params_from(path)
    return dbn
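
Examples #7 and #12 define the same 1200-unit-in / 1200-unit-out bottleneck DBN. A minimal usage sketch (hypothetical file name and input array):

import numpy as np

dbn = load_finetuned_dbn('finetuned_dbn_params.pkl')
# X has shape (n_samples, 1200); the net reconstructs its input
reconstructions = dbn.predict(X.astype(np.float32))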
Example #13
def recunstruct_cae(folder_path):
    cnn = NeuralNet()
    cnn.load_params_from(folder_path + CONV_AE_PARAMS_PKL)
    cnn.load_weights_from(folder_path + CONV_AE_NP)
    return cnn
Example #14
    total_test_time_points = len(X_test) // NO_TIME_POINTS
    remainder_test_points = len(X_test) % NO_TIME_POINTS

    no_rows = total_test_time_points * NO_TIME_POINTS
    X_test = X_test[0:no_rows, :]

    X_test = X_test.transpose()
    X_test_Samples = np.split(X_test, total_test_time_points, axis=1)
    X_test = np.asarray(X_test_Samples)


###########################################################################
#######get predictions and write to files for series 9 and series 10#######
    print("Testing subject%d...." %(subject))
    params = net.get_all_params_values()
    learned_weights = net.load_params_from(params)
    probabilities = net.predict_proba(X_test)

    sub9 = 'subj{0}_series{1}'.format(subject, 9)
    data_len9 = test_dict[sub9]
    total_time_points9 = data_len9 // NO_TIME_POINTS
    remainder_data9 = data_len9 % NO_TIME_POINTS

    sub10 = 'subj{0}_series{1}'.format(subject, 10)
    data_len10 = test_dict[sub10]
    total_time_points10 = data_len10 // NO_TIME_POINTS
    remainder_data10 = data_len10 % NO_TIME_POINTS

    total_test_points = total_time_points9+total_time_points10

    for i, p in enumerate(probabilities):
Example #15
def main():
    data = load_av_letters('data/allData_mouthROIs.mat')

    # create the necessary variable mappings
    data_matrix = data['dataMatrix']
    data_matrix_len = data_matrix.shape[0]
    targets_vec = data['targetsVec']
    vid_len_vec = data['videoLengthVec']
    iter_vec = data['iterVec']

    indexes = create_split_index(data_matrix_len, vid_len_vec, iter_vec)

    # split the data
    train_data = data_matrix[indexes == True]
    train_targets = targets_vec[indexes == True]
    test_data = data_matrix[indexes == False]
    test_targets = targets_vec[indexes == False]

    idx = [i for i, elem in enumerate(test_targets) if elem == 20]

    print(train_data.shape)
    print(test_data.shape)
    print(sum([train_data.shape[0], test_data.shape[0]]))

    # resize the input data to 40 x 30
    train_data_resized = resize_images(train_data).astype(np.float32)

    # normalize the inputs [0 - 1]
    train_data_resized = normalize_input(train_data_resized, centralize=True)

    test_data_resized = resize_images(test_data).astype(np.float32)
    test_data_resized = normalize_input(test_data_resized, centralize=True)

    dic = {}
    dic['trainDataResized'] = train_data_resized
    dic['testDataResized'] = test_data_resized

    """second experiment: overcomplete sigmoid encoder/decoder, squared loss"""
    encode_size = 2500
    sigma = 0.5

    # to get tied weights in the encoder/decoder, create this shared weightMatrix
    # 1200 x 2000
    w1, layer1 = build_encoder_layers(1200, 2500, sigma)

    ae1 = NeuralNet(
        layers=layer1,
        max_epochs=50,
        objective_loss_function=squared_error,
        update=adadelta,
        regression=True,
        verbose=1
    )

    load = True
    save = False
    if load:
        print('[LOAD] layer 1...')
        ae1.load_params_from('layer1.dat')
    else:
        print('[TRAIN] layer 1...')
        ae1.fit(train_data_resized, train_data_resized)

    # save params
    if save:
        print('[SAVE] layer 1...')
        ae1.save_params_to('layer1.dat')

    train_encoded1 = ae1.get_output('encoder', train_data_resized)  # 12293 x 2000

    w2, layer2 = build_encoder_layers(2500, 1250)
    ae2 = NeuralNet(
        layers=layer2,
        max_epochs=50,
        objective_loss_function=squared_error,
        update=adadelta,
        regression=True,
        verbose=1
    )

    load2 = True
    if load2:
        print('[LOAD] layer 2...')
        ae2.load_params_from('layer2.dat')
    else:
        print('[TRAIN] layer 2...')
        ae2.fit(train_encoded1, train_encoded1)

    save2 = False
    if save2:
        print('[SAVE] layer 2...')
        ae2.save_params_to('layer2.dat')

    train_encoded2 = ae2.get_output('encoder', train_encoded1)  # 12293 x 1250

    w3, layer3 = build_encoder_layers(1250, 600)
    ae3 = NeuralNet(
        layers=layer3,
        max_epochs=100,
        objective_loss_function=squared_error,
        update=adadelta,
        regression=True,
        verbose=1
    )

    load3 = True
    if load3:
        print('[LOAD] layer 3...')
        ae3.load_params_from('layer3.dat')
    else:
        ae3.fit(train_encoded2, train_encoded2)

    save3 = False
    if save3:
        print('[SAVE] layer 3...')
        ae3.save_params_to('layer3.dat')

    train_encoded3 = ae3.get_output('encoder', train_encoded2)  # 12293 x 1250

    w4, layer4 = build_bottleneck_layer(600, 100)
    ae4 = NeuralNet(
        layers=layer4,
        max_epochs=100,
        objective_loss_function=squared_error,
        update=adadelta,
        regression=True,
        verbose=1
    )

    load4 = False
    if load4:
        print('[LOAD] layer 4...')
        ae4.load_params_from('layer4.dat')
    else:
        print('[TRAIN] layer 4...')
        ae4.fit(train_encoded3, train_encoded3)

    save4 = True
    if save4:
        print('[SAVE] layer 4...')
        ae4.save_params_to('layer4.dat')

    test_enc1 = ae1.get_output('encoder', test_data_resized)
    test_enc2 = ae2.get_output('encoder', test_enc1)
    test_enc3 = ae3.get_output('encoder', test_enc2)
    test_enc4 = ae4.get_output('encoder', test_enc3)

    decoder4 = create_decoder(100, 600, w4.T)
    decoder4.initialize()
    decoder3 = create_decoder(600, 1250, w3.T)
    decoder3.initialize()
    decoder2 = create_decoder(1250, 2500, w2.T)
    decoder2.initialize()  # initialize the net
    decoder1 = create_decoder(2500, 1200, w1.T)
    decoder1.initialize()

    test_dec3 = decoder4.predict(test_enc4)
    test_dec2 = decoder3.predict(test_dec3)
    test_dec1 = decoder2.predict(test_dec2)
    X_pred = decoder1.predict(test_dec1)

    # plot_loss(ae3)
    # plot_loss(ae2)
    # plot_loss(ae1)
    tile_raster_images(X_pred[4625:4650, :], (30, 40), (5, 5), tile_spacing=(1, 1))
    plt.title('reconstructed')
    tile_raster_images(test_data_resized[4625:4650, :], (30, 40), (5, 5), tile_spacing=(1, 1))
    plt.title('original')
    plt.show()

    """
Example #16
	def rodar(self, numerodeimagens):

			np.set_printoptions(threshold=np.nan)
			sourcepath = str(os.getcwd())


			# Allocates space for each new image you want to classify, each line is an image
			X_test = np.zeros((numerodeimagens, 19200), dtype=np.int)

			strfiles =''
			i = 0

			# read the images
			for files in glob.glob(os.path.join(sourcepath, '*.jpeg')):
				X_test[i] = np.asarray(Image.open(files)).reshape(-1)[0:19200]
				strfiles += files + '<br>'
				i += 1



			# Reshape the images to help the CNN execution
			X_test = X_test.reshape((-1, 3, 80, 80))

			# Define the CNN, must be the same CNN saved into your model generated running CNN.py
			net1 = NeuralNet(
				layers=[('input', layers.InputLayer),
						('conv2d1', layers.Conv2DLayer),
						('maxpool1', layers.MaxPool2DLayer),
						('conv2d2', layers.Conv2DLayer),
						('maxpool2', layers.MaxPool2DLayer),
						('conv2d3', layers.Conv2DLayer),
						('maxpool3', layers.MaxPool2DLayer),
						# ('conv2d4', layers.Conv2DLayer),
						# ('maxpool4', layers.MaxPool2DLayer),
						('dropout1', layers.DropoutLayer),
						# s('dropout2', layers.DropoutLayer),
						('dense', layers.DenseLayer),
						# ('dense2', layers.DenseLayer),
						('output', layers.DenseLayer),
						],

				input_shape=(None, 3, 80, 80),

				conv2d1_num_filters=16,
				conv2d1_filter_size=(3, 3),
				conv2d1_nonlinearity=lasagne.nonlinearities.rectify,
				conv2d1_W=lasagne.init.GlorotUniform(),

				maxpool1_pool_size=(2, 2),

				conv2d2_num_filters=16,
				conv2d2_filter_size=(3, 3),
				conv2d2_nonlinearity=lasagne.nonlinearities.rectify,

				maxpool2_pool_size=(2, 2),

				conv2d3_num_filters=16,
				conv2d3_filter_size=(3, 3),
				conv2d3_nonlinearity=lasagne.nonlinearities.rectify,

				maxpool3_pool_size=(2, 2),

				# conv2d4_num_filters = 16,
				# conv2d4_filter_size = (2,2),
				# conv2d4_nonlinearity = lasagne.nonlinearities.rectify,

				# maxpool4_pool_size = (2,2),

				dropout1_p=0.5,

				# dropout2_p = 0.5,

				dense_num_units=16,
				dense_nonlinearity=lasagne.nonlinearities.rectify,

				# dense2_num_units = 16,
				# dense2_nonlinearity = lasagne.nonlinearities.rectify,

				output_nonlinearity=lasagne.nonlinearities.softmax,
				output_num_units=2,

				update=nesterov_momentum,
				update_learning_rate=0.001,
				update_momentum=0.9,
				max_epochs=1000,
				verbose=1,
			)

			net1.load_params_from(os.path.join(sourcepath, "#0#0#0#.txt"))  # Read model



			preds = net1.predict(X_test)  # make predictions


			strfiles = strfiles.replace(str(os.getcwd()), "").replace(".jpeg", '').replace("/", '')


			strfiles = strfiles.split("<br>")


			strpreds = str(preds)


			strpreds = strpreds.replace("1", "yes")
			strpreds = strpreds.replace("0", "no")

			strpreds = strpreds.split(" ")

			for i in range(0, numerodeimagens):
				strpreds[i] += " >> " + strfiles[i]

			strpreds = str(strpreds)

			strpreds = strpreds.replace(",", "<br>")



			strpreds = strpreds.replace("[", "")
			strpreds = strpreds.replace("]", "")
			strpreds = strpreds.replace("'", "")


			return strpreds
Example #17
    def flush(self):
        # this flush method is needed for python 3 compatibility.
        # this handles the flush command by doing nothing.
        # you might want to specify some extra behavior here.
        pass


sys.stdout = Logger()

# cnn init
# cnn.load_params_from('./model_trained_on_UCF.pkl');

if os.path.exists('./data_cache/cnn_' + CNNCode + '-' + TrainCode + '.pkl'):
    # check if a trained model already exists
    cnn.load_params_from('./data_cache/cnn_' + CNNCode + '-' + TrainCode + '.pkl');
else:
    cnn_train = time.time();
    # training a new model
    for epoch in range(Epochs):
        # for every epoch
        for batch in patches_extract_all(Train):
            # for every batch
            inputs, targets = batch;
            # data augmentation
            inputs, targets = data_aug(inputs, targets);
            # run cnn.fit for 1 iteration
            cnn_fit = time.time();
            cnn.fit(inputs, targets.reshape((-1, 1 * 32 * 32)));
        # print 'fitting cnn took: ', time.time()-cnn_fit, 'sec.';
        # for every 10 epoch, print testing accuracy
Example #18
def create_pretrained_vgg_nn_nolearn():
    '''
    *** This function need only be run once to create and save a nolearn NeuralNet ***
    ***     instance from the origninal lasagne layer weights for the vgg net.     ***
    Create a vgg neural net. Load pretrained weights.
    Pickle the entire net.
    Pickle the mean image.
    Return a nolearn.NeuralNet instance,  mean_image numpy array
    '''
    # define the vgg_s network
    vgg_nn = NeuralNet(
        layers=[
            (InputLayer, {
                'name': 'input',
                'shape': (None, 3, 224, 224)
            }),
            (ConvLayer, {
                'name': 'conv1',
                'num_filters': 96,
                'filter_size': (7, 7),
                'stride': 2,
                'flip_filters': False
            }),
            (NormLayer, {
                'name': 'norm1',
                'alpha': .0001
            }),
            (PoolLayer, {
                'name': 'pool1',
                'pool_size': (3, 3),
                'stride': 3,
                'ignore_border': False
            }),
            (
                ConvLayer,
                {
                    'name': 'conv2',
                    'num_filters': 256,
                    'filter_size': (5, 5),
                    'flip_filters': False
                    #                     'pad':2,
                    #                     'stride':1
                }),
            (PoolLayer, {
                'name': 'pool2',
                'pool_size': (2, 2),
                'stride': 2,
                'ignore_border': False
            }),
            (
                ConvLayer,
                {
                    'name': 'conv3',
                    'num_filters': 512,
                    'filter_size': (3, 3),
                    'pad': 1,
                    #                     'stride':1
                    'flip_filters': False
                }),
            (
                ConvLayer,
                {
                    'name': 'conv4',
                    'num_filters': 512,
                    'filter_size': (3, 3),
                    'pad': 1,
                    #                     'stride':1
                    'flip_filters': False
                }),
            (
                ConvLayer,
                {
                    'name': 'conv5',
                    'num_filters': 512,
                    'filter_size': (3, 3),
                    'pad': 1,
                    #                     'stride':1
                    'flip_filters': False
                }),
            (PoolLayer, {
                'name': 'pool5',
                'pool_size': (3, 3),
                'stride': 3,
                'ignore_border': False
            }),
            (DenseLayer, {
                'name': 'fc6',
                'num_units': 4096
            }),
            (DropoutLayer, {
                'name': 'drop6',
                'p': .5
            }),
            (DenseLayer, {
                'name': 'fc7',
                'num_units': 4096
            }),
        ],

        #        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,

        #  Do not need these unless trainng the net.
        #     regression=True,  # flag to indicate we're dealing with regression problem
        #     max_epochs=400,  # we want to train this many epochs
        #     verbose=1,
    )

    # upload pretrained weights
    vgg_nn.initialize()
    vgg_nn.load_params_from('./vgg_nolearn_saved_wts_biases.pkl')

    # upload mean image
    model = pickle.load(open('./vgg_cnn_s.pkl'))
    mean_image = model['mean image']

    # pickel the model and the mean image
    with open("/data/mean_image.pkl", 'w') as f:
        pickle.dump(mean_image, f)
    with open("/data/full_vgg.pkl", 'w') as f:
        pickle.dump(vgg_nn, f)

    return vgg_nn, mean_image
Example #19
def main():
    # Parse command line options
    parser = argparse.ArgumentParser(
        description='Test different nets with 3D data.')
    parser.add_argument('--flair',
                        action='store',
                        dest='flair',
                        default='FLAIR_preprocessed.nii.gz')
    parser.add_argument('--pd',
                        action='store',
                        dest='pd',
                        default='DP_preprocessed.nii.gz')
    parser.add_argument('--t2',
                        action='store',
                        dest='t2',
                        default='T2_preprocessed.nii.gz')
    parser.add_argument('--t1',
                        action='store',
                        dest='t1',
                        default='T1_preprocessed.nii.gz')
    parser.add_argument('--output',
                        action='store',
                        dest='output',
                        default='output.nii.gz')
    parser.add_argument('--no-docker',
                        action='store_false',
                        dest='docker',
                        default=True)

    c = color_codes()
    patch_size = (15, 15, 15)
    options = vars(parser.parse_args())
    batch_size = 10000
    min_size = 30

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Loading the net ' + c['b'] + '1' + c['nc'] + c['g'] + '>' +
          c['nc'])
    net_name = '/usr/local/nets/deep-challenge2016.init.model_weights.pkl' if options['docker'] \
        else './deep-challenge2016.init.model_weights.pkl'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer,
             dict(name='conv1_1',
                  num_filters=32,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_1',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (Conv3DDNNLayer,
             dict(name='conv2_1',
                  num_filters=64,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_2',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer,
             dict(name='out', num_units=2,
                  nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        verbose=10,
        max_epochs=50,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum(
            (p + t[:, 1])))],
    )
    net.load_params_from(net_name)

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Creating the probability map ' + c['b'] + '1' + c['nc'] + c['g'] +
          '>' + c['nc'])
    names = np.array(
        [options['flair'], options['pd'], options['t2'], options['t1']])
    image_nii = load_nii(options['flair'])
    image1 = np.zeros_like(image_nii.get_data())
    print('0% of data tested', end='\r')
    sys.stdout.flush()
    for batch, centers, percent in load_patch_batch_percent(
            names, batch_size, patch_size):
        y_pred = net.predict_proba(batch)
        print('%f%% of data tested' % percent, end='\r')
        sys.stdout.flush()
        [x, y, z] = np.stack(centers, axis=1)
        image1[x, y, z] = y_pred[:, 1]

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Loading the net ' + c['b'] + '2' + c['nc'] + c['g'] + '>' +
          c['nc'])
    net_name = '/usr/local/nets/deep-challenge2016.final.model_weights.pkl' if options['docker'] \
        else './deep-challenge2016.final.model_weights.pkl'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer,
             dict(name='conv1_1',
                  num_filters=32,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_1',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (Conv3DDNNLayer,
             dict(name='conv2_1',
                  num_filters=64,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_2',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer,
             dict(name='out', num_units=2,
                  nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        batch_iterator_train=BatchIterator(batch_size=4096),
        verbose=10,
        max_epochs=2000,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda t, p: 2 * np.sum(t * p[:, 1]) / np.sum(
            (t + p[:, 1])))],
    )
    net.load_params_from(net_name)

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Creating the probability map ' + c['b'] + '2' + c['nc'] + c['g'] +
          '>' + c['nc'])
    image2 = np.zeros_like(image_nii.get_data())
    print('0% of data tested', end='\r')
    sys.stdout.flush()
    for batch, centers, percent in load_patch_batch_percent(
            names, batch_size, patch_size):
        y_pred = net.predict_proba(batch)
        print('%f%% of data tested' % percent, end='\r')
        sys.stdout.flush()
        [x, y, z] = np.stack(centers, axis=1)
        image2[x, y, z] = y_pred[:, 1]

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Saving to file ' + c['b'] + options['output'] + c['nc'] + c['g'] +
          '>' + c['nc'])
    image = (image1 * image2) > 0.5

    # filter candidates < min_size
    labels, num_labels = ndimage.label(image)
    lesion_list = np.unique(labels)
    num_elements_by_lesion = ndimage.labeled_comprehension(
        image, labels, lesion_list, np.sum, float, 0)
    filt_min_size = num_elements_by_lesion >= min_size
    lesion_list = lesion_list[filt_min_size]
    image = reduce(np.logical_or, map(lambda lab: lab == labels, lesion_list))

    image_nii.get_data()[:] = np.roll(np.roll(image, 1, axis=0), 1, axis=1)
    path = '/'.join(options['t1'].rsplit('/')[:-1])
    outputname = options['output'].rsplit('/')[-1]
    image_nii.to_filename(os.path.join(path, outputname))

    if not options['docker']:
        path = '/'.join(options['output'].rsplit('/')[:-1])
        case = options['output'].rsplit('/')[-1]
        gt = load_nii(os.path.join(
            path, 'Consensus.nii.gz')).get_data().astype(dtype=np.bool)
        dsc = np.sum(
            2.0 * np.logical_and(gt, image)) / (np.sum(gt) + np.sum(image))
        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
              '<DSC value for ' + c['c'] + case + c['g'] + ' = ' + c['b'] +
              str(dsc) + c['nc'] + c['g'] + '>' + c['nc'])
Example #20
def main():

    parser = argparse.ArgumentParser(description='Test different nets with 3D data.')
    parser.add_argument('-f', '--folder', dest='dir_name', default='/home/sergivalverde/w/CNN/images/CH16')
    parser.add_argument('--flair', action='store', dest='flair', default='FLAIR_preprocessed.nii.gz')
    parser.add_argument('--pd', action='store', dest='pd', default='DP_preprocessed.nii.gz')
    parser.add_argument('--t2', action='store', dest='t2', default='T2_preprocessed.nii.gz')
    parser.add_argument('--t1', action='store', dest='t1', default='T1_preprocessed.nii.gz')
    parser.add_argument('--mask', action='store', dest='mask', default='Consensus.nii.gz')
    parser.add_argument('--old', action='store_true', dest='old', default=False)
    options = vars(parser.parse_args())

    c = color_codes()
    patch_size = (15, 15, 15)
    batch_size = 100000
    # Create the data
    patients = [f for f in sorted(os.listdir(options['dir_name']))
                if os.path.isdir(os.path.join(options['dir_name'], f))]
    flair_names = [os.path.join(options['dir_name'], patient, options['flair']) for patient in patients]
    pd_names = [os.path.join(options['dir_name'], patient, options['pd']) for patient in patients]
    t2_names = [os.path.join(options['dir_name'], patient, options['t2']) for patient in patients]
    t1_names = [os.path.join(options['dir_name'], patient, options['t1']) for patient in patients]
    names = np.stack([name for name in [flair_names, pd_names, t2_names, t1_names]])
    seed = np.random.randint(np.iinfo(np.int32).max)

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + 'Starting leave-one-out' + c['nc'])

    for i in range(0, 15):
        case = names[0, i].rsplit('/')[-2]
        path = '/'.join(names[0, i].rsplit('/')[:-1])
        print(c['c'] + '[' + strftime("%H:%M:%S") + ']  ' + c['nc'] + 'Patient ' + c['b'] + case + c['nc'])
        print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
              '<Running iteration ' + c['b'] + '1' + c['nc'] + c['g'] + '>' + c['nc'])
        net_name = os.path.join(path, 'deep-challenge2016.init.')
        net = NeuralNet(
            layers=[
                (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
                (Conv3DDNNLayer, dict(name='conv1_1', num_filters=32, filter_size=(5, 5, 5), pad='same')),
                (Pool3DDNNLayer, dict(name='avgpool_1', pool_size=2, stride=2, mode='average_inc_pad')),
                (Conv3DDNNLayer, dict(name='conv2_1', num_filters=64, filter_size=(5, 5, 5), pad='same')),
                (Pool3DDNNLayer, dict(name='avgpool_2', pool_size=2, stride=2, mode='average_inc_pad')),
                (DropoutLayer, dict(name='l2drop', p=0.5)),
                (DenseLayer, dict(name='l1', num_units=256)),
                (DenseLayer, dict(name='out', num_units=2, nonlinearity=nonlinearities.softmax)),
            ],
            objective_loss_function=objectives.categorical_crossentropy,
            update=updates.adam,
            update_learning_rate=0.0001,
            on_epoch_finished=[
                SaveWeights(net_name + 'model_weights.pkl', only_best=True, pickle=False),
                EarlyStopping(patience=10)
            ],
            verbose=10,
            max_epochs=50,
            train_split=TrainSplit(eval_size=0.25),
            custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum((p + t[:, 1])))],
        )
        flair_name = os.path.join(path, options['flair'])
        pd_name = os.path.join(path, options['pd'])
        t2_name = os.path.join(path, options['t2'])
        t1_name = os.path.join(path, options['t1'])
        names_test = np.array([flair_name, pd_name, t2_name, t1_name])
        outputname1 = os.path.join(path, 'test' + str(i) + '.iter1.nii.gz')
        try:
            net.load_params_from(net_name + 'model_weights.pkl')
        except IOError:
            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' +
                  c['g'] + 'Loading the data for ' + c['b'] + 'iteration 1' + c['nc'])
            names_lou = np.concatenate([names[:, :i], names[:, i + 1:]], axis=1)
            paths = ['/'.join(name.rsplit('/')[:-1]) for name in names_lou[0, :]]
            mask_names = [os.path.join(p_path, 'Consensus.nii.gz') for p_path in paths]

            x_train, y_train = load_iter1_data(
                names_lou=names_lou,
                mask_names=mask_names,
                patch_size=patch_size,
                seed=seed
            )

            print('                Training vector shape ='
                  ' (' + ','.join([str(length) for length in x_train.shape]) + ')')
            print('                Training labels shape ='
                  ' (' + ','.join([str(length) for length in y_train.shape]) + ')')

            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
                  'Training (' + c['b'] + 'initial' + c['nc'] + c['g'] + ')' + c['nc'])
            # We try to get the last weights to keep improving the net over and over
            net.fit(x_train, y_train)

        try:
            image_nii = load_nii(outputname1)
            image1 = image_nii.get_data()
        except IOError:
            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
                  '<Creating the probability map ' + c['b'] + '1' + c['nc'] + c['g'] + '>' + c['nc'])
            flair_name = os.path.join(path, options['flair'])
            image_nii = load_nii(flair_name)
            image1 = np.zeros_like(image_nii.get_data())
            print('              0% of data tested', end='\r')
            sys.stdout.flush()
            for batch, centers, percent in load_patch_batch_percent(names_test, batch_size, patch_size):
                y_pred = net.predict_proba(batch)
                print('              %f%% of data tested' % percent, end='\r')
                sys.stdout.flush()
                [x, y, z] = np.stack(centers, axis=1)
                image1[x, y, z] = y_pred[:, 1]

            image_nii.get_data()[:] = image1
            image_nii.to_filename(outputname1)

        ''' Here we get the seeds '''
        print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' +
              c['g'] + '<Looking for seeds for the final iteration>' + c['nc'])
        for patient in np.rollaxis(np.concatenate([names[:, :i], names[:, i+1:]], axis=1), 1):
            outputname = os.path.join('/'.join(patient[0].rsplit('/')[:-1]), 'test' + str(i) + '.iter1.nii.gz')
            try:
                load_nii(outputname)
                print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' +
                      c['g'] + '     Patient ' + patient[0].rsplit('/')[-2] + ' already done' + c['nc'])
            except IOError:
                print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' +
                      c['g'] + '     Testing with patient ' + c['b'] + patient[0].rsplit('/')[-2] + c['nc'])
                image_nii = load_nii(patient[0])
                image = np.zeros_like(image_nii.get_data())
                print('    0% of data tested', end='\r')
                for batch, centers, percent in load_patch_batch_percent(patient, 100000, patch_size):
                    y_pred = net.predict_proba(batch)
                    print('    %f%% of data tested' % percent, end='\r')
                    [x, y, z] = np.stack(centers, axis=1)
                    image[x, y, z] = y_pred[:, 1]

                print(c['g'] + '                   -- Saving image ' + c['b'] + outputname + c['nc'])
                image_nii.get_data()[:] = image
                image_nii.to_filename(outputname)

        ''' Here we perform the last iteration '''
        print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
              '<Running iteration ' + c['b'] + '2' + c['nc'] + c['g'] + '>' + c['nc'])
        outputname2 = os.path.join(path, 'test' + str(i) + '.old.iter2.nii.gz') if options['old'] \
            else os.path.join(path, 'test' + str(i) + '.new.iter2.nii.gz')
        net_name = os.path.join(path, 'deep-challenge2016.final.old.') if options['old'] \
            else os.path.join(path, 'deep-challenge2016.final.new.')
        net = NeuralNet(
            layers=[
                (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
                (Conv3DDNNLayer, dict(name='conv1_1', num_filters=32, filter_size=(5, 5, 5), pad='same')),
                (Pool3DDNNLayer, dict(name='avgpool_1', pool_size=2, stride=2, mode='average_inc_pad')),
                (Conv3DDNNLayer, dict(name='conv2_1', num_filters=64, filter_size=(5, 5, 5), pad='same')),
                (Pool3DDNNLayer, dict(name='avgpool_2', pool_size=2, stride=2, mode='average_inc_pad')),
                (DropoutLayer, dict(name='l2drop', p=0.5)),
                (DenseLayer, dict(name='l1', num_units=256)),
                (DenseLayer, dict(name='out', num_units=2, nonlinearity=nonlinearities.softmax)),
            ],
            objective_loss_function=objectives.categorical_crossentropy,
            update=updates.adam,
            update_learning_rate=0.0001,
            on_epoch_finished=[
                SaveWeights(net_name + 'model_weights.pkl', only_best=True, pickle=False),
                EarlyStopping(patience=50)
            ],
            batch_iterator_train=BatchIterator(batch_size=4096),
            verbose=10,
            max_epochs=2000,
            train_split=TrainSplit(eval_size=0.25),
            custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum((p + t[:, 1])))],
        )

        try:
            net.load_params_from(net_name + 'model_weights.pkl')
        except IOError:
            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' +
                  c['g'] + 'Loading the data for ' + c['b'] + 'iteration 2' + c['nc'])
            names_lou = np.concatenate([names[:, :i], names[:, i + 1:]], axis=1)
            paths = ['/'.join(name.rsplit('/')[:-1]) for name in names_lou[0, :]]
            roi_names = [os.path.join(p_path, 'test' + str(i) + '.iter1.nii.gz') for p_path in paths]
            mask_names = [os.path.join(p_path, 'Consensus.nii.gz') for p_path in paths]

            x_train, y_train = load_iter2_data(
                names_lou=names_lou,
                mask_names=mask_names,
                roi_names=roi_names,
                patch_size=patch_size,
                seed=seed,
                old=options['old']
            )

            print('              Training vector shape = (' + ','.join([str(length) for length in x_train.shape]) + ')')
            print('              Training labels shape = (' + ','.join([str(length) for length in y_train.shape]) + ')')
            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' +
                  c['g'] + 'Training (' + c['b'] + 'final' + c['nc'] + c['g'] + ')' + c['nc'])
            net.fit(x_train, y_train)
        try:
            image_nii = load_nii(outputname2)
            image2 = image_nii.get_data()
        except IOError:
            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
                  '<Creating the probability map ' + c['b'] + '2' + c['nc'] + c['g'] + '>' + c['nc'])
            image_nii = load_nii(flair_name)
            image2 = np.zeros_like(image_nii.get_data())
            print('              0% of data tested', end='\r')
            sys.stdout.flush()
            for batch, centers, percent in load_patch_batch_percent(names_test, batch_size, patch_size):
                y_pred = net.predict_proba(batch)
                print('              %f%% of data tested' % percent, end='\r')
                sys.stdout.flush()
                [x, y, z] = np.stack(centers, axis=1)
                image2[x, y, z] = y_pred[:, 1]

            image_nii.get_data()[:] = image2
            image_nii.to_filename(outputname2)

        image = (image1 * image2) > 0.5
        seg = np.roll(np.roll(image, 1, axis=0), 1, axis=1)
        image_nii.get_data()[:] = seg
        outputname_final = os.path.join(path, 'test' + str(i) + '.old.final.nii.gz') if options['old'] \
            else os.path.join(path, 'test' + str(i) + '.new.final.nii.gz')
        image_nii.to_filename(outputname_final)

        gt = load_nii(os.path.join(path, 'Consensus.nii.gz')).get_data().astype(dtype=np.bool)
        dsc = np.sum(2.0 * np.logical_and(gt, seg)) / (np.sum(gt) + np.sum(seg))
        print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
              '<DSC value for ' + c['c'] + case + c['g'] + ' = ' + c['b'] + str(dsc) + c['nc'] + c['g'] + '>' + c['nc'])
Example #21
def train_tupac(params_dict):
    global lookup, proba_before, proba_after, overlap

    ### CONSTANTS ###

    SIZE = params_dict['size']
    PATCH_SIZE = params_dict['patch_size']
    PATCH_GAP = int(PATCH_SIZE / 2)
    RADIUS = params_dict['radius']
    normalization = params_dict['normalization']

    net_name = params_dict['net_name']

    print("PATCH_SIZE: ", PATCH_SIZE)
    print("Network name: ", net_name)

    N = params_dict['N']
    MN = params_dict['MN']
    K = params_dict['K']
    EPOCHS = params_dict['epochs']
    DECAY = params_dict['decay']
    KGROWTH = params_dict['kgrowth']
    EGROWTH = params_dict['egrowth']

    VALID = 4000

    def inbounds(x, y):
        return x < SIZE - PATCH_SIZE and x > PATCH_SIZE and y < SIZE - PATCH_SIZE and y > PATCH_SIZE

    def totuple(a):
        try:
            return tuple(totuple(i) for i in a)
        except TypeError:
            return a

    def tolist(a):
        try:
            return list(totuple(i) for i in a)
        except TypeError:
            return a

    ### TODO 1: READ ALL DATA ###
    ## Final result: coords contains indexed coords of all mitotic cells ##

    print("\n\nData Reading.")

    img = []
    img_aux = []
    coords = []

    total_coords = []

    cnt = 0

    print("\nReading in original image files:")
    for imgfile in glob.iglob("data/train_tupac/*.jpg"):
        print("\n" + imgfile, end="")
        annotfile = imgfile[:-3] + "csv"
        img_vals = plt.imread(imgfile)

        if normalization:
            cntr = Controller(img)
            img_norm, _, __, __ = macenko(cntr)
            img.append(img_norm)
        else:
            img.append(img_vals)

        csvReader = csv.reader(open(annotfile, 'rb'))
        for row in csvReader:
            minx, miny, maxx, maxy = (SIZE, SIZE, 0, 0)
            random_coords = []
            for i in range(0, len(row) / 2):
                xv, yv = (int(row[2 * i]), int(row[2 * i + 1]))
                if xv > PATCH_SIZE / 2 + 1 and yv > PATCH_SIZE / 2 + 1 and xv < SIZE - PATCH_SIZE / 2 - 1 and yv < SIZE - PATCH_SIZE / 2 - 1:
                    coords.append((yv, xv, cnt))
                    total_coords.append((yv, xv, cnt))

        cnt += 1

    print("\n")
    print('Num images: ', len(img))
    print(len(coords))

    print('not synthesizing image through reflection')

    def get_patches(coords, patchsize=PATCH_SIZE):
        patches = np.zeros((len(coords), patchsize, patchsize, 3))
        i = 0
        for (x, y, img_num) in coords:
            #print x, y
            #print (x - patchsize/2), (x + patchsize/2 + 1), (y - patchsize/2), (y + patchsize/2 + 1)
            patches[i] = img[img_num,
                             (x - patchsize / 2):(x + patchsize / 2 + 1),
                             (y - patchsize / 2):(y + patchsize / 2 + 1), :]
            patches[i] = np.divide(patches[i], 255.0)
            i += 1
        return patches

    ### TODO 2: CREATE AND DESIGN CNN ####
    ## Final result: nn contains desired CNN ##

    print("\n\nCreating and Designing CNN.")

    def roc_robust(y_true, y_proba):
        if sum(y_true) == 0 or sum(y_true) == len(y_true):
            return 0.0
        else:
            return roc_auc_score(y_true, y_proba)

    print("Building Image Perturbation Models/Callbacks:")

    train_iterator_mixins = [
        RandomFlipBatchIteratorMixin,
        AffineTransformBatchIteratorMixin,
        #MeanSubtractBatchiteratorMixin
    ]
    TrainIterator = make_iterator('TrainIterator', train_iterator_mixins)

    test_iterator_mixins = [
        RandomFlipBatchIteratorMixin,
        #MeanSubtractBatchiteratorMixin
    ]
    TestIterator = make_iterator('TestIterator', test_iterator_mixins)

    mean_value = np.mean(np.mean(np.mean(img)))

    train_iterator_kwargs = {
        'batch_size': 20,
        'affine_p': 0.5,
        'affine_scale_choices': np.linspace(start=0.85, stop=1.6, num=10),
        'flip_horizontal_p': 0.5,
        'flip_vertical_p': 0.5,
        'affine_translation_choices': np.arange(-5, 6, 1),
        'affine_rotation_choices': np.linspace(start=-30.0, stop=30.0, num=20),
        #'mean': mean_value,
    }
    train_iterator_tmp = TrainIterator(**train_iterator_kwargs)

    test_iterator_kwargs = {
        'batch_size': 20,
        'flip_horizontal_p': 0.5,
        'flip_vertical_p': 0.5,
        #'mean': mean_value,
    }
    test_iterator_tmp = TestIterator(**test_iterator_kwargs)

    def color_transform(image):
        if random.uniform(0.0, 1.0) < 0.15:
            image[0] = np.multiply(image[0], random.uniform(0.95, 1.05))
        if random.uniform(0.0, 1.0) < 0.15:
            image[1] = np.multiply(image[1], random.uniform(0.95, 1.05))
        if random.uniform(0.0, 1.0) < 0.15:
            image[2] = np.multiply(image[2], random.uniform(0.95, 1.05))
        return np.clip(image, -1.0, 1.0).astype(np.float32)

    radius = PATCH_GAP
    kernel = np.zeros((2 * radius + 1, 2 * radius + 1))
    y, x = np.ogrid[-radius:radius + 1, -radius:radius + 1]
    mask = x**2 + y**2 >= radius**2
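    # circular mask: pixels outside a circle of radius PATCH_GAP around the patch centre are
    # zeroed in the batch iterator below, presumably to hide surrounding context; the `kernel`
    # array defined above appears unused in this excerpt.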

    class CustomBatchIterator(BatchIterator):
        def __init__(self, batch_size, built_iterator):
            super(CustomBatchIterator, self).__init__(batch_size=batch_size)
            self.iter = built_iterator

        def transform(self, Xb, yb):
            Xb = get_patches(Xb)
            Xb = Xb.astype(np.float32).swapaxes(1, 3)
            for i in range(0, len(yb)):
                Xb[i] = color_transform(Xb[i])
                for c in range(0, 3):
                    Xb[i, c][mask] = 0.0
            yb = yb.astype(np.uint8)
            Xb, yb = self.iter.transform(Xb, yb)
            #for i in range(0, len(yb)):
            #    plt.imsave("img" + str(yb[i]) + "num" + str(i) + ".png", Xb[i].swapaxes(0, 2))
            return Xb, yb

    train_iterator = CustomBatchIterator(batch_size=20,
                                         built_iterator=train_iterator_tmp)
    test_iterator = CustomBatchIterator(batch_size=20,
                                        built_iterator=test_iterator_tmp)

    # Model Specifications
    net = phf.build_GoogLeNet(PATCH_SIZE, PATCH_SIZE)

    ### TODO 3: DEFINE METHODS TO WORK WITH NORMAL_STACKS ###
    ## Final result: update_stack(stack, times) ###

    proba_before = 0.0
    proba_after = 0.0
    overlap = 0.0

    def prob(coord, net):
        patch = get_patches([coord]).swapaxes(1, 3).astype(np.float32)
        patch2 = patch.swapaxes(2, 3)
        saved_iter = net.batch_iterator_test
        net.batch_iterator_test = test_iterator_tmp
        prob = net.predict_proba(patch)[0, 1]
        prob2 = net.predict_proba(patch2)[0, 1]
        net.batch_iterator_test = saved_iter
        return (prob + prob2) / 2.0
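    # prob() averages the positive-class probability over a patch and its transposed copy --
    # a small test-time augmentation.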

    def create_stack(length):
        global lookup
        normal_stack = []
        while len(normal_stack) < length:
            triple = (random.randint(PATCH_SIZE / 2,
                                     SIZE - PATCH_SIZE / 2 - 1),
                      random.randint(PATCH_SIZE / 2,
                                     SIZE - PATCH_SIZE / 2 - 1),
                      random.randint(-len(img_aux),
                                     len(img) - 1))
            if triple not in lookup:
                normal_stack.append(triple)
                lookup.add(triple)
        return normal_stack

    def update_stack(normal_stack, iters, net):
        global lookup, proba_before, proba_after, overlap
        init_len = len(normal_stack)
        probs = []
        for i in range(0, len(normal_stack)):
            probs.append(prob(normal_stack[i], net))
        proba_before = np.mean(probs)

        for i in range(0, iters):
            triple = (random.randint(PATCH_SIZE / 2,
                                     SIZE - PATCH_SIZE / 2 - 1),
                      random.randint(PATCH_SIZE / 2,
                                     SIZE - PATCH_SIZE / 2 - 1),
                      random.randint(-len(img_aux),
                                     len(img) - 1))
            if triple not in lookup:
                normal_stack.append(triple)
                probs.append(prob(triple, net))
                lookup.add(triple)

        sort_idx = np.argsort(probs)[::-1]
        normal_stack = np.array(normal_stack)[sort_idx, :]
        normal_stack = normal_stack[0:init_len]
        normal_stack = normal_stack.tolist()

        probs = np.array(probs)
        probs = probs[sort_idx]
        probs = probs[0:init_len]

        proba_after = np.mean(probs)

        overlap = 0.0
        for i in sort_idx[0:init_len]:
            if i < init_len:
                overlap += 1.0

        overlap /= init_len

        return normal_stack
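    # update_stack performs a simple form of hard-negative mining: new random normal-tissue
    # candidates are scored by the current network and only the highest-probability (hardest)
    # patches are kept, so the normal stack stays difficult for the classifier.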

    lookup = set(total_coords)

    coords = shuffle(coords)
    valid_sample_mitosis = coords[0:(VALID / 2)]
    coords = coords[(VALID / 2):(len(coords))]
    valid_sample_normal = create_stack(VALID / 2)

    valid_sample = valid_sample_mitosis + valid_sample_normal

    valid_sample_y = np.append(np.ones(VALID / 2), np.zeros(VALID / 2))

    lookup = set(total_coords + valid_sample_normal)

    def get_validation(train_X, train_y, net):
        return train_X, valid_sample, train_y, valid_sample_y
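    # get_validation overrides nolearn's train_split hook so the fixed validation sample built
    # above (half mitosis, half normal patches) is reused every epoch instead of a random split.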

    nn = NeuralNet(
        net['softmax'],
        max_epochs=1,
        update=adam,
        update_learning_rate=.0001,  #start with a really low learning rate
        #objective_l2=0.0001,

        # batch iteration params
        batch_iterator_train=train_iterator,
        batch_iterator_test=test_iterator,
        train_split=get_validation,
        verbose=3,
    )

    ### TODO 4: Train network on normal stacks ###
    ## Final result: done! ###

    #print('\nLoading Data from Previous Network')

    #nn.load_params_from("cachedgooglenn2.params")

    print("\n\nTraining Network!")

    normal_stack = create_stack(N)

    print("Made stack!")

    for k in range(0, 1000):
        saved_accuracy = 10011.0
        print("Length of coords:", len(coords), "length of sample", MN)
        data = np.array(normal_stack + random.sample(coords, MN))
        val = np.append(np.zeros(N), np.ones(MN))
        data, val = shuffle(data, val)
        for i in range(0, int(EPOCHS)):
            nn.fit(data, val)
            cur_accuracy = nn.train_history_[-1]['valid_loss']
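            # rollback guard (despite the variable names, these track validation loss): if the
            # loss worsens by more than 0.004 relative to the best saved value, reload the last
            # checkpoint; otherwise save new parameters and continue.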
            if cur_accuracy - 0.004 > saved_accuracy:
                print("Test Loss Jump! Loading previous network!")
                with suppress_stdout():
                    nn.load_params_from("data/" + str(net_name) + ".params")
            else:
                nn.save_params_to("data/" + net_name + ".params")
                saved_accuracy = cur_accuracy
            nn.update_learning_rate *= DECAY

        normal_stack = update_stack(normal_stack, int(K * N), nn)

        print(
            "Data Report: K={3:.2f}, Prob Before={0}, Prob After={1}, Overlap={2}"
            .format(proba_before, proba_after, overlap, K))

        accuracy = nn.train_history_[-1]['valid_accuracy']
        nn.save_params_to('checkpoints/' + str(net_name) + '-checkpoint' +
                          str(k) + '-validacc' + str(accuracy) + '.params')

        K += KGROWTH
        EPOCHS *= EGROWTH
        for r in range(len(nn.train_history_)):
            nn.train_history_[r]['train_loss'] = 10011.0

    nn.save_params_to("data/" + str(net_name) + ".params")
Beispiel #22
0
def main():

    seed = 12345
    np.random.seed(seed)
    set_lasagne_rng(RandomState(seed))

    LOOKUP_PATH = os.path.join(WDIR, 'data', 'HIV.pkl')
    lookup = pickle.load(open(LOOKUP_PATH, 'rb'))
    data_list = lookup['data']
    y = lookup['y']
    labels = lookup['labels']
    nmark = len(labels)

    # event occurrence list
    occurred = [x for i, x in enumerate(data_list) if y[i, 1] == 1]
    not_occurred = [x for i, x in enumerate(data_list) if y[i, 1] == 0]
    y1 = y[y[:, 1] == 1]
    y0 = y[y[:, 1] == 0]

    # split the examples randomly into a training (2/3) and test (1/3) cohort
    # both cohorts should contain an equal percentage of censored data
    sep1 = len(y1) / 3
    sep0 = len(y0) / 3

    # include only uncensored data from the training cohort for training CellCnn
    tr_list = occurred[sep1:]
    tr_stime = y1[sep1:, 0].astype(float)

    # transform survival times to [-1, 1] interval by ranking them
    tr_stime = (ss.rankdata(tr_stime) / (0.5 * len(tr_stime))) - 1

    # fit scaler to all training data
    sc = StandardScaler()
    sc.fit(np.vstack(occurred[sep1:] + not_occurred[sep0:]))
    tr_list = [sc.transform(x) for x in tr_list]

    # the test cohort
    validation_list = [
        sc.transform(x) for x in (occurred[:sep1] + not_occurred[:sep0])
    ]
    y_valid = np.vstack([y1[:sep1], y0[:sep0]])

    # cross validation on the training cohort
    nfold = 10
    nfilter = 3

    skf = KFold(len(tr_list), n_folds=nfold, shuffle=True)
    committee = []
    valid_accuracy = []
    accum_w = np.empty((nfilter * nfold, nmark + 2))

    for ifold, (train_index, test_index) in enumerate(skf):
        cv_train_samples = [tr_list[t_idx] for t_idx in train_index]
        cv_test_samples = [tr_list[t_idx] for t_idx in test_index]
        cv_y_train = list(tr_stime[train_index])
        cv_y_test = list(tr_stime[test_index])

        results = train_model(cv_train_samples,
                              cv_y_train,
                              labels,
                              valid_samples=cv_test_samples,
                              valid_phenotypes=cv_y_test,
                              ncell=500,
                              nsubset=200,
                              subset_selection='random',
                              nrun=3,
                              pooling='mean',
                              regression=True,
                              nfilter=nfilter,
                              learning_rate=0.03,
                              momentum=0.9,
                              l2_weight_decay_conv=1e-8,
                              l2_weight_decay_out=1e-8,
                              max_epochs=20,
                              verbose=1,
                              select_filters='best',
                              accur_thres=-1)

        net_dict = results['best_net']

        # update the committee of networks
        committee.append(net_dict)
        valid_accuracy.append(results['best_accuracy'])
        w_tot = param_vector(net_dict, regression=True)

        # add weights to accumulator
        accum_w[ifold * nfilter:(ifold + 1) * nfilter] = w_tot

    save_path = os.path.join(OUTDIR, 'network_committee.pkl')
    with open(save_path, 'wb') as f:
        pickle.dump((committee, valid_accuracy), f, -1)
    '''
    committee, valid_accuracy = pickle.load(open(save_path, 'r'))    
    # retrieve the filter weights
    for ifold, net_dict in enumerate(committee):
        w_tot = param_vector(net_dict, regression=True)
                
        # add weights to accumulator    
        accum_w[ifold*nfilter:(ifold+1)*nfilter] = w_tot
    '''

    # choose the strong signatures (all of them)
    w_strong = accum_w

    # members of each cluster should have cosine similarity > 0.7
    # equivalently, cosine distance < 0.3
    Z = linkage(w_strong, 'average', metric='cosine')
    clusters = fcluster(Z, .3, criterion='distance') - 1

    n_clusters = len(np.unique(clusters))
    print '%d clusters chosen' % (n_clusters)

    # plot the discovered filter profiles
    plt.figure(figsize=(3, 2))
    idx = range(nmark) + [nmark + 1]
    clmap = sns.clustermap(pd.DataFrame(w_strong[:, idx],
                                        columns=labels + ['survival']),
                           method='average',
                           metric='cosine',
                           row_linkage=Z,
                           col_cluster=False,
                           robust=True,
                           yticklabels=clusters)
    clmap.cax.set_visible(False)
    fig_path = os.path.join(OUTDIR, 'HIV_clmap.eps')
    clmap.savefig(fig_path, format='eps')
    plt.close()

    # generate the consensus filter profiles
    c = Counter(clusters)
    cons = []
    for key, val in c.items():
        if val > nfold / 2:
            cons.append(np.mean(w_strong[clusters == key], axis=0))
    cons_mat = np.vstack(cons)

    # plot the consensus filter profiles
    plt.figure(figsize=(10, 3))
    idx = range(nmark) + [nmark + 1]
    ax = sns.heatmap(pd.DataFrame(cons_mat[:, idx],
                                  columns=labels + ['survival']),
                     robust=True,
                     yticklabels=False)
    plt.xticks(rotation=90)
    ax.tick_params(axis='both', which='major', labelsize=20)
    plt.tight_layout()
    fig_path = os.path.join(OUTDIR, 'clmap_consensus.eps')
    plt.savefig(fig_path, format='eps')
    plt.close()

    # create an ensemble of neural networks
    ncell_cons = 3000
    ncell_voter = 3000
    layers_voter = [(layers.InputLayer, {
        'name': 'input',
        'shape': (None, nmark, ncell_voter)
    }),
                    (layers.Conv1DLayer, {
                        'name': 'conv',
                        'num_filters': nfilter,
                        'filter_size': 1
                    }),
                    (layers.Pool1DLayer, {
                        'name': 'meanPool',
                        'pool_size': ncell_voter,
                        'mode': 'average_exc_pad'
                    }),
                    (layers.DenseLayer, {
                        'name': 'output',
                        'num_units': 1,
                        'nonlinearity': T.tanh
                    })]
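    # layers_voter mirrors the CellCnn model used during cross-validation (a 1x1 convolution over
    # the markers, mean pooling across cells, tanh output) so that the committee weights saved
    # above can be loaded into each voter below.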

    # predict on the test cohort
    small_data_list_v = [
        x[:ncell_cons].T.reshape(1, nmark, ncell_cons) for x in validation_list
    ]
    data_v = np.vstack(small_data_list_v)
    stime, censor = y_valid[:, 0], y_valid[:, 1]

    # committee of the best nfold/2 models
    voter_risk_pred = list()
    for ifold in np.argsort(valid_accuracy):
        voter = NeuralNet(layers=layers_voter,
                          update=nesterov_momentum,
                          update_learning_rate=0.001,
                          regression=True,
                          max_epochs=5,
                          verbose=0)
        voter.load_params_from(committee[ifold])
        voter.initialize()
        # rank the risk predictions
        voter_risk_pred.append(ss.rankdata(-np.squeeze(voter.predict(data_v))))
    all_voters = np.vstack(voter_risk_pred)

    # compute mean rank per individual
    risk_p = np.mean(all_voters, axis=0)
    g1 = np.squeeze(risk_p > np.median(risk_p))
    voters_pval_v = logrank_pval(stime, censor, g1)
    fig_v = os.path.join(OUTDIR, 'cellCnn_cox_test.eps')
    plot_KM(stime, censor, g1, voters_pval_v, fig_v)

    # filter-activating cells
    data_t = np.vstack(small_data_list_v)
    data_stack = np.vstack([x for x in np.swapaxes(data_t, 2, 1)])

    # finally define a network from the consensus filters
    nfilter_cons = cons_mat.shape[0]
    ncell_cons = 3000
    layers_cons = [(layers.InputLayer, {
        'name': 'input',
        'shape': (None, nmark, ncell_cons)
    }),
                   (layers.Conv1DLayer, {
                       'name': 'conv',
                       'b': init.Constant(cons_mat[:, -2]),
                       'W': cons_mat[:, :-2].reshape(nfilter_cons, nmark, 1),
                       'num_filters': nfilter_cons,
                       'filter_size': 1
                   }),
                   (layers.Pool1DLayer, {
                       'name': 'meanPool',
                       'pool_size': ncell_cons,
                       'mode': 'average_exc_pad'
                   }),
                   (layers.DenseLayer, {
                       'name': 'output',
                       'num_units': 1,
                       'W': np.sign(cons_mat[:, -1:]),
                       'b': init.Constant(0.),
                       'nonlinearity': T.tanh
                   })]

    net_cons = NeuralNet(layers=layers_cons,
                         update=nesterov_momentum,
                         update_learning_rate=0.001,
                         regression=True,
                         max_epochs=5,
                         verbose=0)
    net_cons.initialize()

    # get the representation after mean pooling
    xs = T.tensor3('xs').astype(theano.config.floatX)
    act_conv = theano.function([xs], lh.get_output(net_cons.layers_['conv'],
                                                   xs))

    # and apply to the test data
    act_tot = act_conv(data_t)
    act_tot = np.swapaxes(act_tot, 2, 1)
    act_stack = np.vstack([x for x in act_tot])
    idx = range(7) + [8, 9]

    for i_map in range(nfilter_cons):
        val = act_stack[:, i_map]
        descending_order = np.argsort(val)[::-1]
        val_cumsum = np.cumsum(val[descending_order])
        data_sorted = data_stack[descending_order]
        thres = 0.75 * val_cumsum[-1]
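        # keep the most strongly activating cells that together account for 75% of this
        # filter's total activation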
        res_data = data_sorted[val_cumsum < thres]
        fig_path = os.path.join(OUTDIR, 'filter_' + str(i_map) + '_active.eps')
        plot_marker_distribution([res_data[:, idx], data_stack[:, idx]],
                                 ['filter ' + str(i_map), 'all'],
                                 [labels[l]
                                  for l in idx], (3, 3), fig_path, 24)
Beispiel #23
0
class CNN:
    def __init__(self,subject):
        self.convnet = NeuralNet(layers=[])
        self.subject = subject

    def make_cnn(self,X,y):
        #FSIZE = (int(np.floor(X.shape[2])), int(np.floor(X.shape[3]/4)))
        
        
        #FSIZE3 = (2,2)
        NUM_FILTERS1 = 16
        NUM_FILTERS2 = 32
        NUM_FILTERS3 = 256

        FSIZE1 = (X.shape[2],1)
        FSIZE2 = (NUM_FILTERS1,2)
        FSIZE3 = (NUM_FILTERS2,3)

        #x = theano.tensor.tensor4()
        #ax = theano.tensor.scalar()
        # geom_mean = theano.function(
        #     [x,axis = 3],
        #     theano.tensor.exp(theano.tensor.mean(theano.tensor.log(x), axis=axis, dtype='float32'))
        #     )
        # l2_norm = theano.function(
        #     [x,axis = 3],
        #     x.norm(2,axis=axis)
        #     )
        def geom_mean(x,axis=None):
            # x = theano.tensor.as_tensor_variable(x)
            # log = theano.tensor.log(x)
            # m = theano.tensor.mean(log,axis=axis)
            # g = m
            log = np.log(x)
            m = log.mean(axis = axis)
            g = np.exp(m)

            #g = theano.tensor.exp(m)
            #g = theano.tensor.exp(theano.tensor.mean(theano.tensor.log(x), axis=axis))
            print "gmean",g.type,g
            return g
        
        def l2_norm(x,axis=None):
            x = theano.tensor.as_tensor_variable(x)
            l = x.norm(2, axis=axis)
            print "norm", l.type, l
            return l

        def me(x,axis=None):
            x = theano.tensor.as_tensor_variable(x)
            m = theano.tensor.mean(x,axis=axis)
            print "mean",m.type,m
            return m
        #print type(theano.tensor.mean),type(geom_mean),type(l2_norm)
        #learning_rate = 0.0001
        #learning_rate = 0.0005
        #learning_rate = .001
        learning_rate = .00001
        # if 'pat' in self.subject:
        #      learning_rate = 0.0001
        #FSIZE1 = (1, 2)
        #FSIZE2 = (1, X.shape[3])
        
        convnet = NeuralNet(
            layers = [
                (InputLayer,{'shape' : (None,1 , X.shape[2],X.shape[3])}),

                (Conv2DLayer,{'num_filters' : NUM_FILTERS1, 'filter_size' : FSIZE1}),

                (DropoutLayer,{'p' : .75}),
                
                (ReshapeLayer,{'shape' : ([0],[2],[1],[3])}),

                (Conv2DLayer,{'name': 'conv2', 'num_filters' : NUM_FILTERS2, 'filter_size' : FSIZE2}),

                #(DropoutLayer,{'p' : .85}),
                
                #(ReshapeLayer,{'shape' : ([0],[2],[1],[3])}),

                #(Conv2DLayer,{'name' : 'conv3', 'num_filters' : NUM_FILTERS3, 'filter_size' : FSIZE3}),
                
                (GlobalPoolLayer,{'name' : 'g1', 'incoming' : 'conv2','pool_function' : me }),
                (GlobalPoolLayer,{'name' : 'g2', 'incoming' : 'conv2','pool_function' : theano.tensor.max }),
                (GlobalPoolLayer,{'name' : 'g3', 'incoming' : 'conv2','pool_function' : theano.tensor.min }),
                (GlobalPoolLayer,{'name' : 'g4', 'incoming' : 'conv2','pool_function' : theano.tensor.var }),
                #(GlobalPoolLayer,{'name' : 'g5', 'incoming' : 'conv2','pool_function' : geom_mean}),
                #(GlobalPoolLayer,{'name' : 'g6', 'incoming' : 'conv2','pool_function' : l2_norm }),
                
                (ConcatLayer,{'incomings' : ['g1','g2','g3','g4']}),#]}),#
                
                (DenseLayer, {'num_units': 256}),
                (DropoutLayer,{'p':.5}),
                (DenseLayer, {'num_units': 256}),

                (DenseLayer, {'num_units': 2, 'nonlinearity': softmax}),
            ],

            update_learning_rate=theano.shared(float32(learning_rate)),
            update_momentum=theano.shared(float32(0.9)),
            verbose=1,
            max_epochs = 100000,
            on_epoch_finished=[
            EarlyStopping(patience=100)
            ],
            )
        return convnet

    def fit(self,X,y,xt,yt):
        
        X,y,xt,yt = formatData(X,y=y,Xt=xt,yt=yt)
        self.convnet = self.make_cnn(X,y)
        print "shape",X.shape
        self.convnet.fit(X,y,xt,yt)
        

    def predict_proba(self,X):
        X,_,_,_ = formatData(X)
        return self.convnet.predict_proba(X)

    def predict(self,X):
        X,_,_,_ = formatData(X)
        return self.convnet.predict(X)

    def get_params(self,deep):
        return self.convnet.get_params()
    def load_params_from(self,net):
        return self.convnet.load_params_from(net)
Beispiel #24
0
    regression=False,
    objective_loss_function=objectives.categorical_crossentropy,

    update=updates.adam,
    update_learning_rate=1e-3,

    batch_iterator_train=train_iterator,
    batch_iterator_test=test_iterator,

    on_epoch_finished=[
        save_weights,
        save_training_history,
        plot_training_history,
        early_stopping
    ],

    verbose=10,
    max_epochs=100
)

if __name__ == '__main__':
    X_train, X_test, y_train, y_test = load_data(test_size=0.25, random_state=42)
    net.fit(X_train, y_train)

    # Load the best weights from pickled model
    net.load_params_from('./examples/mnist/model_weights.pkl')

    score = net.score(X_test, y_test)
    print 'Final score %.4f' % score
Beispiel #25
0
def build_dbn():
	cols = ['Start']

	t = 1
	channels = 14
	batch_size = None  # None = arbitrary batch size
	hidden_layer_size = 100  #change to 1024
	N_EVENTS = 1
	max_epochs = 100
	NO_TIME_POINTS = 70 #311


	ids_tot = []
	pred_tot = []
	test_dict = dict()

	test_total = 0


	X, y = load_dataset()
	X_test, ids_tot, test_dict, test_total = load_testdata()
	

	net = NeuralNet(
		layers=[
			('input', layers.InputLayer),
			('dropout1', layers.DropoutLayer),
			('conv1', layers.Conv1DLayer),
			('conv2', layers.Conv1DLayer),
			('pool1', layers.MaxPool1DLayer),
			('dropout2', layers.DropoutLayer),
			('hidden4', layers.DenseLayer),
			('dropout3', layers.DropoutLayer),
			('hidden5', layers.DenseLayer),
			('dropout4', layers.DropoutLayer),
			('output', layers.DenseLayer),
		],
		input_shape=(None, channels, NO_TIME_POINTS),
		dropout1_p=0.5,
		conv1_num_filters=4, conv1_filter_size=1,
		conv2_num_filters=8, conv2_filter_size=4, pool1_pool_size=4,
		dropout2_p=0.5, hidden4_num_units=hidden_layer_size,
		dropout3_p=0.5, hidden5_num_units=hidden_layer_size,
		dropout4_p=0.5, output_num_units=N_EVENTS, output_nonlinearity=sigmoid,

		batch_iterator_train = BatchIterator(batch_size=1000),
		batch_iterator_test = BatchIterator(batch_size=1000),

		y_tensor_type=theano.tensor.matrix,
		update=nesterov_momentum,
		update_learning_rate=theano.shared(float(0.03)),
		update_momentum=theano.shared(float(0.9)),

		objective_loss_function=loss,
		regression=True,

		max_epochs=max_epochs,
		verbose=1,
	)

	
	###process training data####
	X = data_preprocess(X)
	total_time_points = len(X) // NO_TIME_POINTS

	no_rows = total_time_points * NO_TIME_POINTS
	print X.shape
	print total_time_points
	print no_rows

	X = X[0:no_rows, :]

	print X.shape

	X = X.transpose()
	X_Samples = np.split(X, total_time_points, axis=1)
	X = np.asarray(X_Samples)
	print X.shape


	y = y[0:no_rows, :]
	y = y[::NO_TIME_POINTS, :]
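	# each block of NO_TIME_POINTS consecutive samples becomes one (channels x time) training
	# example; its label is taken from the first row of the block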





	print("Training trial %d...." %(t))
	net.fit(X,y)

	####process test data####
	print("Creating prediction file ... ")

	X_test = X_test
	X_test = data_preprocess(X_test)
	total_test_time_points = len(X_test) // NO_TIME_POINTS
	remainder_test_points = len(X_test) % NO_TIME_POINTS

	no_rows = total_test_time_points * NO_TIME_POINTS
	X_test = X_test[0:no_rows, :]

	X_test = X_test.transpose()
	X_test_Samples = np.split(X_test, total_test_time_points, axis=1)
	X_test = np.asarray(X_test_Samples)
	
###########################################################################
#######get predictions and write to files for series 9 and series 10#######
	print("Testing subject 0....")
	params = net.get_all_params_values()
	learned_weights = net.load_params_from(params)
	probabilities = net.predict_proba(X_test)

	total_time_points = []
	all_remainder_data = []
	subs = []
	total_test_points = 0

	trials = np.array(['01','02','03','04','05','06','07','08','09','10'])

	for trial in trials:    	
		sub = 'subj{0}_series{1}'.format('0', trial)
		data_len = test_dict[sub]
		total_time_point = data_len // NO_TIME_POINTS
		remainder_data = data_len % NO_TIME_POINTS

		subs.append(sub)
		total_time_points.append(total_time_point)
		all_remainder_data.append(remainder_data)

	total_test_points = np.sum(total_time_points)


	print len(ids_tot)
	print cols

	print len(probabilities)

	for i, p in enumerate(probabilities):
		for j in range(NO_TIME_POINTS):
			pred_tot.append(p)


	print len(pred_tot)
	
	# for k in range(np.sum(all_remainder_data)):
	# 	pred_tot.append(pred_tot[-1])

	#submission file
	print('Creating submission(prediction) file...')

	submission_file = './test_conv_net_push.csv'
	# create pandas object for submission

	submission = pd.DataFrame(index=ids_tot[:len(pred_tot)],
	                           columns=cols,
	                           data=pred_tot)
	# write file
	submission.to_csv(submission_file, index_label='id', float_format='%.6f')
Beispiel #26
0

import sys

sys.setrecursionlimit(10000)

X = None
y = None

if os.path.exists('X.pickle') and os.path.exists('y.pickle'):
    X = pickle.load(open('X.pickle', 'rb'))
    y = pickle.load(open('y.pickle', 'rb'))
else:
    X, y = load2d()
    with open('X.pickle', 'wb') as f:
        pickle.dump(X, f, -1)
    with open('y.pickle', 'wb') as f:
        pickle.dump(y, f, -1)

if os.path.exists('net.pickle'):
    print 'training already finished'
elif os.path.exists('net_epoch_backup.pickle'):
    net.load_params_from('net_epoch_backup.pickle')
    net.fit(X, y)
else:
    net.fit(X, y)

if net is not None:
    with open('net.pickle', 'wb') as f:
        pickle.dump(net, f, -1)
Beispiel #27
0
normal_stack = create_stack(N)

print("Made stack!")

for k in range(0, 1000):
    saved_accuracy = 10011.0
    data = np.array(normal_stack + random.sample(coords, N))
    val = np.append(np.zeros(N), np.ones(N))
    data, val = shuffle(data, val)
    for i in range(0, int(EPOCHS)):
        nn.fit(data, val)
        cur_accuracy = nn.train_history_[-1]['valid_loss']
        if cur_accuracy - 0.004 > saved_accuracy:
            print("Test Loss Jump! Loading previous network!")
            with suppress_stdout():
                nn.load_params_from("cachedgooglenn2.params")
        else:
            nn.save_params_to('cachedgooglenn2.params')
            saved_accuracy = cur_accuracy
        nn.update_learning_rate *= DECAY

    normal_stack = update_stack(normal_stack, int(K*N), nn)

    print("Data Report: K={3:.2f}, Prob Before={0}, Prob After={1}, Overlap={2}".format(proba_before, proba_after, overlap, K))
    K += KGROWTH
    EPOCHS *= EGROWTH
    for r in range(len(nn.train_history_)):
        nn.train_history_[r]['train_loss'] = 10011.0

nn.save_params_to('googlenn2.params')
Beispiel #28
0
def build_dbn():
	cols = ['Start']

	t = 1
	channels = 14
	batch_size = None  # None = arbitrary batch size
	hidden_layer_size = 100
	N_EVENTS = 1
	max_epochs = 500
	NO_TIME_POINTS = 177
	
	ids_tot = []
	pred_tot = []
	test_dict = dict()
	
	test_total = 0
	
	net = NeuralNet(
		layers=[
			('input', layers.InputLayer),
			('conv1', layers.Conv1DLayer),
			('conv2', layers.Conv1DLayer),
			('pool1', layers.MaxPool1DLayer),
			('dropout2', layers.DropoutLayer),
			('hidden4', layers.DenseLayer),
			('dropout3', layers.DropoutLayer),
			('hidden5', layers.DenseLayer),
			('dropout4', layers.DropoutLayer),
			('output', layers.DenseLayer),
		],
		input_shape=(batch_size, channels, NO_TIME_POINTS),
		conv1_num_filters=4, conv1_filter_size=1,
		conv1_nonlinearity=None,
		conv2_num_filters=8, conv2_filter_size=5,
		pool1_pool_size=4,
		dropout2_p=0.5, hidden4_num_units=hidden_layer_size,
		dropout3_p=0.3, hidden5_num_units=hidden_layer_size,
		dropout4_p=0.2, output_num_units=N_EVENTS,
		output_nonlinearity=sigmoid,
		
		batch_iterator_train = BatchIterator(batch_size=1000),
		batch_iterator_test = BatchIterator(batch_size=1000),
		
		y_tensor_type=theano.tensor.matrix,
		update=nesterov_momentum,
		update_learning_rate=theano.shared(float(0.03)),
		update_momentum=theano.shared(float(0.9)),
		
		objective_loss_function=loss,
		regression=True,

		on_epoch_finished=[
			AdjustVariable('update_learning_rate', start=0.03,stop=0.0001),
			AdjustVariable('update_momentum', start=0.9, stop=0.999),
			EarlyStopping(patience=100),	
		],

		max_epochs=max_epochs,
		verbose=1,
		)
	
	# load trial dataset
	dic = pickle.load(open('datapickled/traildata.pickle', 'rb'))
	
	X = dic['X']
	y = dic['y']
	
	# process training data
	total_time_points = len(X) // NO_TIME_POINTS
	no_rows = total_time_points * NO_TIME_POINTS

	X = X[0:no_rows, :]
	
	X = X.transpose()
	X_Samples = np.split(X, total_time_points, axis=1)
	X = np.asarray(X_Samples)
	
	y = y[0:no_rows, :]
	y = y[::NO_TIME_POINTS, :]
	y = y.astype('float32')
	
	net.fit(X,y)
	
	tip = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
	
	# Save the net
	with open('net/net'+tip+'.pickle', 'wb') as f:
		pickle.dump(net, f, -1)
	
	plot(net)

	# Load test data
	dic = pickle.load(open('datapickled/testdata2.pickle', 'rb'))
	X_test = dic['X_test']
	ids_tot = dic['ids_tot']
	test_dict = dic['test_dict']
	test_total = dic['test_total']

	####process test data####
	print("Creating prediction file ... ")
	
	X_test = X_test
	total_test_len = len(X_test)
	
	total_test_time_points = len(X_test) // NO_TIME_POINTS
	remainder_test_points = len(X_test) % NO_TIME_POINTS
	
	no_rows = total_test_time_points * NO_TIME_POINTS
	X_test = X_test[0:no_rows, :]

	X_test = X_test.transpose()
	X_test_Samples = np.split(X_test, total_test_time_points, axis=1)
	X_test = np.asarray(X_test_Samples)
	
	# Evaluate test data
	print("Testing subject 0....")
	params = net.get_all_params_values()
	learned_weights = net.load_params_from(params)
	probabilities = net.predict_proba(X_test)
	
	total_test_points = total_test_len // NO_TIME_POINTS
	remainder_data = total_test_len % NO_TIME_POINTS
	for i, p in enumerate(probabilities):
		if i != total_test_points:
			for j in range(NO_TIME_POINTS):
				pred_tot.append(p)
	
	# create prediction file
	print('Creating submission(prediction) file...')
	tip = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
	submission_file = 'res/test_conv_net_push'+tip+'.csv'
	# create pandas object
	submission =  pd.DataFrame(index=ids_tot[:len(pred_tot)],columns=cols,data=pred_tot)
	# write file
	submission.to_csv(submission_file, index_label='id', float_format='%.6f')

"""Loading data and training Lasagne network using nolearn"""

trainVal2 = trainVal2
print trainImg2.shape

print "Ratio: " + str(1.0 - float(sum(trainVal2)) / float(len(trainVal2)))

best_accuracy = 0.0
print "Training Classifier: 80/20 split"
for i in [1, 2, 3, 4, 6, 8, 10, 40, 100, 250]:
    saved_accuracy = 0.0
    print "Size: " + str(i*2000)
    for epoch in range(0, 25):
        nn = nn.fit(trainImg2[0:2000*i], trainVal2[0:2000*i])
        cur_accuracy = nn.train_history_[-1]['valid_accuracy']
        best_accuracy = max(cur_accuracy, best_accuracy)
        #print "Current Accuracy: " + str(cur_accuracy)
        #print "Saved Accuracy: " + str(saved_accuracy)
        if cur_accuracy + 0.04 < saved_accuracy or cur_accuracy + 0.12 < best_accuracy:
            print "Accuracy Drop! Loading previous network!"
            nn.load_params_from("cachednn.params")
        else:
            nn.save_params_to('cachednn.params')
            saved_accuracy = cur_accuracy

nn.save_params_to('nn_stage2.params')

#pickle.dump(nn, open( "nn_stage2.pkl", "wb" ))
def main():
    c = color_codes()
    patch_size = (15, 15, 15)
    dir_name = '/home/sergivalverde/w/CNN/images/CH16'
    patients = [f for f in sorted(os.listdir(dir_name)) if os.path.isdir(os.path.join(dir_name, f))]
    names = np.stack([name for name in [
        [os.path.join(dir_name, patient, 'FLAIR_preprocessed.nii.gz') for patient in patients],
        [os.path.join(dir_name, patient, 'DP_preprocessed.nii.gz') for patient in patients],
        [os.path.join(dir_name, patient, 'T2_preprocessed.nii.gz') for patient in patients],
        [os.path.join(dir_name, patient, 'T1_preprocessed.nii.gz') for patient in patients]
    ] if name is not None], axis=1)
    seed = np.random.randint(np.iinfo(np.int32).max)
    ''' Here we create an initial net to find conflictive voxels '''
    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] + '<Running iteration ' + c['b'] + '1>' + c['nc'])
    net_name = '/home/sergivalverde/w/CNN/code/CNN1/miccai_challenge2016/deep-challenge2016.init.'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer, dict(name='conv1_1', num_filters=32, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_1', pool_size=2, stride=2, mode='average_inc_pad')),
            (Conv3DDNNLayer, dict(name='conv2_1', num_filters=64, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_2', pool_size=2, stride=2, mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer, dict(name='out', num_units=2, nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        on_epoch_finished=[
            SaveWeights(net_name + 'model_weights.pkl', only_best=True, pickle=False),
            SaveTrainingHistory(net_name + 'model_history.pkl'),
            PlotTrainingHistory(net_name + 'training_history.png'),
            EarlyStopping(patience=10)
        ],
        verbose=10,
        max_epochs=50,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum((p + t[:, 1])))],
    )
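    # the custom 'dsc' score is a soft Dice similarity coefficient between the predicted
    # probability of the positive class and the binary labels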

    try:
        net.load_params_from(net_name + 'model_weights.pkl')
    except IOError:
        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' +
              c['g'] + 'Loading the data for ' + c['b'] + 'iteration 1' + c['nc'])
        # Create the data
        (x, y, _) = load_patches(
            dir_name=dir_name,
            use_flair=True,
            use_pd=True,
            use_t2=True,
            use_t1=True,
            use_gado=False,
            flair_name='FLAIR_preprocessed.nii.gz',
            pd_name='DP_preprocessed.nii.gz',
            t2_name='T2_preprocessed.nii.gz',
            t1_name='T1_preprocessed.nii.gz',
            gado_name=None,
            mask_name='Consensus.nii.gz',
            size=patch_size
        )

        print('-- Permuting the data')
        np.random.seed(seed)
        x_train = np.random.permutation(np.concatenate(x).astype(dtype=np.float32))
        print('-- Permuting the labels')
        np.random.seed(seed)
        y_train = np.random.permutation(np.concatenate(y).astype(dtype=np.int32))
        y_train = y_train[:, y_train.shape[1] / 2 + 1, y_train.shape[2] / 2 + 1, y_train.shape[3] / 2 + 1]
        print('-- Training vector shape = (' + ','.join([str(length) for length in x_train.shape]) + ')')
        print('-- Training labels shape = (' + ','.join([str(length) for length in y_train.shape]) + ')')

        print c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +\
            'Training (' + c['b'] + 'initial' + c['nc'] + c['g'] + ')' + c['nc']
        # We try to get the last weights to keep improving the net over and over
        net.fit(x_train, y_train)

    ''' Here we get the seeds '''
    print c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] + '<Looking for seeds>' + c['nc']
    for patient in names:
        output_name = os.path.join('/'.join(patient[0].rsplit('/')[:-1]), 'test.iter1.nii.gz')
        try:
            load_nii(output_name)
            print c['c'] + '[' + strftime("%H:%M:%S") + '] ' \
                + c['g'] + '-- Patient ' + patient[0].rsplit('/')[-2] + ' already done' + c['nc']
        except IOError:
            print c['c'] + '[' + strftime("%H:%M:%S") + '] '\
                  + c['g'] + '-- Testing with patient ' + c['b'] + patient[0].rsplit('/')[-2] + c['nc']
            image_nii = load_nii(patient[0])
            image = np.zeros_like(image_nii.get_data())
            for batch, centers in load_patch_batch(patient, 100000, patch_size):
                y_pred = net.predict_proba(batch)
                [x, y, z] = np.stack(centers, axis=1)
                image[x, y, z] = y_pred[:, 1]

            print c['g'] + '-- Saving image ' + c['b'] + output_name + c['nc']
            image_nii.get_data()[:] = image
            image_nii.to_filename(output_name)

    ''' Here we perform the last iteration '''
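    # the probability maps written above (test.iter1.nii.gz) are reused below as roi_name when
    # sampling patches, so this final pass trains on the "conflictive" voxels highlighted by the
    # first network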
    print c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] + '<Running iteration ' + c['b'] + '2>' + c['nc']
    net_name = '/home/sergivalverde/w/CNN/code/CNN1/miccai_challenge2016/deep-challenge2016.final.'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer, dict(name='conv1_1', num_filters=32, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_1', pool_size=2, stride=2, mode='average_inc_pad')),
            (Conv3DDNNLayer, dict(name='conv2_1', num_filters=64, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_2', pool_size=2, stride=2, mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer, dict(name='out', num_units=2, nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        on_epoch_finished=[
            SaveWeights(net_name + 'model_weights.pkl', only_best=True, pickle=False),
            SaveTrainingHistory(net_name + 'model_history.pkl'),
            PlotTrainingHistory(net_name + 'training_history.png'),
        ],
        batch_iterator_train=BatchIterator(batch_size=4096),
        verbose=10,
        max_epochs=2000,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum((p + t[:, 1])))],
    )

    try:
        net.load_params_from(net_name + 'model_weights.pkl')
    except IOError:
        pass
    print c['c'] + '[' + strftime("%H:%M:%S") + '] '\
        + c['g'] + 'Loading the data for ' + c['b'] + 'iteration 2' + c['nc']
    (x, y, names) = load_patches(
        dir_name='/home/sergivalverde/w/CNN/images/CH16',
        use_flair=True,
        use_pd=True,
        use_t2=True,
        use_t1=True,
        use_gado=False,
        flair_name='FLAIR_preprocessed.nii.gz',
        pd_name='DP_preprocessed.nii.gz',
        t2_name='T2_preprocessed.nii.gz',
        gado_name=None,
        t1_name='T1_preprocessed.nii.gz',
        mask_name='Consensus.nii.gz',
        size=patch_size,
        roi_name='test.iter1.nii.gz'
    )

    print '-- Permuting the data'
    np.random.seed(seed)
    x_train = np.random.permutation(np.concatenate(x).astype(dtype=np.float32))
    print '-- Permuting the labels'
    np.random.seed(seed)
    y_train = np.random.permutation(np.concatenate(y).astype(dtype=np.int32))
    y_train = y_train[:, y_train.shape[1] / 2 + 1, y_train.shape[2] / 2 + 1, y_train.shape[3] / 2 + 1]
    print '-- Training vector shape = (' + ','.join([str(length) for length in x_train.shape]) + ')'
    print '-- Training labels shape = (' + ','.join([str(length) for length in y_train.shape]) + ')'
    print c['c'] + '[' + strftime("%H:%M:%S") + '] '\
        + c['g'] + 'Training (' + c['b'] + 'final' + c['nc'] + c['g'] + ')' + c['nc']
    net.fit(x_train, y_train)
        ],
    input_shape = (None, 1, 20, 20),
    conv_num_filters = 32, conv_filter_size = (3, 3), 
    pool_pool_size = (2, 2),
	hidden_num_units = 50,
    output_num_units = 2, output_nonlinearity = softmax,

    update_learning_rate=0.01,
    update_momentum = 0.9,

    regression = False,
    max_epochs = 50,
    verbose = 1,
    )

net.load_params_from(CNN_Weights) 

##############################################################################
# Scan the entire image with a (w x h) window
##############################################################################

for root, dirs, files in os.walk(SourceDir): 
    for name in files:
        
        ext = ['.jpg', '.jpeg', '.gif', '.png']
        if name.endswith(tuple(ext)):

            path = os.path.join(root, name)
            orig_image = Image.open(path).convert('RGBA')
            image = orig_image.convert('L')  # Convert to grayscale
            image = ImageOps.equalize(image)  # Histogram equalization
Beispiel #32
0
def main():
    data = load_av_letters('data/allData_mouthROIs.mat')

    # create the necessary variable mappings
    data_matrix = data['dataMatrix']
    data_matrix_len = data_matrix.shape[0]
    targets_vec = data['targetsVec']
    vid_len_vec = data['videoLengthVec']
    iter_vec = data['iterVec']

    indexes = create_split_index(data_matrix_len, vid_len_vec, iter_vec)

    # split the data
    train_data = data_matrix[indexes == True]
    train_targets = targets_vec[indexes == True]
    test_data = data_matrix[indexes == False]
    test_targets = targets_vec[indexes == False]

    idx = [i for i, elem in enumerate(test_targets) if elem == 20]

    print(train_data.shape)
    print(test_data.shape)
    print(sum([train_data.shape[0], test_data.shape[0]]))

    # resize the input data to 40 x 30
    train_data_resized = resize_images(train_data).astype(np.float32)

    # normalize the inputs [0 - 1]
    train_data_resized = normalize_input(train_data_resized, centralize=True)

    test_data_resized = resize_images(test_data).astype(np.float32)
    test_data_resized = normalize_input(test_data_resized, centralize=True)

    dic = {}
    dic['trainDataResized'] = train_data_resized
    dic['testDataResized'] = test_data_resized
    """second experiment: overcomplete sigmoid encoder/decoder, squared loss"""
    encode_size = 2500
    sigma = 0.5

    # to get tied weights in the encoder/decoder, create this shared weightMatrix
    # 1200 x 2500
    w1, layer1 = build_encoder_layers(1200, 2500, sigma)

    ae1 = NeuralNet(layers=layer1,
                    max_epochs=50,
                    objective_loss_function=squared_error,
                    update=adadelta,
                    regression=True,
                    verbose=1)

    load = True
    save = False
    if load:
        print('[LOAD] layer 1...')
        ae1.load_params_from('layer1.dat')
    else:
        print('[TRAIN] layer 1...')
        ae1.fit(train_data_resized, train_data_resized)

    # save params
    if save:
        print('[SAVE] layer 1...')
        ae1.save_params_to('layer1.dat')

    train_encoded1 = ae1.get_output('encoder',
                                    train_data_resized)  # 12293 x 2500

    w2, layer2 = build_encoder_layers(2500, 1250)
    ae2 = NeuralNet(layers=layer2,
                    max_epochs=50,
                    objective_loss_function=squared_error,
                    update=adadelta,
                    regression=True,
                    verbose=1)

    load2 = True
    if load2:
        print('[LOAD] layer 2...')
        ae2.load_params_from('layer2.dat')
    else:
        print('[TRAIN] layer 2...')
        ae2.fit(train_encoded1, train_encoded1)

    save2 = False
    if save2:
        print('[SAVE] layer 2...')
        ae2.save_params_to('layer2.dat')

    train_encoded2 = ae2.get_output('encoder', train_encoded1)  # 12293 x 1250

    w3, layer3 = build_encoder_layers(1250, 600)
    ae3 = NeuralNet(layers=layer3,
                    max_epochs=100,
                    objective_loss_function=squared_error,
                    update=adadelta,
                    regression=True,
                    verbose=1)

    load3 = True
    if load3:
        print('[LOAD] layer 3...')
        ae3.load_params_from('layer3.dat')
    else:
        ae3.fit(train_encoded2, train_encoded2)

    save3 = False
    if save3:
        print('[SAVE] layer 3...')
        ae3.save_params_to('layer3.dat')

    train_encoded3 = ae3.get_output('encoder', train_encoded2)  # 12293 x 600

    w4, layer4 = build_bottleneck_layer(600, 100)
    ae4 = NeuralNet(layers=layer4,
                    max_epochs=100,
                    objective_loss_function=squared_error,
                    update=adadelta,
                    regression=True,
                    verbose=1)

    load4 = False
    if load4:
        print('[LOAD] layer 4...')
        ae4.load_params_from('layer4.dat')
    else:
        print('[TRAIN] layer 4...')
        ae4.fit(train_encoded3, train_encoded3)

    save4 = True
    if save4:
        print('[SAVE] layer 4...')
        ae4.save_params_to('layer4.dat')

    test_enc1 = ae1.get_output('encoder', test_data_resized)
    test_enc2 = ae2.get_output('encoder', test_enc1)
    test_enc3 = ae3.get_output('encoder', test_enc2)
    test_enc4 = ae4.get_output('encoder', test_enc3)

    decoder4 = create_decoder(100, 600, w4.T)
    decoder4.initialize()
    decoder3 = create_decoder(600, 1250, w3.T)
    decoder3.initialize()
    decoder2 = create_decoder(1250, 2500, w2.T)
    decoder2.initialize()  # initialize the net
    decoder1 = create_decoder(2500, 1200, w1.T)
    decoder1.initialize()
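    # the decoders reuse the transposed encoder weights (w4.T, w3.T, ...), i.e. tied weights, so
    # the stacked encoding can be mapped back to pixel space without additional training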

    test_dec3 = decoder4.predict(test_enc4)
    test_dec2 = decoder3.predict(test_dec3)
    test_dec1 = decoder2.predict(test_dec2)
    X_pred = decoder1.predict(test_dec1)

    # plot_loss(ae3)
    # plot_loss(ae2)
    # plot_loss(ae1)
    tile_raster_images(X_pred[4625:4650, :], (30, 40), (5, 5),
                       tile_spacing=(1, 1))
    plt.title('reconstructed')
    tile_raster_images(test_data_resized[4625:4650, :], (30, 40), (5, 5),
                       tile_spacing=(1, 1))
    plt.title('original')
    plt.show()
    """
Beispiel #33
0
def main():
       
    seed = 12345
    np.random.seed(seed)
    set_lasagne_rng(RandomState(seed))
     
    LOOKUP_PATH = os.path.join(WDIR, 'data', 'HIV.pkl')
    lookup =  pickle.load(open(LOOKUP_PATH, 'rb'))
    data_list = lookup['data']
    y = lookup['y']
    labels = lookup['labels']
    nmark = len(labels)
    
    # event occurrence list
    occurred = [x for i, x in enumerate(data_list) if y[i,1] == 1]
    not_occurred = [x for i, x in enumerate(data_list) if y[i,1] == 0]
    y1 = y[y[:,1] == 1]
    y0 = y[y[:,1] == 0]
    
    # split the examples randomly into a training (2/3) and test (1/3) cohort
    # both cohorts should contain an equal percentage of censored data
    sep1 = len(y1) / 3
    sep0 = len(y0) / 3
        
    # include only uncensored data from the training cohort for training CellCnn
    tr_list = occurred[sep1:]
    tr_stime = y1[sep1:,0].astype(float)
            
    # transform survival times to [-1, 1] interval by ranking them
    tr_stime = (ss.rankdata(tr_stime) / (0.5 * len(tr_stime))) - 1
                
    # fit scaler to all training data
    sc = StandardScaler()
    sc.fit(np.vstack(occurred[sep1:] + not_occurred[sep0:]))
    tr_list = [sc.transform(x) for x in tr_list]
            
    # the test cohort
    validation_list = [sc.transform(x) for x in (occurred[:sep1] + not_occurred[:sep0])]
    y_valid = np.vstack([y1[:sep1], y0[:sep0]])
    
    # cross validation on the training cohort    
    nfold = 10
    nfilter = 3
           
    skf = KFold(len(tr_list), n_folds=nfold, shuffle=True)
    committee = []
    valid_accuracy = []
    accum_w = np.empty((nfilter * nfold, nmark+2))
    
    for ifold, (train_index, test_index) in enumerate(skf):
        cv_train_samples = [tr_list[t_idx] for t_idx in train_index]
        cv_test_samples = [tr_list[t_idx] for t_idx in test_index]
        cv_y_train = list(tr_stime[train_index])
        cv_y_test = list(tr_stime[test_index])
        
        results = train_model(cv_train_samples, cv_y_train, labels,
                                valid_samples=cv_test_samples, valid_phenotypes=cv_y_test, 
                                ncell=500, nsubset=200, subset_selection='random',
                                nrun=3, pooling='mean', regression=True, nfilter=nfilter,
                                learning_rate=0.03, momentum=0.9, l2_weight_decay_conv=1e-8,
                                l2_weight_decay_out=1e-8, max_epochs=20, verbose=1,
                                select_filters='best', accur_thres=-1)
            
        net_dict = results['best_net']
            
        # update the committee of networks        
        committee.append(net_dict)
        valid_accuracy.append(results['best_accuracy'])
        w_tot = param_vector(net_dict, regression=True)
                
        # add weights to accumulator    
        accum_w[ifold*nfilter:(ifold+1)*nfilter] = w_tot
         
    save_path = os.path.join(OUTDIR, 'network_committee.pkl')
    with open(save_path, 'wb') as f:
        pickle.dump((committee, valid_accuracy), f, -1)    
        
    '''
    committee, valid_accuracy = pickle.load(open(save_path, 'r'))    
    # retrieve the filter weights
    for ifold, net_dict in enumerate(committee):
        w_tot = param_vector(net_dict, regression=True)
                
        # add weights to accumulator    
        accum_w[ifold*nfilter:(ifold+1)*nfilter] = w_tot
    '''    
    
    # choose the strong signatures (all of them)
    w_strong = accum_w
    
    # members of each cluster should have cosine similarity > 0.7 
    # equivalently, cosine distance < 0.3
    Z = linkage(w_strong, 'average', metric='cosine')
    clusters = fcluster(Z, .3, criterion='distance') - 1
        
    n_clusters = len(np.unique(clusters))
    print '%d clusters chosen' % (n_clusters)   
            
    # plot the discovered filter profiles
    plt.figure(figsize=(3,2))
    idx = range(nmark) + [nmark+1]
    clmap = sns.clustermap(pd.DataFrame(w_strong[:,idx], columns=labels+['survival']),
                                method='average', metric='cosine', row_linkage=Z,
                                col_cluster=False, robust=True, yticklabels=clusters)
    clmap.cax.set_visible(False)
    fig_path = os.path.join(OUTDIR, 'HIV_clmap.eps')
    clmap.savefig(fig_path, format='eps')
    plt.close()
        
        
    # generate the consensus filter profiles
    c = Counter(clusters)
    cons = []
    for key, val in c.items():
        if val > nfold/2:
            cons.append(np.mean(w_strong[clusters == key], axis=0))
    cons_mat = np.vstack(cons)
        
    # plot the consensus filter profiles
    plt.figure(figsize=(10, 3))
    idx = range(nmark) + [nmark+1]
    ax = sns.heatmap(pd.DataFrame(cons_mat[:,idx], columns=labels + ['survival']),
                            robust=True, yticklabels=False)
    plt.xticks(rotation=90)
    ax.tick_params(axis='both', which='major', labelsize=20)
    plt.tight_layout()
    fig_path = os.path.join(OUTDIR, 'clmap_consensus.eps')
    plt.savefig(fig_path, format='eps')
    plt.close()
       
    # create an ensemble of neural networks
    ncell_cons = 3000
    ncell_voter = 3000
    layers_voter = [
                    (layers.InputLayer, {'name': 'input', 'shape': (None, nmark, ncell_voter)}),
                    (layers.Conv1DLayer, {'name': 'conv', 
                                        'num_filters': nfilter, 'filter_size': 1}),
                    (layers.Pool1DLayer, {'name': 'meanPool', 'pool_size' : ncell_voter,
                                        'mode': 'average_exc_pad'}),
                    (layers.DenseLayer, {'name': 'output',
                                        'num_units': 1,
                                        'nonlinearity': T.tanh})]
             
    # predict on the test cohort
    small_data_list_v = [x[:ncell_cons].T.reshape(1,nmark,ncell_cons) for x in validation_list]
    data_v = np.vstack(small_data_list_v)
    stime, censor = y_valid[:,0], y_valid[:,1]
    
    # committee of the best nfold/2 models
    voter_risk_pred = list()
    for ifold in np.argsort(valid_accuracy):
        voter = NeuralNet(layers = layers_voter,                
                                    update = nesterov_momentum,
                                    update_learning_rate = 0.001,
                                    regression=True,
                                    max_epochs=5,
                                    verbose=0)
        voter.load_params_from(committee[ifold])
        voter.initialize()
        # rank the risk predictions
        voter_risk_pred.append(ss.rankdata(- np.squeeze(voter.predict(data_v))))
    all_voters = np.vstack(voter_risk_pred)
                
    # compute mean rank per individual
    risk_p = np.mean(all_voters, axis=0)
    g1 = np.squeeze(risk_p > np.median(risk_p))
    voters_pval_v = logrank_pval(stime, censor, g1)
    fig_v = os.path.join(OUTDIR, 'cellCnn_cox_test.eps')
    plot_KM(stime, censor, g1, voters_pval_v, fig_v)

    # filter-activating cells
    data_t = np.vstack(small_data_list_v)
    data_stack = np.vstack([x for x in np.swapaxes(data_t, 2, 1)])
                
    # finally define a network from the consensus filters
    nfilter_cons = cons_mat.shape[0]
    ncell_cons = 3000
    layers_cons = [
                    (layers.InputLayer, {'name': 'input', 'shape': (None, nmark, ncell_cons)}),
                    (layers.Conv1DLayer, {'name': 'conv', 
                                        'b': init.Constant(cons_mat[:,-2]),
                                        'W': cons_mat[:,:-2].reshape(nfilter_cons, nmark, 1),
                                        'num_filters': nfilter_cons, 'filter_size': 1}),
                    (layers.Pool1DLayer, {'name': 'meanPool', 'pool_size' : ncell_cons,
                                        'mode': 'average_exc_pad'}),
                    (layers.DenseLayer, {'name': 'output',
                                        'num_units': 1,
                                        'W': np.sign(cons_mat[:,-1:]),
                                        'b': init.Constant(0.),
                                        'nonlinearity': T.tanh})]
            
    net_cons = NeuralNet(layers = layers_cons,                
                            update = nesterov_momentum,
                            update_learning_rate = 0.001,
                            regression=True,
                            max_epochs=5,
                            verbose=0)
    net_cons.initialize()

    # get the representation after mean pooling
    xs = T.tensor3('xs').astype(theano.config.floatX)
    act_conv = theano.function([xs], lh.get_output(net_cons.layers_['conv'], xs)) 
    
    # and apply to the test data
    act_tot = act_conv(data_t)
    act_tot = np.swapaxes(act_tot, 2, 1)
    act_stack = np.vstack([x for x in act_tot])
    idx = range(7) + [8,9]
                
    for i_map in range(nfilter_cons):
        val = act_stack[:, i_map]
        descending_order = np.argsort(val)[::-1]
        val_cumsum = np.cumsum(val[descending_order])
        data_sorted = data_stack[descending_order]
        thres = 0.75 * val_cumsum[-1]
        res_data = data_sorted[val_cumsum < thres] 
        fig_path = os.path.join(OUTDIR, 'filter_'+str(i_map)+'_active.eps')       
        plot_marker_distribution([res_data[:,idx], data_stack[:,idx]],
                                            ['filter '+str(i_map), 'all'],
                                            [labels[l] for l in idx],
                                            (3,3), fig_path, 24)
    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        # this flush method is needed for python 3 compatibility.
        # this handles the flush command by doing nothing.
        # you might want to specify some extra behavior here.
        pass


sys.stdout = Logger()

# cnn init
cnn.load_params_from('./model_trained_on_UCF.pkl')

if os.path.exists('./data_cache/combined_dataset/cnn_' + CNNCode + '-' +
                  TrainCode + '.pkl'):
    # check if a trained model already exists
    cnn.load_params_from('./data_cache/combined_dataset/cnn_' + CNNCode + '-' +
                         TrainCode + '.pkl')
else:
    cnn_train = time.time()
    # training a new model
    for epoch in range(Epochs):
        # for every epoch
        for batch in patches_extract_all(Train):
            # for every batch
            inputs, targets = batch
            # data augmentation
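            # (the example is cut off here; a plausible augmentation step --
            #  purely a sketch, assuming `inputs` is an NCHW float32 numpy
            #  batch -- would be a random horizontal flip:)
            flip = np.random.rand(inputs.shape[0]) > 0.5
            inputs[flip] = inputs[flip, :, :, ::-1]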
Beispiel #35
0
    objective_loss_function=objectives.categorical_crossentropy,

    update=updates.adam,

    batch_iterator_train=train_iterator,
    batch_iterator_test=test_iterator,

    on_epoch_finished=[
        save_weights,
        save_training_history,
        plot_training_history
    ],

    verbose=10,
    max_epochs=250,
)
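
# The callbacks (save_weights, save_training_history, plot_training_history) and
# the two batch iterators referenced above are created earlier in the original
# script. A partial sketch of the pieces that come straight from nolearn (the
# history/plot handlers live in a separate helper module of that project; the
# file names here are placeholders):
#
#   from nolearn.lasagne import BatchIterator
#   from nolearn.lasagne.handlers import SaveWeights
#
#   save_weights = SaveWeights('./model_weights.pkl', only_best=True, pickle=False)
#   train_iterator = BatchIterator(batch_size=128)
#   test_iterator = BatchIterator(batch_size=128)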


if __name__ == '__main__':
    # X_train, X_test are image file names
    # They will be read in the iterator
    X_train, X_test, y_train, y_test = load_data(test_size=0.25, random_state=42)

    net.fit(X_train, y_train)

    # Load the best weights from pickled model
    net.load_params_from('./examples/cifar10/model_weights.pkl')

    score = net.score(X_test, y_test)
    print 'Final score %.4f' % score
Beispiel #36
0
def fit_net2(fname='net.pickle', sfname='net2.pickle'):
	with open(fname, 'rb') as f:  # binary mode, the pickle was written with protocol -1
		net = pickle.load(f)
	l1=net.get_all_layers()

	net2 = NeuralNet(
		layers=[
			('input', layers.InputLayer),
			('conv1', Conv2DLayer),
			('pool1', MaxPool2DLayer),
			('dropout1', layers.DropoutLayer),
			('conv2', Conv2DLayer),
			('pool2', MaxPool2DLayer),
			('dropout2', layers.DropoutLayer),
			('conv3', Conv2DLayer),
			('pool3', MaxPool2DLayer),
			('dropout3', layers.DropoutLayer),
			('hidden4', FactoredLayer),
			('dropout4', layers.DropoutLayer),
			('hidden5', FactoredLayer),
			('output', layers.DenseLayer),
			],
		input_shape=(None, 1, 96, 96),
		conv1_num_filters=32, conv1_filter_size=(3, 3), pool1_pool_size=(2, 2),
		dropout1_p=0.1,
		conv2_num_filters=64, conv2_filter_size=(2, 2), pool2_pool_size=(2, 2),
		dropout2_p=0.2,
		conv3_num_filters=128, conv3_filter_size=(2, 2), pool3_pool_size=(2, 2),
		dropout3_p=0.3,
		hidden4_num_units=1000,
		hidden4_num_hidden=200,
		hidden4_W=l1[10].W.get_value(),
		hidden4_b=l1[10].b.get_value(),
		dropout4_p=0.5,
		hidden5_num_units=1000,
		hidden5_num_hidden=200,
		hidden5_W=l1[12].W.get_value(),
		hidden5_b=l1[12].b.get_value(),
		output_num_units=30, output_nonlinearity=None,

		update_learning_rate=theano.shared(float32(0.03)),
		update_momentum=theano.shared(float32(0.9)),

		regression=True,
		batch_iterator_train=FlipBatchIterator(batch_size=128),
		on_epoch_finished=[
			AdjustVariable('update_learning_rate', start=0.03, stop=0.0001),
			AdjustVariable('update_momentum', start=0.9, stop=0.999),
			EarlyStopping(patience=200),
			],
		max_epochs=1,
		verbose=1,
		)
	
	X, y = load2d()
	net2.fit(X, y)
	net2.load_params_from(net.get_all_params_values())
	#net2.fit(X, y)
	"""
	l2=net2.get_all_layers()
	print(l2)
	for i in xrange(len(l1)):
		if i!=10 and i!=12:
			all_param_values = lasagne.layers.get_all_param_values(l1[i])
			lasagne.layers.set_all_param_values(l2[i], all_param_values)
	"""
	with open(sfname, 'wb') as f:
		pickle.dump(net2, f, -1)
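
# Hypothetical usage of fit_net2; the file names below are simply its defaults
# and assume 'net.pickle' was produced by an earlier training run:
fit_net2(fname='net.pickle', sfname='net2.pickle')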
Beispiel #37
0
nn = NeuralNet(
    net['softmax'],
    max_epochs=1,
    update=adam,
    update_learning_rate=.00014, #start with a really low learning rate
    #objective_l2=0.0001,

    # batch iteration params
    batch_iterator_test=test_iterator,

    train_split=TrainSplit(eval_size=0.2),
    verbose=3,
)

nn.load_params_from(netfile);




nuclei_area = 0.0
mitosis_area = 0.0
num = 0

for img in images:
    cntr = Controller(img)
    if normalized:
        img, _, __, __ = macenko(cntr)

    plt.imsave('data/imagemap' + str(num) + ".png", img)
    num += 1
def main():
    # Parse command line options
    parser = argparse.ArgumentParser(description='Test different nets with 3D data.')
    parser.add_argument('--flair', action='store', dest='flair', default='FLAIR_preprocessed.nii.gz')
    parser.add_argument('--pd', action='store', dest='pd', default='DP_preprocessed.nii.gz')
    parser.add_argument('--t2', action='store', dest='t2', default='T2_preprocessed.nii.gz')
    parser.add_argument('--t1', action='store', dest='t1', default='T1_preprocessed.nii.gz')
    parser.add_argument('--output', action='store', dest='output', default='output.nii.gz')
    parser.add_argument('--no-docker', action='store_false', dest='docker', default=True)

    c = color_codes()
    patch_size = (15, 15, 15)
    options = vars(parser.parse_args())
    batch_size = 10000
    min_size = 30

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Loading the net ' + c['b'] + '1' + c['nc'] + c['g'] + '>' + c['nc'])
    net_name = '/usr/local/nets/deep-challenge2016.init.model_weights.pkl' if options['docker'] \
        else './deep-challenge2016.init.model_weights.pkl'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer, dict(name='conv1_1', num_filters=32, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_1', pool_size=2, stride=2, mode='average_inc_pad')),
            (Conv3DDNNLayer, dict(name='conv2_1', num_filters=64, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_2', pool_size=2, stride=2, mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer, dict(name='out', num_units=2, nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        verbose=10,
        max_epochs=50,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum((p + t[:, 1])))],
    )
    net.load_params_from(net_name)

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Creating the probability map ' + c['b'] + '1' + c['nc'] + c['g'] + '>' + c['nc'])
    names = np.array([options['flair'], options['pd'], options['t2'], options['t1']])
    image_nii = load_nii(options['flair'])
    image1 = np.zeros_like(image_nii.get_data())
    print('0% of data tested', end='\r')
    sys.stdout.flush()
    for batch, centers, percent in load_patch_batch_percent(names, batch_size, patch_size):
        y_pred = net.predict_proba(batch)
        print('%f%% of data tested' % percent, end='\r')
        sys.stdout.flush()
        [x, y, z] = np.stack(centers, axis=1)
        image1[x, y, z] = y_pred[:, 1]

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Loading the net ' + c['b'] + '2' + c['nc'] + c['g'] + '>' + c['nc'])
    net_name = '/usr/local/nets/deep-challenge2016.final.model_weights.pkl' if options['docker'] \
        else './deep-challenge2016.final.model_weights.pkl'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer, dict(name='conv1_1', num_filters=32, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_1', pool_size=2, stride=2, mode='average_inc_pad')),
            (Conv3DDNNLayer, dict(name='conv2_1', num_filters=64, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_2', pool_size=2, stride=2, mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer, dict(name='out', num_units=2, nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        batch_iterator_train=BatchIterator(batch_size=4096),
        verbose=10,
        max_epochs=2000,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda t, p: 2 * np.sum(t * p[:, 1]) / np.sum((t + p[:, 1])))],
    )
    net.load_params_from(net_name)

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Creating the probability map ' + c['b'] + '2' + c['nc'] + c['g'] + '>' + c['nc'])
    image2 = np.zeros_like(image_nii.get_data())
    print('0% of data tested', end='\r')
    sys.stdout.flush()
    for batch, centers, percent in load_patch_batch_percent(names, batch_size, patch_size):
        y_pred = net.predict_proba(batch)
        print('%f%% of data tested' % percent, end='\r')
        sys.stdout.flush()
        [x, y, z] = np.stack(centers, axis=1)
        image2[x, y, z] = y_pred[:, 1]

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Saving to file ' + c['b'] + options['output'] + c['nc'] + c['g'] + '>' + c['nc'])
    image = (image1 * image2) > 0.5

    # filter candidates < min_size
    labels, num_labels = ndimage.label(image)
    lesion_list = np.unique(labels)
    num_elements_by_lesion = ndimage.labeled_comprehension(image, labels, lesion_list, np.sum, float, 0)
    filt_min_size = num_elements_by_lesion >= min_size
    lesion_list = lesion_list[filt_min_size]
    image = reduce(np.logical_or, map(lambda lab: lab == labels, lesion_list))

    image_nii.get_data()[:] = np.roll(np.roll(image, 1, axis=0), 1, axis=1)
    path = '/'.join(options['t1'].rsplit('/')[:-1])
    outputname = options['output'].rsplit('/')[-1]
    image_nii.to_filename(os.path.join(path, outputname))

    if not options['docker']:
        path = '/'.join(options['output'].rsplit('/')[:-1])
        case = options['output'].rsplit('/')[-1]
        gt = load_nii(os.path.join(path, 'Consensus.nii.gz')).get_data().astype(dtype=np.bool)
        dsc = np.sum(2.0 * np.logical_and(gt, image)) / (np.sum(gt) + np.sum(image))
        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
              '<DSC value for ' + c['c'] + case + c['g'] + ' = ' + c['b'] + str(dsc) + c['nc'] + c['g'] + '>' + c['nc'])
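
# The DSC printed above is the Dice similarity coefficient:
#   DSC = 2 * |GT intersect SEG| / (|GT| + |SEG|)
# A quick sanity check on toy masks (a sketch, not part of the original script):
gt_toy = np.array([1, 1, 0, 0], dtype=bool)
seg_toy = np.array([1, 0, 1, 0], dtype=bool)
dsc_toy = 2.0 * np.sum(np.logical_and(gt_toy, seg_toy)) / (np.sum(gt_toy) + np.sum(seg_toy))
assert abs(dsc_toy - 0.5) < 1e-9  # exactly one element overlaps out of two in each mask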
Beispiel #39
0
def cascade_model(options):
    """
    3D cascade model using Nolearn and Lasagne
    
    Inputs:
    - model_options:
    - weights_path: path to where weights should be saved

    Output:
    - nets = list of NeuralNets (CNN1, CNN2)
    """

    # model options
    channels = len(options['modalities'])
    train_split_perc = options['train_split']
    num_epochs = options['max_epochs']
    max_epochs_patience = options['patience']

    # save model to disk to re-use it. Create an experiment folder
    # organize experiment
    if not os.path.exists(
            os.path.join(options['weight_paths'], options['experiment'])):
        os.mkdir(os.path.join(options['weight_paths'], options['experiment']))
    if not os.path.exists(
            os.path.join(options['weight_paths'], options['experiment'],
                         'nets')):
        os.mkdir(
            os.path.join(options['weight_paths'], options['experiment'],
                         'nets'))

    # --------------------------------------------------
    # first model
    # --------------------------------------------------

    layer1 = InputLayer(name='in1',
                        shape=(None, channels) + options['patch_size'])
    layer1 = batch_norm(Conv3DLayer(layer1,
                                    name='conv1_1',
                                    num_filters=32,
                                    filter_size=3,
                                    pad='same'),
                        name='BN1')
    layer1 = Pool3DLayer(layer1,
                         name='avgpool_1',
                         mode='max',
                         pool_size=2,
                         stride=2)
    layer1 = batch_norm(Conv3DLayer(layer1,
                                    name='conv2_1',
                                    num_filters=64,
                                    filter_size=3,
                                    pad='same'),
                        name='BN2')
    layer1 = Pool3DLayer(layer1,
                         name='avgpoo2_1',
                         mode='max',
                         pool_size=2,
                         stride=2)
    layer1 = DropoutLayer(layer1, name='l2drop', p=0.5)
    layer1 = DenseLayer(layer1, name='d_1', num_units=256)
    layer1 = DenseLayer(layer1,
                        name='out',
                        num_units=2,
                        nonlinearity=nonlinearities.softmax)

    # save weights
    net_model = 'model_1'
    net_weights = os.path.join(options['weight_paths'], options['experiment'],
                               'nets', net_model + '.pkl')
    net_history = os.path.join(options['weight_paths'], options['experiment'],
                               'nets', net_model + '_history.pkl')

    net1 = NeuralNet(
        layers=layer1,
        objective_loss_function=objectives.categorical_crossentropy,
        batch_iterator_train=Rotate_batch_Iterator(batch_size=128),
        update=updates.adadelta,
        on_epoch_finished=[
            SaveWeights(net_weights, only_best=True, pickle=False),
            SaveTrainingHistory(net_history),
            EarlyStopping(patience=max_epochs_patience)
        ],
        verbose=options['net_verbose'],
        max_epochs=num_epochs,
        train_split=TrainSplit(eval_size=train_split_perc),
    )

    # --------------------------------------------------
    # second model
    # --------------------------------------------------

    layer2 = InputLayer(name='in2',
                        shape=(None, channels) + options['patch_size'])
    layer2 = batch_norm(Conv3DLayer(layer2,
                                    name='conv1_1',
                                    num_filters=32,
                                    filter_size=3,
                                    pad='same'),
                        name='BN1')
    layer2 = Pool3DLayer(layer2,
                         name='avgpool_1',
                         mode='max',
                         pool_size=2,
                         stride=2)
    layer2 = batch_norm(Conv3DLayer(layer2,
                                    name='conv2_1',
                                    num_filters=64,
                                    filter_size=3,
                                    pad='same'),
                        name='BN2')
    layer2 = Pool3DLayer(layer2,
                         name='avgpoo2_1',
                         mode='max',
                         pool_size=2,
                         stride=2)
    layer2 = DropoutLayer(layer2, name='l2drop', p=0.5)
    layer2 = DenseLayer(layer2, name='d_1', num_units=256)
    layer2 = DenseLayer(layer2,
                        name='out',
                        num_units=2,
                        nonlinearity=nonlinearities.softmax)

    # save weights
    net_model = 'model_2'
    net_weights2 = os.path.join(options['weight_paths'], options['experiment'],
                                'nets', net_model + '.pkl')
    net_history2 = os.path.join(options['weight_paths'], options['experiment'],
                                'nets', net_model + '_history.pkl')

    net2 = NeuralNet(
        layers=layer2,
        objective_loss_function=objectives.categorical_crossentropy,
        batch_iterator_train=Rotate_batch_Iterator(batch_size=128),
        update=updates.adadelta,
        on_epoch_finished=[
            SaveWeights(net_weights2, only_best=True, pickle=False),
            SaveTrainingHistory(net_history2),
            EarlyStopping(patience=max_epochs_patience)
        ],
        verbose=options['net_verbose'],
        max_epochs=num_epochs,
        train_split=TrainSplit(eval_size=train_split_perc),
    )

    # upload weights if set
    if options['load_weights'] == 'True':
        print "    --> CNN, loading weights from", options[
            'experiment'], 'configuration'
        net1.load_params_from(net_weights)
        net2.load_params_from(net_weights2)
    return [net1, net2]
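
# Hypothetical call to cascade_model; every key below is read inside the
# function, but the concrete values are placeholders:
example_options = {
    'modalities': ['FLAIR', 'T1'],
    'train_split': 0.25,
    'max_epochs': 200,
    'patience': 25,
    'weight_paths': './weights',
    'experiment': 'baseline',
    'net_verbose': 1,
    'patch_size': (11, 11, 11),
    'load_weights': 'False',
}
cnn1, cnn2 = cascade_model(example_options)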
Beispiel #40
0
def create_pretrained_vgg_nn_nolearn():
    '''
    *** This function need only be run once to create and save a nolearn NeuralNet ***
    ***     instance from the original lasagne layer weights for the vgg net.     ***
    Create a vgg neural net. Load pretrained weights.
    Pickle the entire net.
    Pickle the mean image.
    Return a nolearn.NeuralNet instance,  mean_image numpy array
    '''
    # define the vgg_s network
    vgg_nn = NeuralNet(
        layers = [
            (InputLayer, {
                        'name':'input',
                        'shape':(None,3,224,224)
                         }),
            (ConvLayer, {
                        'name':'conv1',
                        'num_filters':96,
                        'filter_size':(7,7),
                        'stride':2,
                        'flip_filters':False
                        }),
            (NormLayer, {
                        'name':'norm1',
                        'alpha':.0001
                        }),
            (PoolLayer, {
                        'name':'pool1',
                        'pool_size':(3,3),
                        'stride':3,
                        'ignore_border':False
                        }),
            (ConvLayer, {
                        'name':'conv2',
                        'num_filters':256,
                        'filter_size':(5,5),
                        'flip_filters':False
    #                     'pad':2,
    #                     'stride':1
                       }),
            (PoolLayer, {
                        'name':'pool2',
                        'pool_size':(2,2),
                        'stride':2,
                        'ignore_border':False
                        }),
            (ConvLayer, {
                        'name':'conv3',
                        'num_filters':512,
                        'filter_size':(3,3),
                        'pad':1,
    #                     'stride':1
                        'flip_filters':False
                       }),
            (ConvLayer, {
                        'name':'conv4',
                        'num_filters':512,
                        'filter_size':(3,3),
                        'pad':1,
    #                     'stride':1
                        'flip_filters':False
                        }),
            (ConvLayer, {
                        'name':'conv5',
                        'num_filters':512,
                        'filter_size':(3,3),
                        'pad':1,
    #                     'stride':1
                        'flip_filters':False
                         }),
            (PoolLayer, {
                        'name':'pool5',
                        'pool_size':(3,3),
                        'stride':3,
                        'ignore_border':False
                        }),
            (DenseLayer,{
                        'name':'fc6',
                        'num_units':4096
                       }),
            (DropoutLayer, {
                        'name':'drop6',
                        'p':.5
                        }),
            (DenseLayer, {
                        'name':'fc7',
                        'num_units':4096
                        }),
        ],



    #        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,

    #  Do not need these unless trainng the net.
    #     regression=True,  # flag to indicate we're dealing with regression problem
    #     max_epochs=400,  # we want to train this many epochs
    #     verbose=1,
    )

    # upload pretrained weights
    vgg_nn.initialize()
    vgg_nn.load_params_from('./vgg_nolearn_saved_wts_biases.pkl')

    # upload mean image
    model = pickle.load(open('./vgg_cnn_s.pkl'))
    mean_image = model['mean image']

    # pickle the model and the mean image (binary mode for pickle)
    with open("/data/mean_image.pkl", 'wb') as f:
        pickle.dump(mean_image, f)
    with open("/data/full_vgg.pkl", 'wb') as f:
        pickle.dump(vgg_nn, f)

    return vgg_nn, mean_image
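
# Hypothetical usage: this builder only needs to run once; later scripts can
# reload '/data/full_vgg.pkl' and '/data/mean_image.pkl' instead of rebuilding.
vgg_nn, mean_image = create_pretrained_vgg_nn_nolearn()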
Beispiel #41
0
def create_cae(folder_path, learning_rate, input_width=300, input_height=140, layers_size=[32, 32, 64, 32, 32],
               update_momentum=0.9, activation=None, last_layer_activation=tanh, filters_type=9, batch_size=32,
               epochs=25, train_valid_split=0.2, flip_batch=True):

    if filters_type == 3:
        filter_1 = (3, 3)
        filter_2 = (3, 3)
        filter_3 = (3, 3)
        filter_4 = (3, 3)
        filter_5 = (3, 3)
        filter_6 = (3, 3)
    elif filters_type == 5:
        filter_1 = (5, 5)
        filter_2 = (5, 5)
        filter_3 = (5, 5)
        filter_4 = (5, 5)
        filter_5 = (5, 5)
        filter_6 = (5, 5)
    elif filters_type == 7:
        filter_1 = (7, 7)
        filter_2 = (7, 7)
        filter_3 = (5, 5)
        filter_4 = (7, 7)
        filter_5 = (7, 7)
        filter_6 = (5, 5)
    elif filters_type == 9:
        filter_1 = (9, 9)
        filter_2 = (7, 7)
        filter_3 = (5, 5)
        filter_4 = (7, 7)
        filter_5 = (9, 9)
        filter_6 = (5, 5)
    else:
        # avoid a NameError further down when an unsupported value is passed
        raise ValueError("filters_type must be one of 3, 5, 7 or 9")

    cnn = NeuralNet(layers=[
        ('input', layers.InputLayer),
        ('conv1', layers.Conv2DLayer),
        ('conv11', layers.Conv2DLayer),
        ('conv12', layers.Conv2DLayer),
        ('pool1', layers.MaxPool2DLayer),
        ('conv2', layers.Conv2DLayer),
        ('conv21', layers.Conv2DLayer),
        ('conv22', layers.Conv2DLayer),
        ('pool2', layers.MaxPool2DLayer),
        ('conv3', layers.Conv2DLayer),
        ('conv31', layers.Conv2DLayer),
        ('conv32', layers.Conv2DLayer),
        ('unpool1', Unpool2DLayer),
        ('conv4', layers.Conv2DLayer),
        ('conv41', layers.Conv2DLayer),
        ('conv42', layers.Conv2DLayer),
        ('unpool2', Unpool2DLayer),
        ('conv5', layers.Conv2DLayer),
        ('conv51', layers.Conv2DLayer),
        ('conv52', layers.Conv2DLayer),
        ('conv6', layers.Conv2DLayer),
        ('output_layer', ReshapeLayer),
    ],

        input_shape=(None, 1, input_width, input_height),
        # Layer current size - 1x300x140

        conv1_num_filters=layers_size[0], conv1_filter_size=filter_1, conv1_nonlinearity=activation,
        conv1_border_mode="same",
        # conv1_pad="same",
        conv11_num_filters=layers_size[0], conv11_filter_size=filter_1, conv11_nonlinearity=activation,
        conv11_border_mode="same",
        # conv11_pad="same",
        conv12_num_filters=layers_size[0], conv12_filter_size=filter_1, conv12_nonlinearity=activation,
        conv12_border_mode="same",
        # conv12_pad="same",

        pool1_pool_size=(2, 2),

        conv2_num_filters=layers_size[1], conv2_filter_size=filter_2, conv2_nonlinearity=activation,
        conv2_border_mode="same",
        # conv2_pad="same",
        conv21_num_filters=layers_size[1], conv21_filter_size=filter_2, conv21_nonlinearity=activation,
        conv21_border_mode="same",
        # conv21_pad="same",
        conv22_num_filters=layers_size[1], conv22_filter_size=filter_2, conv22_nonlinearity=activation,
        conv22_border_mode="same",
        # conv22_pad="same",

        pool2_pool_size=(2, 2),

        conv3_num_filters=layers_size[2], conv3_filter_size=filter_3, conv3_nonlinearity=activation,
        conv3_border_mode="same",
        # conv3_pad="same",
        conv31_num_filters=layers_size[2], conv31_filter_size=filter_3, conv31_nonlinearity=activation,
        conv31_border_mode="same",
        # conv31_pad="same",
        conv32_num_filters=1, conv32_filter_size=filter_3, conv32_nonlinearity=activation,
        conv32_border_mode="same",
        # conv32_pad="same",

        unpool1_ds=(2, 2),

        conv4_num_filters=layers_size[3], conv4_filter_size=filter_4, conv4_nonlinearity=activation,
        conv4_border_mode="same",
        # conv4_pad="same",
        conv41_num_filters=layers_size[3], conv41_filter_size=filter_4, conv41_nonlinearity=activation,
        conv41_border_mode="same",
        # conv41_pad="same",
        conv42_num_filters=layers_size[3], conv42_filter_size=filter_4, conv42_nonlinearity=activation,
        conv42_border_mode="same",
        # conv42_pad="same",

        unpool2_ds=(2, 2),

        conv5_num_filters=layers_size[4], conv5_filter_size=filter_5, conv5_nonlinearity=activation,
        conv5_border_mode="same",
        # conv5_pad="same",
        conv51_num_filters=layers_size[4], conv51_filter_size=filter_5, conv51_nonlinearity=activation,
        conv51_border_mode="same",
        # conv51_pad="same",
        conv52_num_filters=layers_size[4], conv52_filter_size=filter_5, conv52_nonlinearity=activation,
        conv52_border_mode="same",
        # conv52_pad="same",

        conv6_num_filters=1, conv6_filter_size=filter_6, conv6_nonlinearity=last_layer_activation,
        conv6_border_mode="same",
        # conv6_pad="same",

        output_layer_shape=(([0], -1)),

        update_learning_rate=learning_rate,
        update_momentum=update_momentum,
        update=nesterov_momentum,
        train_split=TrainSplit(eval_size=train_valid_split),
        batch_iterator_train=FlipBatchIterator(batch_size=batch_size) if flip_batch else BatchIterator(
            batch_size=batch_size),
        regression=True,
        max_epochs=epochs,
        verbose=1,
        hiddenLayer_to_output=-11)

    # try:
    #     pickle.dump(cnn, open(folder_path + 'conv_ae.pkl', 'w'))
    #     # cnn = pickle.load(open(folder_path + 'conv_ae.pkl','r'))
    #     cnn.save_weights_to(folder_path + 'conv_ae.np')
    # except:
    #     print("Could not pickle cnn")

    cnn.load_params_from(folder_path + CONV_AE_NP)
    # cnn.load_weights_from(folder_path + CONV_AE_NP)
    return cnn
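
# Hypothetical call to create_cae; the folder is expected to already contain the
# saved weight file referenced by CONV_AE_NP, and the activations are placeholders:
from lasagne.nonlinearities import rectify, tanh
cae = create_cae(folder_path='./cae_run/', learning_rate=0.01,
                 activation=rectify, last_layer_activation=tanh,
                 filters_type=9, batch_size=32, epochs=25)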
Beispiel #42
0
        'ignore_border': False
    }),
    (DenseLayer, {
        'name': 'fc6',
        'num_units': 4096
    }),
    (DropoutLayer, {
        'name': 'drop6',
        'p': 0.5
    }),
    (DenseLayer, {
        'name': 'fc7',
        'num_units': 4096
    })
]

net0 = NeuralNet(
    layers=layers0,
    update=nesterov_momentum,
    update_learning_rate=0.01,
    update_momentum=0.9,

    #  regression=True,  # flag to indicate we're dealing with regression problem
    #  max_epochs=400,  # we want to train this many epochs
    verbose=1,
)

net0.load_params_from('nolearn_with_w_b.pkl')

with open('cars_net.pkl', 'wb') as f:
    pickle.dump(net0, f)
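
# A later script can reload the pickled net directly, e.g.:
#   with open('cars_net.pkl', 'rb') as f:
#       net0 = pickle.load(f)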
Beispiel #43
0
def process(fin, fon, iden, version, evalue, prob, minCoverage, pipeline, version_m):

    fi = fin  # first parameter is the input file

    # fi = opt.path+"/db/argsdb.reads.align.test.tsv";
    print("Loading deep learning model ...")
    deepL = cPickle.load(
        open(opt.path+"/model/"+version_m+"/metadata"+version+".pkl"))
    clf = NeuralNet(
        layers=model(deepL['input_nodes'], deepL['output_nodes']),
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        regression=False,
        max_epochs=100,
        verbose=2,
    )

    clf.load_params_from(opt.path+"/model/"+version_m+"/model"+version+".pkl")

    # print deepL['features']

    print("loading gene lengths")
    glen = {i.split()[0]: float(i.split()[1]) for i in open(
        opt.path+"/database/"+version_m+"/features.gene.length")}

    print("Loading sample to analyze")
    [align, BH] = process_blast.make_alignments_json(
        fi, iden=iden, eval=evalue, coverage=minCoverage, BitScore=True, Features=deepL['features'], glen=glen, pipeline=pipeline)

    # print align
    print("Predicting ARG-like reads: Running deepARG" +
          version+" model version "+version_m)
    print("input dataset is splitted into chunks of 10000 reads")

    chunks_input = chunks(align, size=10000)
    predict = []
    for _chunk in tqdm(chunks_input, total=int(len(align)/10000), unit="chunks"):
        predict += main(deepL, clf, _chunk, version)

    # print predict
    print("Computing relative abundance")

    fo = open(fon+'.ARG', 'w')  # second parameter is the output file
    fo2 = open(fon+'.potential.ARG', 'w')
    fo.write("#ARG\tquery-start\tquery-end\tread_id\tpredicted_ARG-class\tbest-hit\tprobability\tidentity\talignment-length\talignment-bitscore\talignment-evalue\tcounts\n")
    fo2.write("#ARG\tquery-start\tquery-end\tread_id\tpredicted_ARG-class\tbest-hit\tprobability\tidentity\talignment-length\talignment-bitscore\talignment-evalue\tcounts\n")
    for i in tqdm(predict):

        # 1 Get the alignments for that sample
        x_align = align[i[0]]
        # 2 Get only the alignments with the predicted label
        x_align = {o: x_align[o] for o in x_align.keys() if "|"+i[1]+"|" in o}
        # 3 Compute the best Hit

        if x_align:
            x_bh = max(x_align.iteritems(), key=operator.itemgetter(1))[0]
            bs_bh = x_align[x_bh]
            # print(x_bh, align[i[0]][x_bh])
            # print(BH[i[0]])
            if i[2] >= prob:
                fo.write("\t".join([
                    # gene where read is from (subtype)
                    x_bh.split("|")[-1].upper(),
                    BH[i[0]][2][8],  # alignment gene start
                    BH[i[0]][2][9],  # alignment gene end
                    i[0],  # read-id
                    i[1],  # predicted type
                    x_bh,  # best hit
                    str(i[2]),  # probability
                    BH[i[0]][2][2],  # identity
                    BH[i[0]][2][3],  # alignment length
                    BH[i[0]][2][-1],  # bitscore
                    BH[i[0]][2][-2],  # evalue
                    '1'  # count
                ])+"\n"
                )
            else:
                x_bh = BH[i[0]][0]
                bs_bh = BH[i[0]][1]
                fo2.write("\t".join([
                    # gene where read is from (subtype)
                    x_bh.split("|")[-1].upper(),
                    BH[i[0]][2][8],  # alignment gene start
                    BH[i[0]][2][9],  # alignment gene end
                    i[0],  # read-id
                    i[1],  # predicted type
                    x_bh,  # best hit
                    str(i[2]),  # probability
                    BH[i[0]][2][2],  # identity
                    BH[i[0]][2][3],  # alignment length
                    BH[i[0]][2][-1],  # bitscore
                    BH[i[0]][2][-2],  # evalue
                    '1'  # count
                ])+"\n"
                )
        else:
            x_bh = BH[i[0]][0]
            bs_bh = BH[i[0]][1]
            fo2.write("\t".join([
                # gene where read is from (subtype)
                x_bh.split("|")[-1].upper(),
                BH[i[0]][2][8],  # alignment gene start
                BH[i[0]][2][9],  # alignment gene end
                i[0],  # read-id
                i[1],  # predicted type
                "undefined",  # best hit
                str(i[2]),  # probability
                BH[i[0]][2][2],  # identity
                BH[i[0]][2][3],  # alignment length
                BH[i[0]][2][-1],  # bitscore
                BH[i[0]][2][-2],  # evalue
                # x_bh, # problematic-classification: when the prediction class has a different best feature (the arg cannot be defined - probably because errors in the database )
                '1'
            ])+"\n"
            )

        # remove entries with low coverage
        # if float(BH[i[0]][2][3])/glen[x_bh] < minlenper: continue

    fo.close()
    fo2.close()
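
# Hypothetical invocation of process(); every value below is a placeholder to be
# replaced with the caller's real paths and thresholds:
#   process(fin='reads.align.tsv', fon='reads.out', iden=50, version='2',
#           evalue=1e-10, prob=0.8, minCoverage=0.8, pipeline='reads', version_m='v2')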
class CNN(object):
    __metaclass__ = Singleton
    channels = 3
    image_size = [64, 64]
    layers = [
        # layer dealing with the input data
        (InputLayer, {
            'shape': (None, channels, image_size[0], image_size[1])
        }),
        # first stage of our convolutional layers
        (Conv2DLayer, {
            'num_filters': 32,
            'filter_size': 9
        }),
        (Conv2DLayer, {
            'num_filters': 32,
            'filter_size': 5
        }),
        (MaxPool2DLayer, {
            'pool_size': 2
        }),
        # second stage of our convolutional layers
        (Conv2DLayer, {
            'num_filters': 32,
            'filter_size': 5
        }),
        (Conv2DLayer, {
            'num_filters': 32,
            'filter_size': 3
        }),
        (MaxPool2DLayer, {
            'pool_size': 2
        }),
        # two dense layers with dropout
        (DenseLayer, {
            'num_units': 256
        }),
        (DropoutLayer, {}),
        (DenseLayer, {
            'num_units': 256
        }),
        # the output layer
        (DenseLayer, {
            'num_units': 2,
            'nonlinearity': softmax
        }),
    ]

    def __init__(self):
        logger = logging.getLogger(__name__)
        logger.info("Initializing neural net...")
        self.net = NeuralNet(layers=self.layers, update_learning_rate=0.0002)
        self.net.load_params_from("conv_params")
        logger.info("Finished loading parameters")

    def resize(self, infile):
        try:
            im = Image.open(infile)
            resized_im = np.array(ImageOps.fit(
                im, (self.image_size[0], self.image_size[1]), Image.ANTIALIAS),
                                  dtype=np.uint8)
            rgb = np.array([
                resized_im[:, :, 0], resized_im[:, :, 1], resized_im[:, :, 2]
            ])
            return rgb.reshape(1, self.channels, self.image_size[0],
                               self.image_size[1])

        except IOError:
            return "cannot create thumbnail for '%s'" % infile

    def predict(self, X):
        is_positive = self.net.predict(X)[0] == 1
        return "true" if is_positive else "false"
Beispiel #45
0
def main(input_file, model_path):
    batch_size = 128
    nb_classes = 62  # A-Z, a-z and 0-9
    nb_epoch = 2

    # Input image dimensions
    img_rows, img_cols = 32, 32

    # Path of data files
    path = input_file
    ### PREDICTION ###

    # # Load the model with the highest validation accuracy
    # model.load_weights("best.kerasModelWeights")

    # Load Kaggle test set
    X_test = np.load(path + "/testPreproc_" + str(img_rows) + "_" +
                     str(img_cols) + ".npy")

    print X_test.shape

    # Load the preprocessed data and labels
    X_train_all = np.load(path + "/trainPreproc_" + str(img_rows) + "_" +
                          str(img_cols) + ".npy")
    Y_train_all = np.load(path + "/labelsPreproc.npy")

    X_train, X_val, Y_train, Y_val = \
        train_test_split(X_train_all, Y_train_all, test_size=0.25, stratify=np.argmax(Y_train_all, axis=1))

    print X_train.shape

    Y_val = convert_(Y_val)

    X_train = X_train.reshape((-1, 1, 32, 32))
    #
    # # input shape for neural network

    # labels = labels.astype(np.uint8)

    X_val = X_val.reshape((-1, 1, 32, 32))
    #
    # # input shape for neural network

    Y_val = Y_val.astype(np.uint8)
    #
    input_image_vector_shape = (None, 1, 32, 32)
    #
    '''
        @description: Two layer convolutional neural network
    '''
    #input layer
    input_layer = ('input', layers.InputLayer)
    # fist layer design
    first_layer_conv_filter = layers.Conv2DLayer
    first_layer_pool_filter = layers.MaxPool2DLayer

    conv_filter = ('conv2d1', first_layer_conv_filter)
    pool_filter = ('maxpool1', first_layer_pool_filter)

    # second layer design
    second_layer_conv_filter = layers.Conv2DLayer
    second_layer_pool_filter = layers.MaxPool2DLayer

    conv_filter2 = ('conv2d2', second_layer_conv_filter)
    pool_filter2 = ('maxpool2', second_layer_pool_filter)

    # dropout rates ( used for regularization )
    dropout_layer = layers.DropoutLayer
    drop1 = 0.5
    drop2 = 0.5
    first_drop_layer = ('dropout1', dropout_layer)
    second_drop_layer = ('dropout2', dropout_layer)
    #
    # network parameters
    design_layers = [
        input_layer, conv_filter, pool_filter, conv_filter2, pool_filter2,
        first_drop_layer, ('dense', layers.DenseLayer), second_drop_layer,
        ('output', layers.DenseLayer)
    ]
    # Neural net object instance
    net1 = NeuralNet(
        # declare convolutional neural network layers
        # convolutional mapping and pooling window sized will be declared
        # and set to various sizes
        layers=design_layers,
        # input layer
        # vector size of image will be taken as 32 x 32
        input_shape=input_image_vector_shape,
        # first layer convolutional filter
        # mapping layer set at 5 x 5
        conv2d1_num_filters=32,
        conv2d1_filter_size=(5, 5),
        conv2d1_nonlinearity=lasagne.nonlinearities.rectify,
        conv2d1_W=lasagne.init.HeNormal(gain='relu'),
        # first layer convolutional pool filter
        # mapping layer set at 2 x 2
        maxpool1_pool_size=(2, 2),
        # second layer convolutional filter
        # mapping layer set at 5 x 5
        conv2d2_num_filters=32,
        conv2d2_filter_size=(5, 5),
        conv2d2_nonlinearity=lasagne.nonlinearities.rectify,
        # second layer convolutional pool filter
        # mapping layer set at 2 x 2
        maxpool2_pool_size=(2, 2),
        dropout1_p=drop1,
        # hidden unit density
        dense_num_units=512,
        dense_nonlinearity=lasagne.nonlinearities.rectify,
        # dropout2
        dropout2_p=drop2,
        # output
        output_nonlinearity=lasagne.nonlinearities.softmax,
        #corresponds to the amount of target labels to compare to
        output_num_units=62,
        # optimization method params
        # NOTE: Different momentum steepest gradient methods yield varied
        #       results.
        update=nesterov_momentum,
        # 69
        update_learning_rate=0.01,
        update_momentum=0.078,
        # update_learning_rate=1e-4,
        # update_momentum=0.9,
        # max_epochs=1000,
        # update_learning_rate=0.1,
        # update_momentum=0.003,
        max_epochs=1000,
        verbose=1,
    )
    print "Loading Neural Net Parameters..."
    net1.initialize_layers()
    net1.load_weights_from('{}_weightfile.w'.format(model_path))
    '''
    new_twoLayer_paramfile.w	new_twoLayer_weightfile.w
    '''
    net1.load_params_from('{}_paramfile.w'.format(model_path))

    from sklearn.metrics import classification_report, accuracy_score, confusion_matrix

    print 'Testing...'
    y_true, y_pred = Y_val, net1.predict(X_val)  # Get our predictions
    print(classification_report(y_true,
                                y_pred))  # Classification on each digit

    print net1.predict(X_val)
    print Y_val
    a = confusion_matrix(Y_val, net1.predict(X_val))
    b = np.trace(a)
    print 'Training Accuracy: ' + str(float(b) / float(np.sum(a)))
Beispiel #46
0
class EmotionClassifier:
    def __init__(self,
                 data_directory="/home/nicholai/Documents/Emotion Files/",
                 face_data="../FaceData/landmarks.dat",
                 show_image=False,
                 epochs=10,
                 dropout_1=0.5,
                 dropout_2=0.5):
        self.data_dir = data_directory
        self.picture_dir = self.data_dir + "cohn-kanade-images/"
        self.FACS_dir = self.data_dir + "FACS/"
        self.Emotion_dir = self.data_dir + "Emotion/"
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(face_data)
        self.face_sz = 200
        self.extra_face_space = 0
        self.face_sz += self.extra_face_space
        self.width = self.face_sz
        self.height = self.face_sz
        self.show_img = show_image
        self.network = NeuralNet(
            layers=[
                ('input', layers.InputLayer),
                ('conv2d1', layers.Conv2DLayer),
                # ('conv2d2', layers.Conv2DLayer),
                ('maxpool1', layers.MaxPool2DLayer),
                # ('conv2d3', layers.Conv2DLayer),
                ('conv2d4', layers.Conv2DLayer),
                ('maxpool2', layers.MaxPool2DLayer),
                ('dropout1', layers.DropoutLayer),
                ('learningLayer', layers.DenseLayer),
                ('learningLayer1', layers.DenseLayer),
                ('output', layers.DenseLayer),
            ],
            # input layer
            input_shape=(None, 1, self.face_sz, self.face_sz),
            # layer conv2d1
            conv2d1_num_filters=32,
            conv2d1_filter_size=(5, 5),
            conv2d1_nonlinearity=lasagne.nonlinearities.rectify,
            conv2d1_W=lasagne.init.GlorotUniform(),
            # layer conv2d2
            # conv2d2_num_filters=32,
            # conv2d2_filter_size=(5, 5),
            # conv2d2_nonlinearity=lasagne.nonlinearities.rectify,
            # conv2d2_W=lasagne.init.GlorotUniform(),
            # layer maxpool1
            maxpool1_pool_size=(5, 5),
            # layer conv2d3
            # conv2d3_num_filters=32,
            # conv2d3_filter_size=(5, 5),
            # conv2d3_nonlinearity=lasagne.nonlinearities.rectify,
            # conv2d3_W=lasagne.init.GlorotUniform(),
            # layer conv2d4
            conv2d4_num_filters=32,
            conv2d4_filter_size=(5, 5),
            conv2d4_nonlinearity=lasagne.nonlinearities.rectify,
            conv2d4_W=lasagne.init.GlorotUniform(),
            # layer maxpool2
            maxpool2_pool_size=(5, 5),
            # dropout1a
            dropout1_p=dropout_1,
            # dense
            learningLayer_num_units=1024,
            learningLayer_nonlinearity=lasagne.nonlinearities.rectify,
            learningLayer1_num_units=512,
            learningLayer1_nonlinearity=lasagne.nonlinearities.rectify,
            # # dropout2
            # # dropout2_p=dropout_2,
            # # dense1
            # dense1_num_units=256,
            # dense1_nonlinearity=lasagne.nonlinearities.rectify,
            # output
            output_nonlinearity=lasagne.nonlinearities.softmax,
            output_num_units=8,
            # optimization method params
            regression=False,
            update=nesterov_momentum,
            update_learning_rate=theano.shared(np.cast['float32'](0.05)),
            update_momentum=theano.shared(np.cast['float32'](0.9)),
            on_epoch_finished=[
                AdjustVariable('update_learning_rate', start=0.05, stop=0.01),
                AdjustVariable('update_momentum', start=0.9, stop=0.999),
            ],
            max_epochs=epochs,
            verbose=2,
        )
        if self.show_img:
            self.win = dlib.image_window()

    def load_training_set(self):
        """
        Loads the CK+ data-set of images, processes the facial key-points of each face, and returns the emotion codes
        of each participant 0-7 (i.e. 0=neutral, 1=anger, 2=contempt, 3=disgust, 4=fear, 5=happy, 6=sadness, 7=surprise)
        :return: Training X (X_Train) and Y (y_train) Data as well as testing X (X_test) and Y (y_test) Data
        """
        x_train = np.zeros((382, self.width, self.height), dtype='float32')
        y_train = np.zeros(382, dtype='int32')
        i = 0
        for root, name, files in os.walk(self.picture_dir):
            files = [file for file in files if file.endswith(".png")]
            if len(files) == 0:
                continue
            fs = sorted(files, key=lambda x: x[:-4])
            emotion = self.get_emotion(fs[-1])
            # sampleImg = self.get_face_image(os.path.join(root, fs[0]))
            # print(sampleImg.shape)
            if emotion != -1:
                if i % 7 == 0:
                    # self.show_faces(os.path.join(root, fs[0]))
                    # self.show_faces(os.path.join(root, fs[-1]))
                    x_train[i] = self.get_face_image(os.path.join(
                        root, fs[0]))  # add the key-points of a neutral face
                    y_train[i] = 0  # emotion code of a neutral face
                    i += 1
                x_train[i] = self.get_face_image(os.path.join(root, fs[-1]))
                y_train[i] = emotion
                i += 1
            print(i)
        return x_train.astype(np.float32).reshape(-1, 1, self.face_sz,
                                                  self.face_sz), y_train

    def load_keypoint_training_set(self):
        x_train = np.zeros((655, 2, 68), dtype='int16')
        y_train = np.zeros(655, dtype='int16')
        i = 0
        for root, name, files in os.walk(self.picture_dir):
            files = [file for file in files if file.endswith(".png")]
            if len(files) == 0:
                continue
            fs = sorted(files, key=lambda x: x[:-4])
            emotion = self.get_emotion(fs[-1])
            # sampleImg = self.get_face_image(os.path.join(root, fs[0]))
            # print(sampleImg.shape)
            if emotion != -1:
                x_train[i] = self.get_keypoints(os.path.join(
                    root, fs[0]))  # add the key-points of a neutral face
                y_train[i] = 0  # emotion code of a neutral face
                i += 1
                x_train[i] = self.get_keypoints(os.path.join(
                    root, fs[-1]))  # add the key-points of an expressed face
                y_train[i] = emotion
                i += 1
            print(i)
        return x_train.astype(np.float32).reshape(-1, 1, 2, 68), y_train

    def get_keypoints(self, image_file):
        """
        Returns the key-point data from the facial recognition process
        :param image_file: a full file path to an image containing a face
        :return: a landmarks list
        """
        img = imageio.imread(image_file)
        details = self.detector(img, 1)
        landmarks = np.zeros((2, 68), dtype='int16')
        if self.show_img:
            self.win.set_image(img)
        for i, j in enumerate(details):
            shape = self.predictor(img, j)
            if self.show_img:
                self.win.add_overlay(shape)
            for k in range(0, 68):
                part = shape.part(k)
                landmarks[0][k] = part.x
                landmarks[1][k] = part.y
        if self.show_img:
            self.win.add_overlay(details)
        return landmarks

    def get_face_image(self, filename):
        img = imageio.imread(filename)
        details = self.detector(img, 1)
        for i, j in enumerate(details):
            shape = self.predictor(img, j)
            for k in range(0, 68):
                part = shape.part(k)
                img[part.y][part.x] = 255
        img = resize(img[j.top():j.bottom(),
                         j.left():j.right()],
                     output_shape=(self.face_sz, self.face_sz),
                     preserve_range=True)
        if len(img.shape) == 3:
            img = rgb2gray(img)
        img = np.asarray(img, dtype='float32') / 255
        return img

    def show_faces(self, filename):
        img = imageio.imread(filename)
        details = self.detector(img, 1)
        for i, j in enumerate(details):
            shape = self.predictor(img, j)
            for k in range(0, 68):
                part = shape.part(k)
                img[part.y][part.x] = 255
        if self.show_img:
            self.win.set_image(img[j.top():j.bottom(), j.left():j.right()])

    def get_full_image(self, filename):
        img = imageio.imread(filename, True)
        img = np.asarray(img, dtype='float32') / 255
        return img[0:self.width, 0:self.height]

    def get_facs(self, filename):
        """
        Basically Take a filename that is formatted like so 'S114_005_00000022.png'
        and turn that into a directory structure which contains a FACS text file
        named 'S114_005_00000022.txt' in ./FACS/S114/005/
        :param filename: Should be the name of the file (only) of the CK+ test picture
        :return: Returns the FACS codes and Emotion code as FACS, Emotion
        """
        fn = filename[:-4].split("_")  # Strip filename
        filepath = os.path.join(self.FACS_dir, fn[0], fn[1],
                                filename[:-4] + "_emotion.txt")
        # Craft the File path of the FACS emotion associated with the emotion changes
        lines = [line.split('\n')
                 for line in open(filepath)]  # Read the FACS codes from file
        return lines

    def get_emotion(self, filename):
        fn = filename[:-4].split("_")
        filepath = os.path.join(self.Emotion_dir, fn[0], fn[1],
                                filename[:-4] + "_emotion.txt")
        # Craft the File path of the FACS emotion associated with the emotion changes
        if os.path.isfile(filepath):
            line = [
                int(float(lines.strip(' ').strip('\n')))
                for lines in open(filepath)
            ]
            return line[0]
        return -1

    def train(self, x_train, y_train, epoch=0):
        """
        Fits training data to the Convolutional Neural Network
        :param epoch: number of epochs
        :param x_train: Training x values
        :param y_train: Training y values
        """
        if epoch == 0:
            self.network.fit(x_train, y_train)
        else:
            self.network.fit(x_train, y_train, epoch)

    def predict(self, image):
        return self.network.predict(image)

    def save_network_state(self, paramsname="params.npz"):
        self.network.save_params_to(paramsname)

    def load_network_state(self, paramsname="params.npz"):
        self.network.load_params_from(paramsname)
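
# Hypothetical end-to-end usage of EmotionClassifier (paths are placeholders):
clf = EmotionClassifier(data_directory='/path/to/ck_plus/',
                        face_data='../FaceData/landmarks.dat',
                        epochs=10)
X, y = clf.load_training_set()
clf.train(X, y)
clf.save_network_state('params.npz')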
Beispiel #47
0
	def rodar(self):

			np.set_printoptions(threshold=np.nan)
			sourcepath = Classificar.sourcepath
			numerodeimagens = Classificar.numerodeimagens

			X_test = np.zeros((numerodeimagens, 19200),
							  dtype=np.int)  # Allocates space for each new image you want to classify, each line is an image

			for i in range(1, numerodeimagens):  # read the images
				X_test[i - 1] = np.asarray(Image.open(sourcepath+"galaxy" + str(i) + ".jpg")).reshape(
					-1)[0:19200]

			# Reshape the images to help the CNN execution
			X_test = X_test.reshape((-1, 3, 80, 80))

			# Define the CNN, must be the same CNN that is saved into your model that you generated running CNN.py
			net1 = NeuralNet(
				layers=[('input', layers.InputLayer),
						('conv2d1', layers.Conv2DLayer),
						('maxpool1', layers.MaxPool2DLayer),
						('conv2d2', layers.Conv2DLayer),
						('maxpool2', layers.MaxPool2DLayer),
						('conv2d3', layers.Conv2DLayer),
						('maxpool3', layers.MaxPool2DLayer),
						# ('conv2d4', layers.Conv2DLayer),
						# ('maxpool4', layers.MaxPool2DLayer),
						('dropout1', layers.DropoutLayer),
						# s('dropout2', layers.DropoutLayer),
						('dense', layers.DenseLayer),
						# ('dense2', layers.DenseLayer),
						('output', layers.DenseLayer),
						],

				input_shape=(None, 3, 80, 80),

				conv2d1_num_filters=16,
				conv2d1_filter_size=(3, 3),
				conv2d1_nonlinearity=lasagne.nonlinearities.rectify,
				conv2d1_W=lasagne.init.GlorotUniform(),

				maxpool1_pool_size=(2, 2),

				conv2d2_num_filters=16,
				conv2d2_filter_size=(3, 3),
				conv2d2_nonlinearity=lasagne.nonlinearities.rectify,

				maxpool2_pool_size=(2, 2),

				conv2d3_num_filters=16,
				conv2d3_filter_size=(3, 3),
				conv2d3_nonlinearity=lasagne.nonlinearities.rectify,

				maxpool3_pool_size=(2, 2),

				# conv2d4_num_filters = 16,
				# conv2d4_filter_size = (2,2),
				# conv2d4_nonlinearity = lasagne.nonlinearities.rectify,

				# maxpool4_pool_size = (2,2),

				dropout1_p=0.5,

				# dropout2_p = 0.5,

				dense_num_units=16,
				dense_nonlinearity=lasagne.nonlinearities.rectify,

				# dense2_num_units = 16,
				# dense2_nonlinearity = lasagne.nonlinearities.rectify,

				output_nonlinearity=lasagne.nonlinearities.softmax,
				output_num_units=2,

				update=nesterov_momentum,
				update_learning_rate=0.001,
				update_momentum=0.9,
				max_epochs=1000,
				verbose=1,
			)

			net1.load_params_from("/Users/Pedro/PycharmProjects/BIDHU/docs/train.txt")  # Read model

			preds = net1.predict(X_test)  # make predictions


			strpreds = str(preds)
			strpreds = strpreds.replace(" ", "\n")

			strpreds = strpreds.replace("1", "yes")
			strpreds = strpreds.replace("0", "no")
			xstrpreds = (strpreds.splitlines())
			for i in range(len(xstrpreds)):
				xstrpreds[i] = str(i + 1) + "-" + xstrpreds[i]
			strpreds = str(xstrpreds)
			strpreds = strpreds.replace(" ", "\n")
			strpreds = strpreds.replace("[", "")
			strpreds = strpreds.replace("]", "")
			strpreds = strpreds.replace("'", "")
			strpreds = strpreds.replace(",", "")
			strpreds = strpreds.replace("-", " - ")

			return strpreds
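
# The string munging above can be written more directly; a roughly equivalent
# sketch (assuming `preds` is the 0/1 prediction vector):
#   strpreds = "\n".join("%d - %s" % (i + 1, "yes" if p == 1 else "no")
#                        for i, p in enumerate(preds))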
Beispiel #48
0
        (DenseLayer,
         dict(name='out',
              num_units=n_classes,
              nonlinearity=nonlinearities.softmax)),
    ],
    regression=False,
    objective_loss_function=objectives.categorical_crossentropy,
    update=updates.adam,
    batch_iterator_train=train_iterator,
    batch_iterator_test=test_iterator,
    on_epoch_finished=[
        save_weights, save_training_history, plot_training_history
    ],
    verbose=10,
    max_epochs=250,
)

if __name__ == '__main__':
    # X_train, X_test are image file names
    # They will be read in the iterator
    X_train, X_test, y_train, y_test = load_data(test_size=0.25,
                                                 random_state=42)

    net.fit(X_train, y_train)

    # Load the best weights from pickled model
    net.load_params_from('./examples/cifar10/model_weights.pkl')

    score = net.score(X_test, y_test)
    print 'Final score %.4f' % score
def build_model(weights_path, options):
    """
    Build the CNN model. Create the Neural Net object and return it back. 
    Inputs: 
    - subject name: used to save the net weights accordingly.
    - options: several hyper-parameters used to configure the net.
    
    Output:
    - net: a NeuralNet object 
    """

    net_model_name = options['experiment']

    try:
        os.mkdir(os.path.join(weights_path, net_model_name))
    except:
        pass

    net_weights = os.path.join(weights_path, net_model_name,
                               net_model_name + '.pkl')
    net_history = os.path.join(weights_path, net_model_name,
                               net_model_name + '_history.pkl')

    # select hyper-parameters
    t_verbose = options['net_verbose']
    train_split_perc = options['train_split']
    num_epochs = options['max_epochs']
    max_epochs_patience = options['patience']
    early_stopping = EarlyStopping(patience=max_epochs_patience)
    save_weights = SaveWeights(net_weights, only_best=True, pickle=False)
    save_training_history = SaveTrainingHistory(net_history)

    # build the architecture
    ps = options['patch_size'][0]
    num_channels = 1
    fc_conv = 180
    fc_fc = 180
    dropout_conv = 0.5
    dropout_fc = 0.5

    # --------------------------------------------------
    # channel_1: axial
    # --------------------------------------------------

    axial_ch = InputLayer(name='in1', shape=(None, num_channels, ps, ps))
    axial_ch = prelu(batch_norm(
        Conv2DLayer(axial_ch,
                    name='axial_ch_conv1',
                    num_filters=20,
                    filter_size=3)),
                     name='axial_ch_prelu1')
    axial_ch = prelu(batch_norm(
        Conv2DLayer(axial_ch,
                    name='axial_ch_conv2',
                    num_filters=20,
                    filter_size=3)),
                     name='axial_ch_prelu2')
    axial_ch = MaxPool2DLayer(axial_ch, name='axial_max_pool_1', pool_size=2)
    axial_ch = prelu(batch_norm(
        Conv2DLayer(axial_ch,
                    name='axial_ch_conv3',
                    num_filters=40,
                    filter_size=3)),
                     name='axial_ch_prelu3')
    axial_ch = prelu(batch_norm(
        Conv2DLayer(axial_ch,
                    name='axial_ch_conv4',
                    num_filters=40,
                    filter_size=3)),
                     name='axial_ch_prelu4')
    axial_ch = MaxPool2DLayer(axial_ch, name='axial_max_pool_2', pool_size=2)
    axial_ch = prelu(batch_norm(
        Conv2DLayer(axial_ch,
                    name='axial_ch_conv5',
                    num_filters=60,
                    filter_size=3)),
                     name='axial_ch_prelu5')
    axial_ch = DropoutLayer(axial_ch, name='axial_l1drop', p=dropout_conv)
    axial_ch = DenseLayer(axial_ch, name='axial_d1', num_units=fc_conv)
    axial_ch = prelu(axial_ch, name='axial_prelu_d1')

    # --------------------------------------------------
    # channel_2: coronal
    # --------------------------------------------------

    coronal_ch = InputLayer(name='in2', shape=(None, num_channels, ps, ps))
    coronal_ch = prelu(batch_norm(
        Conv2DLayer(coronal_ch,
                    name='coronal_ch_conv1',
                    num_filters=20,
                    filter_size=3)),
                       name='coronal_ch_prelu1')
    coronal_ch = prelu(batch_norm(
        Conv2DLayer(coronal_ch,
                    name='coronal_ch_conv2',
                    num_filters=20,
                    filter_size=3)),
                       name='coronal_ch_prelu2')
    coronal_ch = MaxPool2DLayer(coronal_ch,
                                name='coronal_max_pool_1',
                                pool_size=2)
    coronal_ch = prelu(batch_norm(
        Conv2DLayer(coronal_ch,
                    name='coronal_ch_conv3',
                    num_filters=40,
                    filter_size=3)),
                       name='coronal_ch_prelu3')
    coronal_ch = prelu(batch_norm(
        Conv2DLayer(coronal_ch,
                    name='coronal_ch_conv4',
                    num_filters=40,
                    filter_size=3)),
                       name='coronal_ch_prelu4')
    coronal_ch = MaxPool2DLayer(coronal_ch,
                                name='coronal_max_pool_2',
                                pool_size=2)
    coronal_ch = prelu(batch_norm(
        Conv2DLayer(coronal_ch,
                    name='coronal_ch_conv5',
                    num_filters=60,
                    filter_size=3)),
                       name='coronal_ch_prelu5')
    coronal_ch = DropoutLayer(coronal_ch,
                              name='coronal_l1drop',
                              p=dropout_conv)
    coronal_ch = DenseLayer(coronal_ch, name='coronal_d1', num_units=fc_conv)
    coronal_ch = prelu(coronal_ch, name='coronal_prelu_d1')

    # --------------------------------------------------
    # channel_3: saggital
    # --------------------------------------------------

    saggital_ch = InputLayer(name='in3', shape=(None, num_channels, ps, ps))
    saggital_ch = prelu(batch_norm(
        Conv2DLayer(saggital_ch,
                    name='saggital_ch_conv1',
                    num_filters=20,
                    filter_size=3)),
                        name='saggital_ch_prelu1')
    saggital_ch = prelu(batch_norm(
        Conv2DLayer(saggital_ch,
                    name='saggital_ch_conv2',
                    num_filters=20,
                    filter_size=3)),
                        name='saggital_ch_prelu2')
    saggital_ch = MaxPool2DLayer(saggital_ch,
                                 name='saggital_max_pool_1',
                                 pool_size=2)
    saggital_ch = prelu(batch_norm(
        Conv2DLayer(saggital_ch,
                    name='saggital_ch_conv3',
                    num_filters=40,
                    filter_size=3)),
                        name='saggital_ch_prelu3')
    saggital_ch = prelu(batch_norm(
        Conv2DLayer(saggital_ch,
                    name='saggital_ch_conv4',
                    num_filters=40,
                    filter_size=3)),
                        name='saggital_ch_prelu4')
    saggital_ch = MaxPool2DLayer(saggital_ch,
                                 name='saggital_max_pool_2',
                                 pool_size=2)
    saggital_ch = prelu(batch_norm(
        Conv2DLayer(saggital_ch,
                    name='saggital_ch_conv5',
                    num_filters=60,
                    filter_size=3)),
                        name='saggital_ch_prelu5')
    saggital_ch = DropoutLayer(saggital_ch,
                               name='saggital_l1drop',
                               p=dropout_conv)
    saggital_ch = DenseLayer(saggital_ch,
                             name='saggital_d1',
                             num_units=fc_conv)
    saggital_ch = prelu(saggital_ch, name='saggital_prelu_d1')

    # FC layer 540
    layer = ConcatLayer(name='elem_channels',
                        incomings=[axial_ch, coronal_ch, saggital_ch])
    layer = DropoutLayer(layer, name='f1_drop', p=dropout_fc)
    layer = DenseLayer(layer, name='FC1', num_units=540)
    layer = prelu(layer, name='prelu_f1')

    # concatenate channels 540 + 15
    layer = DropoutLayer(layer, name='f2_drop', p=dropout_fc)
    # atlas input (15 features); note it is used without dropout
    atlas_layer = InputLayer(name='in4', shape=(None, 15))
    layer = ConcatLayer(name='elem_channels2', incomings=[layer, atlas_layer])

    # FC layer 270
    layer = DenseLayer(layer, name='fc_2', num_units=270)
    layer = prelu(layer, name='prelu_f2')

    # FC output 15 (softmax)
    net_layer = DenseLayer(layer,
                           name='out_layer',
                           num_units=15,
                           nonlinearity=softmax)

    net = NeuralNet(
        layers=net_layer,
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.001,
        on_epoch_finished=[
            save_weights,
            save_training_history,
            early_stopping,
        ],
        verbose=t_verbose,
        max_epochs=num_epochs,
        train_split=TrainSplit(eval_size=train_split_perc),
    )

    if options['load_weights'] == 'True':
        try:
            print "    --> loading weights from ", net_weights
            net.load_params_from(net_weights)
        except IOError:
            # no previous weights found; keep the randomly initialized parameters
            pass

    return net
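A minimal usage sketch for build_model (hypothetical values; the option keys mirror the ones read inside the function, and the './weights' directory is an assumption):

options = {
    'experiment': 'baseline_run',   # sub-folder / file prefix for the saved weights
    'net_verbose': 1,
    'train_split': 0.25,            # fraction of the data held out for validation
    'max_epochs': 200,
    'patience': 20,                 # early-stopping patience, in epochs
    'patch_size': (32, 32, 32),     # only the first dimension is used here
    'load_weights': 'False',
}
net = build_model('./weights', options)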
Example #50
def main(input_file, model_path):

    batch_size = 128
    nb_classes = 62  # A-Z, a-z and 0-9
    nb_epoch = 2

    # Input image dimensions
    img_rows, img_cols = 32, 32

    # Path of data files
    path = input_file

    ### PREDICTION ###

    # Load the model with the highest validation accuracy
    # model.load_weights("best.kerasModelWeights")

    # Load Kaggle test set
    X_test = np.load(path + "/testPreproc_" + str(img_rows) + "_" +
                     str(img_cols) + ".npy")

    print X_test.shape

    # Load the preprocessed data and labels
    X_train_all = np.load(path + "/trainPreproc_" + str(img_rows) + "_" +
                          str(img_cols) + ".npy")
    Y_train_all = np.load(path + "/labelsPreproc.npy")

    X_train, X_val, Y_train, Y_val = \
        train_test_split(X_train_all, Y_train_all, test_size=0.25, stratify=np.argmax(Y_train_all, axis=1))

    print X_train.shape

    Y_val = convert_(Y_val)

    # reshape to the network's input format: (batch, channels, rows, cols)
    X_train = X_train.reshape((-1, 1, 32, 32))
    X_val = X_val.reshape((-1, 1, 32, 32))
    Y_val = Y_val.astype(np.uint8)

    input_image_vector_shape = (None, 1, 32, 32)

    net1 = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv2d1', layers.Conv2DLayer),
            ('maxpool1', layers.MaxPool2DLayer),
            ('conv2d2', layers.Conv2DLayer),
            ('maxpool2', layers.MaxPool2DLayer),
            ('conv2d3', layers.Conv2DLayer),
            ('maxpool3', layers.MaxPool2DLayer),
            # ('conv2d4', layers.Conv2DLayer),
            # ('maxpool4', layers.MaxPool2DLayer),
            ('dropout1', layers.DropoutLayer),
            ('dropout2', layers.DropoutLayer),
            ('dense', layers.DenseLayer),
            # ('dense2', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        input_shape=input_image_vector_shape,
        conv2d1_num_filters=128,
        conv2d1_filter_size=(3, 3),
        conv2d1_nonlinearity=lasagne.nonlinearities.tanh,
        conv2d1_W=lasagne.init.GlorotUniform(),
        conv2d1_pad=(2, 2),
        maxpool1_pool_size=(2, 2),
        conv2d2_num_filters=256,
        conv2d2_filter_size=(3, 3),
        conv2d2_nonlinearity=lasagne.nonlinearities.rectify,
        conv2d2_pad=(2, 2),
        maxpool2_pool_size=(2, 2),
        conv2d3_num_filters=512,
        conv2d3_filter_size=(3, 3),
        conv2d3_nonlinearity=lasagne.nonlinearities.rectify,
        conv2d3_pad=(2, 2),
        maxpool3_pool_size=(2, 2),
        dropout1_p=0.5,
        dropout2_p=0.5,
        dense_num_units=8192,
        dense_nonlinearity=lasagne.nonlinearities.rectify,

        # dense2_num_units = 16,
        # dense2_nonlinearity = lasagne.nonlinearities.rectify,
        output_nonlinearity=lasagne.nonlinearities.softmax,
        output_num_units=62,
        update=momentum,
        # 75.5% with tanh init and dense_num_units=256
        update_learning_rate=0.03,
        update_momentum=0.8,
        max_epochs=1000,
        verbose=1,
    )
    print "Loading Neural Net Parameters..."
    net1.initialize_layers()
    net1.load_weights_from('{}_weightfile.w'.format(model_path))
    net1.load_params_from('{}_paramfile.w'.format(model_path))

    from sklearn.metrics import classification_report, accuracy_score, confusion_matrix

    print 'Testing...'
    y_true, y_pred = Y_val, net1.predict(X_val)  # predict once and reuse below
    print(classification_report(y_true,
                                y_pred))  # per-class precision/recall/F1

    print y_pred
    print y_true
    cm = confusion_matrix(y_true, y_pred)
    correct = np.trace(cm)
    print 'Validation Accuracy: ' + str(float(correct) / float(np.sum(cm)))
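A hypothetical command-line entry point for main above; the script name and the use of sys.argv are assumptions, but the argument order mirrors main(input_file, model_path):

if __name__ == '__main__':
    import sys
    # usage: python predict_chars.py <data_dir> <model_prefix>
    main(sys.argv[1], sys.argv[2])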
Example #51
    total_test_time_points = len(X_test) // NO_TIME_POINTS
    remainder_test_points = len(X_test) % NO_TIME_POINTS

    no_rows = total_test_time_points * NO_TIME_POINTS
    X_test = X_test[0:no_rows, :]

    X_test = X_test.transpose()
    X_test_Samples = np.split(X_test, total_test_time_points, axis=1)
    X_test = np.asarray(X_test_Samples)


###########################################################################
#######get predictions and write to files for series 9 and series 10#######
    print("Testing subject%d...." %(subject))
    # reload the net's current (best) parameter values before predicting
    params = net.get_all_params_values()
    net.load_params_from(params)
    probabilities = net.predict_proba(X_test)

    sub9 = 'subj{0}_series{1}'.format(subject, 9)
    data_len9 = test_dict[sub9]
    total_time_points9 = data_len9 // NO_TIME_POINTS
    remainder_data9 = data_len9 % NO_TIME_POINTS

    sub10 = 'subj{0}_series{1}'.format(subject, 10)
    data_len10 = test_dict[sub10]
    total_time_points10 = data_len10 // NO_TIME_POINTS
    remainder_data10 = data_len10 % NO_TIME_POINTS

    total_test_points = total_time_points9+total_time_points10

    for i, p in enumerate(probabilities):
Example #52
    (DropoutLayer, dict(name='l7drop', p=0.5)),
    (DenseLayer, dict(name='l8', num_units=256)),
    (FeaturePoolLayer, dict(name='l8p', pool_size=2)),
    (DropoutLayer, dict(name='l8drop', p=0.5)),
    (DenseLayer,
     dict(name='out', num_units=10, nonlinearity=nonlinearities.softmax)),
],
                regression=False,
                objective_loss_function=objectives.categorical_crossentropy,
                update=updates.adam,
                update_learning_rate=1e-3,
                batch_iterator_train=train_iterator,
                batch_iterator_test=test_iterator,
                on_epoch_finished=[
                    save_weights, save_training_history, plot_training_history,
                    early_stopping
                ],
                verbose=10,
                max_epochs=100)

if __name__ == '__main__':
    X_train, X_test, y_train, y_test = load_data(test_size=0.25,
                                                 random_state=42)
    net.fit(X_train, y_train)

    # Load the best weights from pickled model
    net.load_params_from('./examples/mnist/model_weights.pkl')

    score = net.score(X_test, y_test)
    print 'Final score %.4f' % score
Example #53
class EmotionClassifier:
    def __init__(self,
                 face_size=192,
                 epochs=100,
                 learning_rate=theano.shared(np.cast['float32'](0.1))):
        self.network = NeuralNet(
            layers=[('input', InputLayer), ('conv1', Conv2DLayer),
                    ('conv2', Conv2DLayer), ('pool1', MaxPool2DLayer),
                    ('conv3', Conv2DLayer), ('conv4', Conv2DLayer),
                    ('pool2', MaxPool2DLayer), ('conv5', Conv2DLayer),
                    ('conv6', Conv2DLayer), ('pool3', MaxPool2DLayer),
                    ('conv7', Conv2DLayer), ('conv8', Conv2DLayer),
                    ('pool4', MaxPool2DLayer), ('hidden1', DenseLayer),
                    ('hidden2', DenseLayer), ('output', DenseLayer)],
            input_shape=(None, 1, face_size, face_size),
            conv1_num_filters=32,
            conv1_filter_size=(3, 3),
            conv1_nonlinearity=lasagne.nonlinearities.rectify,
            conv1_W=lasagne.init.GlorotUniform(),
            conv2_num_filters=32,
            conv2_filter_size=(3, 3),
            conv2_nonlinearity=lasagne.nonlinearities.rectify,
            conv2_W=lasagne.init.GlorotUniform(),
            pool1_pool_size=(2, 2),
            conv3_num_filters=32,
            conv3_filter_size=(3, 3),
            conv3_nonlinearity=lasagne.nonlinearities.rectify,
            conv3_W=lasagne.init.GlorotUniform(),
            conv4_num_filters=32,
            conv4_filter_size=(3, 3),
            conv4_nonlinearity=lasagne.nonlinearities.rectify,
            conv4_W=lasagne.init.GlorotUniform(),
            pool2_pool_size=(2, 2),
            conv5_num_filters=64,
            conv5_filter_size=(3, 3),
            conv5_nonlinearity=lasagne.nonlinearities.rectify,
            conv5_W=lasagne.init.GlorotUniform(),
            conv6_num_filters=32,
            conv6_filter_size=(3, 3),
            conv6_nonlinearity=lasagne.nonlinearities.rectify,
            conv6_W=lasagne.init.GlorotUniform(),
            pool3_pool_size=(2, 2),
            conv7_num_filters=32,
            conv7_filter_size=(3, 3),
            conv7_nonlinearity=lasagne.nonlinearities.rectify,
            conv7_W=lasagne.init.GlorotUniform(),
            conv8_num_filters=32,
            conv8_filter_size=(3, 3),
            conv8_nonlinearity=lasagne.nonlinearities.rectify,
            conv8_W=lasagne.init.GlorotUniform(),
            pool4_pool_size=(2, 2),
            hidden1_num_units=4096,
            hidden1_nonlinearity=lasagne.nonlinearities.rectify,
            hidden2_num_units=2048,
            output_nonlinearity=lasagne.nonlinearities.softmax,
            output_num_units=8,
            regression=False,
            update=adadelta,
            # update_momentum=theano.shared(np.cast['float32'](0.9)),
            # on_epoch_finished=[
            #     EarlyStopping(patience=20)
            #     AdjustVariable('update_learning_rate', start=learning_start, stop=learning_end),
            #     AdjustVariable('update_momentum', start=0.9, stop=0.999),
            # ],
            # batch_iterator_train=ShufflingBatchIteratorMixin,
            # batch_iterator_train=BatchIterator(251, shuffle=True),
            max_epochs=epochs,
            verbose=2)

    def train(self, x_train, y_train, epoch=0):
        """
        Fits training data to the Convolutional Neural Network
        :param epoch: number of epochs
        :param x_train: Training x values
        :param y_train: Training y values
        """
        if epoch == 0:
            self.network.fit(x_train, y_train)
        else:
            self.network.fit(x_train, y_train, epoch)

    def predict(self, image):
        return self.network.predict(image)

    def save_network_state(self, paramsname="params.npz"):
        self.network.save_params_to(paramsname)

    def load_network_state(self, paramsname="params.npz"):
        self.network.load_params_from(paramsname)
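A minimal, hypothetical usage sketch for EmotionClassifier; the array shapes follow the (None, 1, face_size, face_size) input layer above, while the random data and file name are assumptions for illustration only:

import numpy as np

clf = EmotionClassifier(face_size=192, epochs=5)
x_train = np.random.rand(32, 1, 192, 192).astype(np.float32)  # dummy face crops
y_train = np.random.randint(0, 8, size=32).astype(np.int32)   # 8 emotion classes
clf.train(x_train, y_train)
clf.save_network_state('emotion_params.npz')
print clf.predict(x_train[:4])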
Example #54
class graspNet():
    def __init__(self, param_file=None):
        net_divider = 1.0

        self.layers = [
            (L.layers.InputLayer, {
                'shape': (None, 7, X_H, X_W),
                'name': 'input'
            }),
            (L.layers.Conv2DLayer, {
                'num_filters': 96,
                'stride': 1,
                'pad': 3,
                'filter_size': (7, 7),
                'nonlinearity': L.nonlinearities.rectify,
                'flip_filters': False,
                'name': 'conv0'
            }),
            (L.layers.DropoutLayer, {
                'p': 0.0
            }),
            (L.layers.MaxPool2DLayer, {
                'pool_size': 3,
                'ignore_border': False
            }),
            (L.layers.Conv2DLayer, {
                'num_filters': 256,
                'stride': 1,
                'filter_size': (5, 5),
                'nonlinearity': L.nonlinearities.rectify,
                'flip_filters': False,
                'name': 'conv1'
            }),
            (L.layers.DropoutLayer, {
                'p': 0.0
            }),
            (L.layers.MaxPool2DLayer, {
                'pool_size': 2,
                'ignore_border': False
            }),
            (L.layers.Conv2DLayer, {
                'num_filters': 512,
                'stride': 1,
                'pad': 1,
                'filter_size': (3, 3),
                'nonlinearity': L.nonlinearities.rectify,
                'flip_filters': False,
                'name': 'conv2'
            }),
            (L.layers.DropoutLayer, {
                'p': 0.0
            }),
            (L.layers.Conv2DLayer, {
                'num_filters': 512,
                'stride': 1,
                'pad': 1,
                'filter_size': (3, 3),
                'nonlinearity': L.nonlinearities.rectify,
                'flip_filters': False,
                'name': 'conv3'
            }),
            (L.layers.DropoutLayer, {
                'p': 0.0
            }),
            (L.layers.Conv2DLayer, {
                'num_filters': 512,
                'stride': 1,
                'pad': 1,
                'filter_size': (3, 3),
                'nonlinearity': L.nonlinearities.rectify,
                'flip_filters': False,
                'name': 'conv4'
            }),
            (L.layers.DropoutLayer, {
                'p': 0.0
            }),
            (L.layers.MaxPool2DLayer, {
                'pool_size': 3,
                'ignore_border': False
            }),
            (L.layers.DenseLayer, {
                'num_units': 4096,
                'nonlinearity': L.nonlinearities.sigmoid,
                'name': 'dense0'
            }),
            (L.layers.DropoutLayer, {
                'p': 0.0
            }),
            (L.layers.DenseLayer, {
                'num_units': 4096,
                'nonlinearity': L.nonlinearities.sigmoid,
                'name': 'dense1'
            }),
            (L.layers.DenseLayer, {
                'num_units': 1,
                'nonlinearity': L.nonlinearities.sigmoid
            }),
        ]

        self.net = NeuralNet(
            layers=self.layers,
            update_learning_rate=0.015,
            update=L.updates.nesterov_momentum,
            update_momentum=0.9,
            #update=L.updates.sgd,
            regression=True,
            verbose=1,
            eval_size=0.15,
            objective_loss_function=L.objectives.binary_crossentropy,
            max_epochs=200)

        if param_file is not None:
            print "Loading parameters from ", param_file
            self.net.load_params_from(param_file)

    def write_filters_to_file(self, fname):
        params = self.net.get_all_params()
        layer_counter = 0
        for p in params:
            print p
            layer_counter += 1
            filter_counter = 0
            weights = p.get_value()

            if len(weights.shape) > 2:
                for f in weights:
                    kernel = np.asarray(f, dtype=np.float32)
                    kernel = kernel * 255 + 128
                    # upscale the kernel for viewing, normalize, then rescale
                    # into a visible intensity range before writing to disk
                    viz = cv2.resize(kernel[0],
                                     None,
                                     fx=20,
                                     fy=20,
                                     interpolation=cv2.INTER_CUBIC)
                    cv2.normalize(viz, viz)
                    viz = viz * 12000
                    cv2.imwrite(
                        fname + "_" + str(layer_counter) + "_" +
                        str(filter_counter) + ".png", viz)
                    filter_counter += 1

    # evaluates the input array over the neural net
    def eval(self, x):
        y = np.zeros((x.shape[0], ))
        for i in range(x.shape[0]):
            pred = self.net.predict(np.array([x[i]]))
            y[i] = pred
            print i, pred

        return y

    # train the network in input (x,y)
    # param_file: input parameter file (perhaps from previous trainings)
    # out_param_file: output path to write resulting params to
    # filter_file: output path to write images of filters for visualization
    # load_params: boolean flag, when set: 1 loads input parameters from <param_file>
    # pretrain: boolean flag, when set: 1 loads parameters from pretrained network

    def train(self,
              X,
              y,
              param_file=None,
              out_param_file=None,
              filter_file=None,
              load_params=False,
              pretrain=False):

        if pretrain:
            params = createNoLearnParams(param_file)
            print "Parameters", params[1].shape
            conv0_W = np.concatenate((params[0], params[0]), axis=1)
            conv0_W = np.concatenate((conv0_W, params[0]), axis=1)
            conv0_W = conv0_W[:, :7, :, :]

            conv0_b = np.concatenate((params[1], params[1]), axis=0)
            conv0_b = np.concatenate((conv0_b, params[1]), axis=0)
            conv0_b = conv0_b[:96]

            conv1_W = np.concatenate((params[2], params[2]), axis=1)
            conv1_W = np.concatenate((conv1_W, params[2]), axis=1)
            conv1_W = conv1_W[:, :96, :, :]

            conv1_b = np.concatenate((params[3], params[3]), axis=0)
            conv1_b = np.concatenate((conv1_b, params[3]), axis=0)
            conv1_b = conv1_b[:256]

            conv2_W = np.concatenate((params[4], params[4]), axis=1)
            conv2_W = np.concatenate((conv2_W, params[4]), axis=1)
            conv2_W = conv2_W[:, :256, :, :]

            conv2_b = np.concatenate((params[5], params[5]), axis=0)
            conv2_b = np.concatenate((conv2_b, params[5]), axis=0)
            conv2_b = conv2_b[:512]

            conv3_W = np.concatenate((params[6], params[6]), axis=1)
            conv3_W = np.concatenate((conv3_W, params[6]), axis=1)
            conv3_W = conv3_W[:, :512, :, :]

            conv3_b = np.concatenate((params[7], params[7]), axis=0)
            conv3_b = np.concatenate((conv3_b, params[7]), axis=0)
            conv3_b = conv3_b[:512]

            conv4_W = np.concatenate((params[8], params[8]), axis=1)
            conv4_W = np.concatenate((conv4_W, params[8]), axis=1)
            conv4_W = conv4_W[:, :512, :, :]

            conv4_b = np.concatenate((params[9], params[9]), axis=0)
            conv4_b = np.concatenate((conv4_b, params[9]), axis=0)
            conv4_b = conv4_b[:512]

            dense0_W = np.concatenate((params[10], params[10]), axis=1)
            dense0_W = np.concatenate((dense0_W, params[10]), axis=1)
            dense0_W = dense0_W[:2560, :4096]

            dense0_b = np.concatenate((params[11], params[11]), axis=0)
            dense0_b = np.concatenate((dense0_b, params[11]), axis=0)
            dense0_b = dense0_b[:4096]

            dense1_W = np.concatenate((params[12], params[12]), axis=1)
            dense1_W = np.concatenate((dense1_W, params[12]), axis=1)
            dense1_W = dense1_W[:4096, :4096]

            dense1_b = np.concatenate((params[13], params[13]), axis=0)
            dense1_b = np.concatenate((dense1_b, params[13]), axis=0)
            dense1_b = dense1_b[:4096]

            #http://arxiv.org/pdf/1405.3531v4.pdf
            self.net = NeuralNet(
                layers=self.layers,
                conv0_W=np.array(conv0_W),
                conv0_b=np.array(conv0_b),
                conv1_W=np.array(conv1_W),
                conv1_b=np.array(conv1_b),
                conv2_W=np.array(conv2_W),
                conv2_b=np.array(conv2_b),
                conv3_W=np.array(conv3_W),
                conv3_b=np.array(conv3_b),
                conv4_W=np.array(conv4_W),
                conv4_b=np.array(conv4_b),
                dense0_W=np.array(dense0_W),
                dense0_b=np.array(dense0_b),
                dense1_W=np.array(dense1_W),
                dense1_b=np.array(dense1_b),
                update_learning_rate=0.015,
                update=L.updates.nesterov_momentum,
                update_momentum=0.9,
                #update=L.updates.sgd,
                regression=True,
                verbose=1,
                eval_size=0.15,
                objective_loss_function=L.objectives.binary_crossentropy,
                max_epochs=200)

            if load_params:
                print "Loading parameters from ", param_file
                self.net.load_params_from(param_file)

        print "TRAINING!"
        print "input shape: ", X.shape
        print "output shape: ", y.shape
        print "Example X", X[0]
        print "Example Y", y[0]
        #print self.net.get_params()

        self.net.fit(X, y)
        print(self.net.score(X, y))

        print "Saving network parameters to ", out_param_file, "..."
        file = open(out_param_file, 'w+')
        file.close()
        self.net.save_weights_to(out_param_file)

        print "Saving filters to ", filter_file
        self.write_filters_to_file(filter_file)

        plt = visualize.plot_loss(self.net)
        plt.savefig(DIR_PROJ + 'loss.png')  # save before show(), otherwise the figure may be empty
        plt.show()
        plt.clf()
        plt.cla()

        print "Sample predictions"

        for i in range(10):
            pred = self.net.predict(np.array([X[i]]))

            print "---------------------------------"
            print i
            print "prediction", pred
            print y[i]
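A hypothetical end-to-end sketch for graspNet; X_H and X_W (the patch dimensions) and DIR_PROJ are module-level constants assumed to be defined elsewhere in the script, and the random data and output paths are illustrative only:

import numpy as np

X = np.random.rand(100, 7, X_H, X_W).astype(np.float32)        # 7-channel input patches
y = np.random.randint(0, 2, size=(100, 1)).astype(np.float32)  # binary grasp labels

model = graspNet()
model.train(X, y,
            out_param_file='./grasp_params.w',
            filter_file='./filters/filter')
print model.eval(X[:10])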