Example #1
    def test_X_is_dict(self, TrainSplit, nn):
        X = {
            '1': np.random.random((100, 10)),
            '2': np.random.random((100, 10)),
        }
        y = np.repeat([0, 1, 2, 3], 25)

        X_train, X_valid, y_train, y_valid = TrainSplit(0.2)(X, y, nn)
        assert len(X_train['1']) == len(X_train['2']) == len(y_train) == 80
        assert len(X_valid['1']) == len(X_valid['2']) == len(y_valid) == 20
Example #2
    def __init__(self, isTrain):
        super(RegressionUniformBlending, self).__init__(isTrain)
        # data preprocessing
        #self.dataPreprocessing()

        self.net1 = NeuralNet(
                        layers=[  # three layers: one hidden layer
                            ('input', layers.InputLayer),
                            ('hidden', layers.DenseLayer),
                            #('hidden2', layers.DenseLayer),
                            #('hidden3', layers.DenseLayer),
                            ('output', layers.DenseLayer),
                            ],
                        # layer parameters:
                        input_shape=(None, 13),  # input dimension is 13
                        hidden_num_units=6,  # number of units in hidden layer
                        #hidden2_num_units=8,  # number of units in hidden layer
                        #hidden3_num_units=4,  # number of units in hidden layer
                        output_nonlinearity=None,  # linear output (identity nonlinearity)
                        output_num_units=1,  # output dimension is 1

                        # objective function
                        objective_loss_function=lasagne.objectives.squared_error,

                        # optimization method:
                        update=lasagne.updates.nesterov_momentum,
                        update_learning_rate=0.002,
                        update_momentum=0.4,

                        # use 20% as validation
                        train_split=TrainSplit(eval_size=0.2),

                        regression=True,  # flag to indicate we're dealing with a regression problem
                        max_epochs=100,  # we want to train this many epochs
                        verbose=0,
                        )

        # Create linear regression object
        self.linRegr = linear_model.LinearRegression()

        # Create KNN regression object
        self.knn = neighbors.KNeighborsRegressor(86, weights='distance')

        # Create Decision Tree regression object
        self.decisionTree = DecisionTreeRegressor(max_depth=7, max_features=None)

        # Create AdaBoost regression object
        decisionReg = DecisionTreeRegressor(max_depth=10)
        rng = np.random.RandomState(1)
        self.adaReg = AdaBoostRegressor(decisionReg,
                          n_estimators=400,
                          random_state=rng)

        # Create Random Forest regression object
        self.model = RandomForestRegressor(max_features='sqrt', n_estimators=32, max_depth=39)
Example #3
    def __init__(self, outputShape, testData, modelSaver):
        self.set_network_specific_settings()
        modelSaver.model = self
        self.net = NeuralNet(
            layers=[
                ('input', layers.InputLayer),
                ('hidden1', layers.DenseLayer),
                ('hidden2', layers.DenseLayer),
                ('output', layers.DenseLayer),
            ],

            # Layer parameter
            input_shape=(None, Settings.NN_CHANNELS,
                         Settings.NN_INPUT_SHAPE[0], Settings.NN_INPUT_SHAPE[1]
                         ),  # variable batch size, single row shape
            hidden1_num_units=500,
            hidden2_num_units=50,
            output_num_units=outputShape,
            output_nonlinearity=lasagne.nonlinearities.softmax,

            # optimization method:
            update=nesterov_momentum,
            update_learning_rate=theano.shared(
                utils.to_float32(Settings.NN_START_LEARNING_RATE)),
            update_momentum=theano.shared(
                utils.to_float32(Settings.NN_START_MOMENTUM)),
            batch_iterator_train=AugmentingLazyBatchIterator(
                Settings.NN_BATCH_SIZE,
                testData,
                "train",
                True,
                loadingSize=(50, 50)),
            batch_iterator_test=LazyBatchIterator(Settings.NN_BATCH_SIZE,
                                                  testData,
                                                  "valid",
                                                  False,
                                                  newSegmentation=False),
            train_split=TrainSplit(
                eval_size=0.0),  # we cross-validate on our own
            regression=False,  # classification problem
            on_epoch_finished=[
                AdjustVariable('update_learning_rate',
                               start=Settings.NN_START_LEARNING_RATE,
                               stop=0.0001),
                AdjustVariable('update_momentum',
                               start=Settings.NN_START_MOMENTUM,
                               stop=0.999),
                TrainingHistory("?", str(self), [], modelSaver),
                EarlyStopping(150),
                modelSaver,
            ],
            max_epochs=Settings.NN_EPOCHS,
            verbose=1,
        )
Example #4
def regress(X, y):
    l = InputLayer(shape=(None, X.shape[1]))
    l = DenseLayer(l, num_units=100, nonlinearity=tanh)
    l = DenseLayer(l, num_units=y.shape[1], nonlinearity=None)
    net = NeuralNet(l,
                    regression=True,
                    update_learning_rate=0.05,
                    train_split=TrainSplit(eval_size=0.2),
                    verbose=1)
    net.fit(X, y)
    print(net.score(X, y))
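A quick way to smoke-test the regress function above is with synthetic data; the sketch below is illustrative only (array sizes, the noisy linear target and the variable names are made up) and assumes regress and its lasagne/nolearn imports are already in scope.

import numpy as np

# Hypothetical smoke test for regress(); shapes and noise level are arbitrary.
rng = np.random.RandomState(0)
X_demo = rng.rand(200, 5).astype('float32')
W_true = rng.rand(5, 2).astype('float32')
y_demo = (X_demo.dot(W_true) + 0.01 * rng.randn(200, 2)).astype('float32')

regress(X_demo, y_demo)  # trains with a 20% validation split and prints net.score(X, y)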
Example #5
def main():
    ################
    # LOAD DATASET #
    ################
    dataset = './data/ubiquitous_aug.hkl'
    kfd = './data/ubiquitous_kfold.hkl'
    print('Loading dataset {}...'.format(dataset))
    X, y = hkl.load(open(dataset, 'r'))
    X = X.reshape(-1, 4, 1, 400).astype(floatX)
    y = y.astype('int32')
    print('X shape: {}, y shape: {}'.format(X.shape, y.shape))
    kf = hkl.load(open(kfd, 'r'))
    kfold = [(train, test) for train, test in kf]
    (train, test) = kfold[0]
    print('train_set size: {}, test_set size: {}'.format(len(train), len(test)))
    # shuffle +/- labels in minibatch
    print('shuffling train_set and test_set')
    shuffle(train)
    shuffle(test)
    X_train = X[train]
    X_test = X[test]
    y_train = y[train]
    y_test = y[test]
    print('data prepared!')

    layers = [
            (InputLayer, {'shape': (None, 4, 1, 400)}),
            (Conv2DLayer, {'num_filters': 64, 'filter_size': (1, 4)}),
            (Conv2DLayer, {'num_filters': 64, 'filter_size': (1, 3)}),
            (Conv2DLayer, {'num_filters': 64, 'filter_size': (1, 3)}),
            (MaxPool2DLayer, {'pool_size': (1, 2)}),
            (Conv2DLayer, {'num_filters': 32, 'filter_size': (1, 2)}),
            (Conv2DLayer, {'num_filters': 32, 'filter_size': (1, 2)}),
            (Conv2DLayer, {'num_filters': 32, 'filter_size': (1, 2)}),
            (MaxPool2DLayer, {'pool_size': (1, 2)}),
            (DenseLayer, {'num_units': 64}),
            (DropoutLayer, {}),
            (DenseLayer, {'num_units': 64}),
            (DenseLayer, {'num_units': 2, 'nonlinearity': softmax})]

    net = NeuralNet(
            layers=layers,
            max_epochs=100,
            update=adam,
            update_learning_rate=1e-4,
            train_split=TrainSplit(eval_size=0.1),
            on_epoch_finished=[
                AdjustVariable(1e-4, target=0, half_life=20)],
            verbose=2)

    net.fit(X_train, y_train)
    plot_loss(net)
Example #6
    def __init__(self, net_type, input_shape, output_size,
                     regression=False,
                     epochs=100, 
                     learning_rate=0.0002,
                     verbose=1):

        layers = self.get_layers(net_type, input_shape, output_size)
        NeuralNet.__init__(self,
                           layers=layers,
                           max_epochs=epochs,
                           regression=regression,
                           update=lasagne.updates.adam,
                           update_learning_rate=learning_rate,
                           objective_l2=0.0025,
                           train_split=TrainSplit(eval_size=0.05),
                           verbose=verbose,
                          )     
Example #7
def main(resume=None):
    l = 300
    dataset = './data/ubiquitous_train.hkl'
    print('Loading dataset {}...'.format(dataset))
    X_train, y_train = hkl.load(dataset)
    X_train = X_train.reshape(-1, 4, 1, l).astype(floatX)
    y_train = np.array(y_train, dtype='int32')
    indice = np.arange(X_train.shape[0])
    np.random.shuffle(indice)
    X_train = X_train[indice]
    y_train = y_train[indice]
    print('X_train shape: {}, y_train shape: {}'.format(X_train.shape, y_train.shape))

    layers = [
            (InputLayer, {'shape': (None, 4, 1, l)}),
            (Conv2DLayer, {'num_filters': 64, 'filter_size': (1, 4)}),
            (Conv2DLayer, {'num_filters': 64, 'filter_size': (1, 3)}),
            (Conv2DLayer, {'num_filters': 64, 'filter_size': (1, 3)}),
            (MaxPool2DLayer, {'pool_size': (1, 2)}),
            (Conv2DLayer, {'num_filters': 64, 'filter_size': (1, 2)}),
            (Conv2DLayer, {'num_filters': 64, 'filter_size': (1, 2)}),
            (Conv2DLayer, {'num_filters': 64, 'filter_size': (1, 2)}),
            (MaxPool2DLayer, {'pool_size': (1, 2)}),
            (DenseLayer, {'num_units': 64}),
            (DropoutLayer, {}),
            (DenseLayer, {'num_units': 64}),
            (DenseLayer, {'num_units': 2, 'nonlinearity': softmax})]

    lr = theano.shared(np.float32(1e-4))

    net = NeuralNet(
            layers=layers,
            max_epochs=100,
            update=adam,
            update_learning_rate=lr,
            train_split=TrainSplit(eval_size=0.1),
            on_epoch_finished=[
                AdjustVariable(lr, target=1e-8, half_life=20)],
            verbose=4)

    if resume is not None:
        net.load_params_from(resume)

    net.fit(X_train, y_train)

    net.save_params_to('./models/net_params.pkl')
Example #8
def xgb_train_data(train, train_y, ttf):

    num_features = train.shape[1]
    X = train.copy()
    y = np.array(train_y[offset:LINES].copy(), dtype=np.int32)
    rbm1 = KNeighborsClassifier(n_neighbors=5).fit(X[0:offset-1,:], train_y[0:offset-1])
    rbm2 = RandomForestClassifier(n_estimators=100, criterion='entropy', max_features='auto', bootstrap=False, oob_score=False, n_jobs=8, verbose=1).fit(X[0:offset-1,:], train_y[0:offset-1])
    layers0 = [('input', InputLayer),
           ('dropout0', DropoutLayer),
           ('dense0', DenseLayer),
           ('dropout1', DropoutLayer),
           ('dense1', DenseLayer),
           ('dropout2', DropoutLayer),
           ('output', DenseLayer)]

    rbm3 = NeuralNet(layers=layers0,
                     input_shape=(None, num_features),
                     dropout0_p=0.05,  #theano.shared(float32(0.1)),
                     dense0_num_units=100,
                     dropout1_p=0.1,  #theano.shared(float32(0.5)),
                     dense1_num_units=200,
                     dropout2_p=0.3,  #theano.shared(float32(0.8)),
                     output_num_units=num_classes,
                     output_nonlinearity=softmax,
                     update=nesterov_momentum,
                     #update_learning_rate=0.005,
                     #update_momentum=0.9,
                     update_learning_rate=theano.shared(float32(0.001)),
                     update_momentum=theano.shared(float32(0.9)),
                     #objective_loss_function=log_loss,
                     train_split=TrainSplit(0.2),
                     verbose=1,
                     max_epochs=300,
                     on_epoch_finished=[
                         AdjustVariable('update_learning_rate', start=0.001, stop=0.0001),
                         AdjustVariable('update_momentum', start=0.9, stop=0.99),
                         #AdjustDropout('dropout0_p', start=0.1, stop=0.2),
                         #AdjustDropout('dropout1_p', start=0.5, stop=0.4),
                         #AdjustDropout('dropout2_p', start=0.8, stop=0.9)
                     ])
    rbm3 = rbm3.fit(X[0:offset-1, :], train_y[0:offset-1])
    X = np.hstack([X[offset:LINES, :], rbm1.predict_proba(X[offset:LINES, :]),
                   rbm2.predict_proba(X[offset:LINES, :]), rbm3.predict_proba(X[offset:LINES, :])])
    return np.array(X, dtype=np.float32), y, rbm1, rbm2, rbm3
Example #9
def network():

    net = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv1', ConvLayer),
            ('conv2', ConvLayer),
            ('conv3', ConvLayer),
            ('conv4', ConvLayer),
            ('hidden1', layers.DenseLayer),
            ('hidden2', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        input_shape=(None, cr, 5, 5),
        conv1_num_filters=3 * cr,
        conv1_filter_size=(3, 3),
        conv1_pad=1,
        conv2_num_filters=6 * cr,
        conv2_filter_size=(3, 3),
        conv2_pad=1,
        conv3_num_filters=6 * cr,
        conv3_filter_size=(3, 3),
        conv4_num_filters=9 * cr,
        conv4_filter_size=(3, 3),
        hidden1_num_units=6 * cr,
        hidden2_num_units=3 * cr,
        output_num_units=16,
        output_nonlinearity=softmax,
        update=adagrad,
        update_learning_rate=theano.shared(np.float32(0.005)),
        #update_momentum=theano.shared(np.float32(0.9)),
        regression=False,
        on_epoch_finished=[
            AdjustVariable('update_learning_rate', start=0.005, stop=0.005),
            #AdjustVariable('update_momentum', start=0.9, stop=0.999),
            EarlyStopping(patience=40),
        ],
        train_split=TrainSplit(eval_size=0.1),
        batch_iterator_train=FlipBatchIterator(batch_size=512),
        max_epochs=500,
        verbose=2,
    )
    return net
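Assuming cr (the channel count) and the custom FlipBatchIterator, AdjustVariable and EarlyStopping callbacks used above are defined, training the returned net follows the usual nolearn fit/predict pattern; the array shapes and dummy labels below are assumptions for illustration only.

import numpy as np

cr = 4  # assumed channel count; must match the global read inside network()
X = np.random.random((1024, cr, 5, 5)).astype('float32')
y = np.random.randint(0, 16, size=1024).astype('int32')   # 16 classes, as in output_num_units

net = network()
net.fit(X, y)                   # trains with the 10% validation split configured above
probs = net.predict_proba(X[:8])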
Example #10
    def classifyNN_nolearn(self):

        utils.mkdir_p(self.outDir)
        self.readDataset()
        nn = NeuralNet(
            layers=[  # network
                ('input', InputLayer),
                ('fc1', DenseLayer), ('fc2', DenseLayer), ('fc3', DenseLayer),
                ('fc4', DenseLayer), ('fc5', DenseLayer), ('fc6', DenseLayer),
                ('output', DenseLayer)
            ],
            # layer params
            input_shape=(None, self.X_train.shape[1]),
            fc1_num_units=108,
            fc2_num_units=216,
            fc3_num_units=432,
            fc4_num_units=864,
            fc5_num_units=1728,
            fc6_num_units=3456,
            output_num_units=7,
            # non-linearities
            fc1_nonlinearity=nl.tanh,
            fc2_nonlinearity=nl.tanh,
            fc3_nonlinearity=nl.tanh,
            fc4_nonlinearity=nl.tanh,
            fc5_nonlinearity=nl.tanh,
            fc6_nonlinearity=nl.tanh,
            output_nonlinearity=nl.softmax,
            # update params
            update=upd.momentum,
            update_learning_rate=0.01,
            update_momentum=0.9,
            train_split=TrainSplit(eval_size=0.2),
            verbose=1,
            max_epochs=5000)

        nn.fit(self.X_train.astype(np.float32),
               self.y_train.astype(np.int32) - 1)
        print(
            'Prediction.....................................................')
        y_test = nn.predict(self.X_test.astype(np.float32))
        self.save_sub(self.outDir, y_test + 1)
Example #11
def build_fine_tuning_model(nlayers):
	net3 = NeuralNet(
		layers=nlayers,

		# learning parameters
		update=lasagne.updates.nesterov_momentum,
		#update_learning_rate=theano.shared(np.float32(0.1)),
		#update_momentum=theano.shared(np.float32(0.9)),
		update_learning_rate=0.01,
		update_momentum=0.9,
		regression=True,
		#on_epoch_finished=[
		#	AdjustVariable('update_learning_rate', start=0.1, stop=0.0001),
		#	AdjustVariable('update_momentum', start=0.9, stop=0.9999),
		#],
		max_epochs=10000,  # maximum number of training epochs
		train_split=TrainSplit(eval_size=0.4),
		verbose=1,
	)
	return net3
Example #12
def model_initial(X_train, y_train, max_iter=5):
    global params, val_acc
    params = []
    val_acc = np.zeros(max_iter)
    lr = theano.shared(np.float32(1e-4))
    for iteration in range(max_iter):
        print('Initializing weights (%d/%d) ...' % (iteration + 1, max_iter))
        network_init = create_network()
        net_init = NeuralNet(
            network_init,
            max_epochs=3,
            update=adam,
            update_learning_rate=lr,
            train_split=TrainSplit(eval_size=0.1),
            batch_iterator_train=BatchIterator(batch_size=32),
            batch_iterator_test=BatchIterator(batch_size=64),
            on_training_finished=[SaveTrainHistory(iteration=iteration)],
            verbose=0)
        net_init.initialize()
        net_init.fit(X_train, y_train)
Example #13
def model_train(X_train, y_train, learning_rate=1e-4, epochs=50):
    network = create_network()
    lr = theano.shared(np.float32(learning_rate))
    net = NeuralNet(
        network,
        max_epochs=epochs,
        update=adam,
        update_learning_rate=lr,
        train_split=TrainSplit(eval_size=0.1),
        batch_iterator_train=BatchIterator(batch_size=32),
        batch_iterator_test=BatchIterator(batch_size=64),
        #on_training_started=[LoadBestParam(iteration=val_acc.argmax())],
        on_epoch_finished=[EarlyStopping(patience=5)],
        verbose=1)
    print('Loading pre-training weights...')
    net.load_params_from(params[val_acc.argmax()])
    print('Continuing training...')
    net.fit(X_train, y_train)
    print('Model training finished.')
    return net
Example #14
def make_net(W, H, size1=20, size2=15):
    net = NeuralNet(
        layers=[
            ('input', InputLayer),
            ('dense1', DenseLayer),
            ('dense2', DenseLayer),
            ('output', DenseLayer),
        ],
        input_shape=(None, W * H),
        dense1_num_units=size1,
        dense1_nonlinearity=LeakyRectify(leakiness=0.1),
        dense1_W=HeNormal(),
        dense1_b=Constant(),
        dense2_num_units=size2,
        dense2_nonlinearity=LeakyRectify(leakiness=0.1),
        dense2_W=HeNormal(),
        dense2_b=Constant(),
        output_num_units=4,
        output_nonlinearity=softmax,
        output_W=HeNormal(),
        output_b=Constant(),
        update=nesterov_momentum,  # todo
        update_learning_rate=shared(float32(1.)),
        update_momentum=0.9,
        max_epochs=200,
        on_epoch_finished=[
            StopWhenOverfitting(),
            StopAfterMinimum(),
            AdjustLearningRate(1., 0.0001),
        ],

        #label_encoder = False,
        regression=True,
        verbose=1,
        batch_iterator_train=BatchIterator(batch_size=128),  # todo
        batch_iterator_test=BatchIterator(batch_size=128),
        train_split=TrainSplit(eval_size=0.1),
    )
    net.initialize()

    return net
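The sketch below shows one plausible way to call make_net; W, H and the random targets are placeholders (the net is configured with regression=True and four softmax outputs, so y is a float matrix with four columns), and the custom callbacks defined above are assumed to be importable.

import numpy as np

W, H = 20, 20                                        # assumed input image size
X = np.random.random((500, W * H)).astype(np.float32)
y = np.random.random((500, 4)).astype(np.float32)    # four output units, regression targets

net = make_net(W, H)
net.fit(X, y)   # AdjustLearningRate and the stopping callbacks configured above take over from here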
Example #15
def nnBagging():
    num_features = train.shape[1]
    layers0 = [('input', InputLayer),
               ('dropout0', DropoutLayer),
               ('dense0', DenseLayer),
               ('dropout1', DropoutLayer),
               ('dense1', DenseLayer),
               ('dropout2', DropoutLayer),
               ('output', DenseLayer)]

    net0 = NeuralNet(layers=layers0,
                     input_shape=(None, num_features),
                     dropout0_p=0.05,  #theano.shared(float32(0.1)),
                     dense0_num_units=100,
                     dropout1_p=0.1,  #theano.shared(float32(0.5)),
                     dense1_num_units=200,
                     dropout2_p=0.3,  #theano.shared(float32(0.8)),
                     output_num_units=38,
                     output_nonlinearity=softmax,
                     update=nesterov_momentum,
                     #update_learning_rate=0.005,
                     #update_momentum=0.9,
                     update_learning_rate=theano.shared(float32(0.001)),
                     update_momentum=theano.shared(float32(0.9)),
                     #objective_loss_function=log_loss,
                     train_split=TrainSplit(0.2),
                     verbose=1,
                     max_epochs=300,
                     on_epoch_finished=[
                         AdjustVariable('update_learning_rate', start=0.002, stop=0.0001),
                         AdjustVariable('update_momentum', start=0.9, stop=0.99),
                         #AdjustDropout('dropout0_p', start=0.1, stop=0.2),
                         #AdjustDropout('dropout1_p', start=0.5, stop=0.4),
                         #AdjustDropout('dropout2_p', start=0.8, stop=0.9)
                     ]
    )
    pred = skleanBagging(train, train_y, test, net0)
    sub   = pd.read_csv('../input/sample_submission.csv')
    cols = sub.columns.values.tolist()[1:]
    sub[cols] = pd.DataFrame(np.around(pred, decimals=5)).applymap(lambda x: round(x, 5))
    sub.to_csv('nn_bagging10.csv', index=False)
Example #16
    def create_and_train(self, training_data):
        # Load training data
        x, y, speaker_names = load(training_data)

        # Create network
        net = create_net(self.create_paper(x.shape[1]))

        # Set new batch iterator
        net.batch_iterator_train = SegmentBatchIterator(batch_size=128)
        net.batch_iterator_test = SegmentBatchIterator(batch_size=128)
        net.train_split = TrainSplit(eval_size=0)

        # Train the network
        self.logger.info("Fitting...")
        net.fit(x, y)

        # Comments from the old spectrogram_cnn_100 implementation; don't delete yet in case they're needed later
        # net.load_params_from('../data/experiments/paper/networks/net_100_81_not_reynolds.pickle');
        # net.save_params_to('../../data/experiments/paper/networks/net_100_81_not_reynolds.pickle');
        # network_helper.save(net, '../../data/experiments/paper/networks/net_100_81_not_reynolds.pickle')
        save(net, self.net_path)
Example #17
def create_nn():
    '''
    Create a neural net with one (or more) layers to fit the featurized data.
    A single softmax layer is equivalent to doing logistic regression on the featurized data.
    Result: 53% accuracy.
    Adding a fully connected hidden layer boosts accuracy to 67%.
    '''
    nn = NeuralNet(
        layers=[
            (InputLayer, {
                'name': 'input',
                'shape': (None, 4096),
            }),
            # (DropoutLayer, {
            #     'name': 'drop6',
            #     'p': .5,
            # }),
            (DenseLayer, {
                'name': 'fc7',
                'num_units': 4096,
            }),
            (DenseLayer, {
                'name': 'output',
                'num_units': 3,
                'nonlinearity': softmax,
            }),
        ],
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
    #         regression=True,  # flag to indicate we're dealing with regression problem
        max_epochs=1000,  # we want to train this many epochs
        verbose=1,
        train_split=TrainSplit(eval_size=0.25),

        )

    nn.initialize()

    return nn
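A hypothetical call sequence for the function above, assuming create_nn and its imports are in scope and that each row is a 4096-dimensional feature vector (e.g. the fc layer of a pretrained CNN); the array sizes and names below are illustrative only.

import numpy as np

features = np.random.random((300, 4096)).astype(np.float32)
labels = np.random.randint(0, 3, size=300).astype(np.int32)   # three classes, as in output_num_units

nn = create_nn()
nn.fit(features, labels)                   # holds out 25% of the rows for validation
probabilities = nn.predict_proba(features[:5])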
Example #18
def model_train(X_train, y_train, learning_rate=1e-4, epochs=10):
    l = 1000
    layer1 = InputLayer(shape=(None, 1, 4, l + 1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=64, filter_size=(4, 7))
    layer4 = Conv2DLayer(layer3, num_filters=64, filter_size=(1, 7))
    layer5 = Conv2DLayer(layer4, num_filters=64, filter_size=(1, 7))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, 6))
    layer7 = Conv2DLayer(layer6, num_filters=64, filter_size=(1, 7))
    layer8 = Conv2DLayer(layer7, num_filters=64, filter_size=(1, 7))
    layer9 = Conv2DLayer(layer8, num_filters=64, filter_size=(1, 7))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, 6))
    layer11 = Conv2DLayer(layer10, num_filters=64, filter_size=(1, 7))
    layer12 = Conv2DLayer(layer11, num_filters=64, filter_size=(1, 7))
    layer13 = Conv2DLayer(layer12, num_filters=64, filter_size=(1, 7))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, 6))
    layer14_d = DenseLayer(layer14, num_units=64)
    layer3_2 = DenseLayer(layer2_f, num_units=64)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    #layer15 = ConcatLayer([layer10_d,])
    layer16 = DropoutLayer(layer15)
    layer17 = DenseLayer(layer16, num_units=32)
    network = DenseLayer(layer17, num_units=2, nonlinearity=None)
    lr = theano.shared(np.float32(learning_rate))
    net = NeuralNet(
        network,
        max_epochs=epochs,
        update=adam,
        update_learning_rate=lr,
        regression=True,
        train_split=TrainSplit(eval_size=0.1),
        objective_loss_function=squared_error,
        #on_epoch_finished=[AdjustVariable(lr, target=1e-8, half_life=20)],
        verbose=4)
    net.fit(X_train, y_train)
    return net
Example #19
    def __init__(self, architecture, hyperparameter={}):
        self.archi = architecture
        self.hyperp = hyperparameter
        self._srng = RandomStreams(get_rng().randint(
            1, 2147462579))  # for adaptive noise
        self._srng2 = rStream(2147462579)

        # Create nolearn ModifiedNeuralNet object
        self.classifier  = ModifiedNeuralNet(
            layers=self.archi,
            max_epochs=self.hyperp.setdefault('epochs',100),
            update=self.hyperp.setdefault('optimizer',lasagne.updates.adam),
            update_learning_rate=self.hyperp.setdefault('learningRate',0.001),
            objective = modifiedObjective,
            objective_logitSens = self.hyperp.setdefault('logitSens',0.),
            objective_probSens = self.hyperp.setdefault('probSens',0.),
            objective_lossSens = self.hyperp.setdefault('lossSens',0.),
            objective_std = self.hyperp.setdefault('trainingDataStd',None),
            objective_loss_function=categorical_crossentropy,
            verbose=0,
            batch_iterator_train = DataAugmentationBatchIterator(
                self.hyperp.setdefault('batchSize',64),
                disturbLabelRate=self.hyperp.setdefault('disturbLabelRate',0),
                sdWidth=self.hyperp.setdefault('sdWidth',0),
                sdNumber=self.hyperp.setdefault('sdNumber',0),
                shuffle=True),
            batch_iterator_test = nolearn.lasagne.BatchIterator(
                self.hyperp.setdefault('batchSize',64), shuffle=False),
            train_split = TrainSplit(eval_size=self.hyperp.setdefault(
                'validationSetRatio',.1)),
            objective_l1 = self.hyperp.setdefault('l1',0.),
            objective_l2 = self.hyperp.setdefault('l2',0.01),
            on_training_started=[nolearn.lasagne.PrintLayerInfo()],
            on_epoch_finished=[getIndividualLosses,
                               printError,
                               addEndTimeToHistory,
                               printAdaptiveNoise,
                               saveBestValidNet])
        self.classifier.initialize()
Example #20
def build_model1(nlayers, epochs, frozen=False):
    net3 = NeuralNet(
        layers=nlayers,

        # learning parameters
        update=lasagne.updates.nesterov_momentum,
        update_learning_rate=theano.shared(np.float32(0.01)),
        update_momentum=theano.shared(np.float32(0.9)),
        regression=True,
        on_epoch_finished=[
            AdjustVariable('update_learning_rate', start=0.01, stop=0.00001),
            AdjustVariable('update_momentum', start=0.9, stop=0.9999),
            EarlyStopping(1000)
        ],
        max_epochs=epochs,  # maximum iteration
        train_split=TrainSplit(eval_size=0.4),
        verbose=1,
    )
    if frozen:
        for layer in net3.layers[:frozenlayers]:
            layer.trainable = False
    return net3
Example #21
def make_network():

    learning_rate = theano.shared(np.float32(LEARNING_RATE))

    args = dict  # alias so each layer's keyword arguments can be written as args(name=..., ...)
    layers = [(InputLayer,
               args(name="l_in", shape=(None, SEQ_LENGTH, symbol_count))),
              (LSTMLayer,
               args(name="l_forward_1",
                    num_units=N_HIDDEN,
                    grad_clipping=GRAD_CLIP,
                    nonlinearity=tanh)),
              (DropoutLayer, args(name="l_do_1", p=0.5)),
              (LSTMLayer,
               args(name="l_forward_2",
                    num_units=N_HIDDEN,
                    grad_clipping=GRAD_CLIP,
                    nonlinearity=tanh,
                    only_return_final=True)),
              (DropoutLayer, args(name="l_do_2", p=0.5)),
              (DenseLayer,
               args(name="l_out",
                    num_units=symbol_count,
                    W=lasagne.init.Normal(),
                    nonlinearity=softmax))]
    return NeuralNet(
        y_tensor_type=T.ivector,
        layers=layers,
        batch_iterator_train=MyBatchIterator(batch_size=CHUNK_SIZE),
        max_epochs=int(round(MAX_EPOCHS * chunks_per_epoch)),
        verbose=1,
        train_split=TrainSplit(0),
        objective_loss_function=categorical_crossentropy,
        update=adagrad,
        update_learning_rate=learning_rate,
        on_epoch_finished=[OnEpochFinished()],
    )
Example #22
    def __init__(self, isTrain, isNN):
        super(RegressionNN, self).__init__(isTrain, isNN)
        # data preprocessing
        #self.dataPreprocessing()

        self.net1 = NeuralNet(
            layers=[  # three layers: one hidden layer
                ('input', layers.InputLayer),
                ('hidden', layers.DenseLayer),
                #('hidden2', layers.DenseLayer),
                #('hidden3', layers.DenseLayer),
                ('output', layers.DenseLayer),
            ],
            # layer parameters:
            input_shape=(None, 13),  # input dimension is 13
            hidden_num_units=6,  # number of units in hidden layer
            #hidden2_num_units=8,  # number of units in hidden layer
            #hidden3_num_units=4,  # number of units in hidden layer
            output_nonlinearity=None,  # linear output (identity nonlinearity)
            output_num_units=1,  # output dimension is 1

            # objective function
            objective_loss_function=lasagne.objectives.squared_error,

            # optimization method:
            update=lasagne.updates.nesterov_momentum,
            update_learning_rate=0.002,
            update_momentum=0.4,

            # use 20% as validation
            train_split=TrainSplit(eval_size=0.2),
            regression=True,  # flag to indicate we're dealing with a regression problem
            max_epochs=100,  # we want to train this many epochs
            verbose=0,
        )
Example #23
def build_net(vectorizer, batch_size=1024 * 10, r1_size=100):
    vocab_size = vectorizer.num_chars
    seq_length = vectorizer.seq_length
    net = NeuralNet(
        layers=[('input', layers.InputLayer), ('r1', layers.LSTMLayer),
                ('s1', layers.SliceLayer), ('output', layers.DenseLayer)],
        input_shape=(None, 25, vocab_size),
        r1_num_units=r1_size,
        s1_indices=-1,
        s1_axis=1,
        output_num_units=vocab_size,
        output_nonlinearity=softmax,
        update=nesterov_momentum,
        update_learning_rate=0.1,
        update_momentum=0.9,
        # update=adam,
        # update_learning_rate=0.01,
        max_epochs=10000,
        on_epoch_finished=[SaveBestModel('rnn', vectorizer)],
        batch_iterator_train=BatchIterator(batch_size),
        train_split=TrainSplit(eval_size=0.0),
        regression=False,
        verbose=2)
    return net
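One plausible way to exercise build_net is with a small stub object in place of the real vectorizer; the stub, the data shapes and the batch size below are assumptions, and SaveBestModel, BatchIterator and the nolearn imports used above are assumed to be in scope.

from collections import namedtuple
import numpy as np

# Hypothetical stand-in for the real vectorizer; build_net only reads these two attributes.
FakeVectorizer = namedtuple('FakeVectorizer', ['num_chars', 'seq_length'])
vec = FakeVectorizer(num_chars=60, seq_length=25)

X = np.random.random((2048, 25, vec.num_chars)).astype(np.float32)
y = np.random.randint(0, vec.num_chars, size=2048).astype(np.int32)

net = build_net(vec, batch_size=256)
# net.fit(X, y)   # would train for up to 10000 epochs, checkpointing via SaveBestModel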
Example #24
def init_nnet(d0, h1, d1, h2, d2, h3, d3, e, l, runtype):
	layers0 = [
			('input', InputLayer),
			('dropout0', DropoutLayer),
			('hidden1', DenseLayer),
			('dropout1', DropoutLayer),
			('hidden2', DenseLayer),
			('dropout2', DropoutLayer),
			('hidden3', DenseLayer),
			('dropout3', DropoutLayer),
			('output', DenseLayer)
			]

	net0 = NeuralNet(
		layers=layers0,
		input_shape=(None, num_features),
		dropout0_p=d0,
		hidden1_num_units=h1,
		hidden1_nonlinearity=tanh,
		dropout1_p=d1,
		hidden2_num_units=h2,
		hidden2_nonlinearity=sigmoid,
		dropout2_p=d2,
		hidden3_num_units=h3,
		hidden3_nonlinearity=sigmoid,
		dropout3_p=d3,
		output_num_units=3,
		output_nonlinearity=softmax,
		update=adagrad,
		update_learning_rate=theano.shared(float32(l)),
		#on_epoch_finished=on_epoch_finished,
		#update_momentum=0.9,
		train_split=TrainSplit(eval_size=0.2),
		max_epochs=e,
		verbose=2)
	return net0
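init_nnet relies on a module-level num_features and a float32 helper (commonly defined as a cast to np.float32); the call below is a hypothetical configuration with arbitrary layer sizes, dropout rates and data.

import numpy as np

def float32(k):                  # helper assumed by init_nnet for the shared learning rate
    return np.cast['float32'](k)

num_features = 50                # must match the number of columns in the training matrix
X = np.random.random((1000, num_features)).astype(np.float32)
y = np.random.randint(0, 3, size=1000).astype(np.int32)   # three classes, as in output_num_units

net0 = init_nnet(d0=0.1, h1=64, d1=0.2, h2=32, d2=0.2, h3=16, d3=0.1,
                 e=20, l=0.01, runtype='demo')
net0.fit(X, y)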
Example #25
    output_num_units=1,
    output_nonlinearity=sigmoid,
    objective_loss_function=binary_crossentropy,
    update=adam,
    update_learning_rate=theano.shared(float32(0.0003), borrow=True),
    #                 update_momentum=theano.shared(float32(0.001), borrow=True),
    update_beta1=0.9,
    update_beta2=0.99,
    update_epsilon=1e-06,
    on_epoch_finished=[
        #                     AdjustVariable('update_learning_rate', start=0.3, stop=0.05),
        #                     AdjustVariable('update_momentum', start=0.001, stop=0.00299),
        #                     EarlyStopping(patience=200),
    ],
    regression=True,
    train_split=TrainSplit(eval_size=0.00),
    y_tensor_type=T.matrix,
    verbose=1,
    batch_iterator_train=BatchIterator(3200),
    max_epochs=100)

#np.random.seed(7)
#net0_clone = clone(net0)
#net0_clone.fit(t1nn_conc_shared.get_value(), y)
#net0_clone.fit(X_encoded_shared.get_value(), y)

cv_by_hand = [(np.where(cvFolds != fold)[0], np.where(cvFolds == fold)[0])
              for fold in np.unique(cvFolds)]

foldPred = np.zeros((t1nn_conc_shared.get_value().shape[0], 1))
bags = 10
Example #26
    maxout3_pool_size=2,
    dropout3_p=0.4,
    hidden4_num_units=512,
    hidden4_nonlinearity=very_leaky_rectify,
    output_num_units=2,
    output_nonlinearity=lasagne.nonlinearities.softmax,

    # optimization method:
    update=adagrad,
    #update=nesterov_momentum,
    #update_momentum=theano.shared(np.float32(0.9)),
    update_learning_rate=theano.shared(np.float32(0.013)),
    ###
    regression=False,
    max_epochs=2000,
    train_split=TrainSplit(eval_size=0.1),
    #custom_score=('auc', lambda y_true, y_proba: roc_auc_score(y_true, y_proba[:, 1])),
    on_epoch_finished=[
        AdjustVariable('update_learning_rate', start=0.013, stop=0.001),
        #AdjustVariable('update_momentum', start=0.9, stop=0.999),
        EarlyStopping(patience=60),
    ],
    verbose=1)

clf.fit(X_train, y_train)
from sklearn import metrics
y_pred = clf.predict_proba(X_val0)[:, 1]

score = metrics.roc_auc_score(y_val0, y_pred)
print('score on extra set: %s' % score)
Example #27
def cascade_model(options):
    """
    3D cascade model using Nolearn and Lasagne
    
    Inputs:
    - model_options:
    - weights_path: path to where weights should be saved

    Output:
    - nets = list of NeuralNets (CNN1, CNN2)
    """

    # model options
    channels = len(options['modalities'])
    train_split_perc = options['train_split']
    num_epochs = options['max_epochs']
    max_epochs_patience = options['patience']

    # save model to disk to re-use it. Create an experiment folder
    # organize experiment
    if not os.path.exists(
            os.path.join(options['weight_paths'], options['experiment'])):
        os.mkdir(os.path.join(options['weight_paths'], options['experiment']))
    if not os.path.exists(
            os.path.join(options['weight_paths'], options['experiment'],
                         'nets')):
        os.mkdir(
            os.path.join(options['weight_paths'], options['experiment'],
                         'nets'))

    # --------------------------------------------------
    # first model
    # --------------------------------------------------

    layer1 = InputLayer(name='in1',
                        shape=(None, channels) + options['patch_size'])
    layer1 = batch_norm(Conv3DLayer(layer1,
                                    name='conv1_1',
                                    num_filters=32,
                                    filter_size=3,
                                    pad='same'),
                        name='BN1')
    layer1 = Pool3DLayer(layer1,
                         name='avgpool_1',
                         mode='max',
                         pool_size=2,
                         stride=2)
    layer1 = batch_norm(Conv3DLayer(layer1,
                                    name='conv2_1',
                                    num_filters=64,
                                    filter_size=3,
                                    pad='same'),
                        name='BN2')
    layer1 = Pool3DLayer(layer1,
                         name='avgpoo2_1',
                         mode='max',
                         pool_size=2,
                         stride=2)
    layer1 = DropoutLayer(layer1, name='l2drop', p=0.5)
    layer1 = DenseLayer(layer1, name='d_1', num_units=256)
    layer1 = DenseLayer(layer1,
                        name='out',
                        num_units=2,
                        nonlinearity=nonlinearities.softmax)

    # save weights
    net_model = 'model_1'
    net_weights = os.path.join(options['weight_paths'], options['experiment'],
                               'nets', net_model + '.pkl')
    net_history = os.path.join(options['weight_paths'], options['experiment'],
                               'nets', net_model + '_history.pkl')

    net1 = NeuralNet(
        layers=layer1,
        objective_loss_function=objectives.categorical_crossentropy,
        batch_iterator_train=Rotate_batch_Iterator(batch_size=128),
        update=updates.adadelta,
        on_epoch_finished=[
            SaveWeights(net_weights, only_best=True, pickle=False),
            SaveTrainingHistory(net_history),
            EarlyStopping(patience=max_epochs_patience)
        ],
        verbose=options['net_verbose'],
        max_epochs=num_epochs,
        train_split=TrainSplit(eval_size=train_split_perc),
    )

    # --------------------------------------------------
    # second model
    # --------------------------------------------------

    layer2 = InputLayer(name='in2',
                        shape=(None, channels) + options['patch_size'])
    layer2 = batch_norm(Conv3DLayer(layer2,
                                    name='conv1_1',
                                    num_filters=32,
                                    filter_size=3,
                                    pad='same'),
                        name='BN1')
    layer2 = Pool3DLayer(layer2,
                         name='avgpool_1',
                         mode='max',
                         pool_size=2,
                         stride=2)
    layer2 = batch_norm(Conv3DLayer(layer2,
                                    name='conv2_1',
                                    num_filters=64,
                                    filter_size=3,
                                    pad='same'),
                        name='BN2')
    layer2 = Pool3DLayer(layer2,
                         name='avgpoo2_1',
                         mode='max',
                         pool_size=2,
                         stride=2)
    layer2 = DropoutLayer(layer2, name='l2drop', p=0.5)
    layer2 = DenseLayer(layer2, name='d_1', num_units=256)
    layer2 = DenseLayer(layer2,
                        name='out',
                        num_units=2,
                        nonlinearity=nonlinearities.softmax)

    # save weights
    net_model = 'model_2'
    net_weights2 = os.path.join(options['weight_paths'], options['experiment'],
                                'nets', net_model + '.pkl')
    net_history2 = os.path.join(options['weight_paths'], options['experiment'],
                                'nets', net_model + '_history.pkl')

    net2 = NeuralNet(
        layers=layer2,
        objective_loss_function=objectives.categorical_crossentropy,
        batch_iterator_train=Rotate_batch_Iterator(batch_size=128),
        update=updates.adadelta,
        on_epoch_finished=[
            SaveWeights(net_weights2, only_best=True, pickle=False),
            SaveTrainingHistory(net_history2),
            EarlyStopping(patience=max_epochs_patience)
        ],
        verbose=options['net_verbose'],
        max_epochs=num_epochs,
        train_split=TrainSplit(eval_size=train_split_perc),
    )

    return [net1, net2]
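The options dictionary read by cascade_model above has roughly this shape; every value here is a placeholder, and the weight_paths directory is assumed to exist already (the function only creates the experiment subfolders beneath it).

# Hypothetical options for cascade_model(); the keys mirror the ones read in the function above.
options = {
    'modalities': ['T1', 'FLAIR'],     # determines the number of input channels
    'train_split': 0.25,               # eval_size passed to TrainSplit
    'max_epochs': 200,
    'patience': 25,                    # early-stopping patience
    'weight_paths': '/tmp/experiments',
    'experiment': 'demo_run',
    'patch_size': (11, 11, 11),        # 3D patch shape fed to the InputLayers
    'net_verbose': 1,
}

net1, net2 = cascade_model(options)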
Example #28
    def test_eval_size_half(self, TrainSplit, nn):
        X, y = np.random.random((100, 10)), np.repeat([0, 1, 2, 3], 25)
        X_train, X_valid, y_train, y_valid = TrainSplit(0.51)(X, y, nn)
        assert len(X_train) + len(X_valid) == 100
        assert len(y_train) + len(y_valid) == 100
        assert len(X_train) > 45
Example #29
    def test_reproducable(self, TrainSplit, nn):
        X, y = np.random.random((100, 10)), np.repeat([0, 1, 2, 3], 25)
        X_train1, X_valid1, y_train1, y_valid1 = TrainSplit(0.2)(X, y, nn)
        X_train2, X_valid2, y_train2, y_valid2 = TrainSplit(0.2)(X, y, nn)
        assert np.all(X_train1 == X_train2)
        assert np.all(y_valid1 == y_valid2)
Example #30
    net0 = NeuralNet(
        layers=layers0,
        input_shape=(None, num_features),
        dense0_num_units=100,
        dropout0_p=0.5,
        dense1_num_units=100,
        output_num_units=num_classes,
        output_nonlinearity=softmax,
        update=nesterov_momentum,
        #update=adam,
        update_learning_rate=0.08,
        update_momentum=0.2,
        #objective_loss_function=squared_error,
        #objective_loss_function = binary_crossentropy,
        train_split=TrainSplit(0.1),
        verbose=1,
        max_epochs=15)

    dfrange = list(range(0, 15))
    shuffle(dfrange)
    X, y, encoder, scaler = load_train_data(datapath + "train_fixed_data.csv",
                                            0)
    print("Fitting Sample 0")
    net0.fit(X, y)

    for i in range(1, 14):
        print("Loading Sample " + str(i))
        X, y, encoder, scaler1 = load_train_data(
            datapath + "train_fixed_data.csv", i)
        print("Fitting Sample " + str(i))