Example #1
def lasagne_oneLayer_classifier(param, X, labels):

    ## initialize the NN
    layers0 = [('input', InputLayer),
               ('dense0', DenseLayer),
               ('dropout', DropoutLayer),
               ('output', DenseLayer)]

    net0 = NeuralNet(layers=layers0,

                     input_shape=(None, param['num_features']),
                     dense0_num_units=param['dense0_num_units'],
                     dropout_p=param['dropout_p'],
                     output_num_units=param['num_classes'],
                     output_nonlinearity=softmax,

                     update=nesterov_momentum,
                     update_learning_rate=param['update_learning_rate'],
                     update_momentum=param['update_momentum'],

                     eval_size=0.02,
                     verbose=1,
                     max_epochs=param['max_epochs'])

    ## fit the network
    net0.fit(X, labels)

    return net0
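For reference, a hypothetical `param` dict that supplies every key this helper reads (the values below are illustrative assumptions, not taken from the original project):

param = {
    'num_features': X.shape[1],
    'num_classes': 9,
    'dense0_num_units': 512,
    'dropout_p': 0.5,
    'update_learning_rate': 0.01,
    'update_momentum': 0.9,
    'max_epochs': 20,
}
net0 = lasagne_oneLayer_classifier(param, X, labels)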
Example #2
def fit_nn_and_predict_probas(features, dv, features_t):
	bwh = BestWeightsHolder()
	tvs = TrainValidSplitter(standardize=True,few=True)

	layers = [('input', InputLayer),
		   ('dense0', DenseLayer),
		   ('dropout0', DropoutLayer),
		   ('dense1', DenseLayer),
		   ('dropout1', DropoutLayer),
		   ('output', DenseLayer)]

	net = NeuralNet(layers=layers,
			input_shape=(None, features.shape[1]),
			dense0_num_units=512,
			dropout0_p=0.4,
			dense1_num_units=256,
			dropout1_p=0.4,
			output_num_units=38,
			output_nonlinearity=softmax,
			update=adagrad,
			update_learning_rate=0.02,
			train_split=tvs,
			verbose=1,
			max_epochs=40,
			on_epoch_finished=[bwh.hold_best_weights])

	holder = net.fit(features, dv)
	holder.load_params_from(bwh.best_weights)
	return holder.predict_proba(
		np.hstack((tvs.standa.transform(features_t[:, :23]), features_t[:, 23:])))
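`BestWeightsHolder` is not shown above. A minimal sketch of what `hold_best_weights` plausibly does, assuming the standard nolearn callback signature `(nn, train_history)` and the `NeuralNet.get_all_params_values()` / `load_params_from()` API:

import numpy as np

class BestWeightsHolder(object):
    # Hypothetical reconstruction: snapshot weights whenever validation loss improves.
    def __init__(self):
        self.best_valid = np.inf
        self.best_weights = None

    def hold_best_weights(self, nn, train_history):
        valid_loss = train_history[-1]['valid_loss']
        if valid_loss < self.best_valid:
            self.best_valid = valid_loss
            # get_all_params_values() returns a dict that load_params_from() accepts
            self.best_weights = nn.get_all_params_values()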
Example #3
def fit_model(train_x, y, test_x):
    """Feed-forward neural network for the Kaggle digit recognizer competition.
    Network size and optimization time (max_epochs=10) are intentionally
    limited to meet runtime restrictions.
    """
    print("\n\nRunning Conventional Net.  Optimization progress below\n\n")
    net1 = NeuralNet(
        layers=[  #list the layers here
            ('input', layers.InputLayer),
            ('hidden1', layers.DenseLayer),
            ('output', layers.DenseLayer),
            ],

        # layer parameters:
        input_shape=(None, train_x.shape[1]),
        hidden1_num_units=200, hidden1_nonlinearity=rectify,  #params of first layer
        output_nonlinearity=softmax,  # softmax for classification problems
        output_num_units=10,  # 10 target values

        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.05,
        update_momentum=0.7,

        regression=False,
        max_epochs=10,  # Intentionally limited for execution speed
        verbose=1,
        )

    net1.fit(train_x, y)
    predictions = net1.predict(test_x)
    return predictions
Example #4
    def test_diamond(self, NeuralNet):
        input, hidden1, hidden2, concat, output = (
            Mock(), Mock(), Mock(), Mock(), Mock())
        nn = NeuralNet(
            layers=[
                ('input', input),
                ('hidden1', hidden1),
                ('hidden2', hidden2),
                ('concat', concat),
                ('output', output),
                ],
            input_shape=(10, 10),
            hidden2_incoming='input',
            concat_incoming=['hidden1', 'hidden2'],
            )
        nn.initialize_layers(nn.layers)

        input.assert_called_with(name='input', shape=(10, 10))
        hidden1.assert_called_with(incoming=input.return_value, name='hidden1')
        hidden2.assert_called_with(incoming=input.return_value, name='hidden2')
        concat.assert_called_with(
            incoming=[hidden1.return_value, hidden2.return_value],
            name='concat'
            )
        output.assert_called_with(incoming=concat.return_value, name='output')
Example #5
def train():
    weather = load_weather()
    training = load_training()

    X = assemble_X(training, weather)
    print(len(X[0]))
    mean, std = normalize(X)
    y = assemble_y(training)
        
    input_size = len(X[0])
    
    learning_rate = theano.shared(np.float32(0.1))
    
    net = NeuralNet(
        layers=[
            ('input', InputLayer),
            ('hidden1', DenseLayer),
            ('dropout1', DropoutLayer),
            ('hidden2', DenseLayer),
            ('dropout2', DropoutLayer),
            ('output', DenseLayer),
        ],
        # layer parameters:
        input_shape=(None, input_size),
        hidden1_num_units=325,
        dropout1_p=0.4,
        hidden2_num_units=325,
        dropout2_p=0.4,
        output_nonlinearity=sigmoid,
        output_num_units=1,

        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=learning_rate,
        update_momentum=0.9,

        # Decay the learning rate
        on_epoch_finished=[
            AdjustVariable(learning_rate, target=0, half_life=1),
        ],

        # This is silly, but we don't want a stratified K-Fold here.
        # To compensate we need to pass in the y_tensor_type and the loss.
        regression=True,
        y_tensor_type=T.imatrix,
        objective_loss_function=binary_crossentropy,

        max_epochs=85,
        eval_size=0.1,
        verbose=1,
    )

    X, y = shuffle(X, y, random_state=123)
    net.fit(X, y)
    
    _, X_valid, _, y_valid = net.train_test_split(X, y, net.eval_size)
    probas = net.predict_proba(X_valid)[:,0]
    print("ROC score", metrics.roc_auc_score(y_valid, probas))

    return net, mean, std     
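The `AdjustVariable` used here takes a shared variable, a `target`, and a `half_life`, and is not defined in the snippet. A plausible reconstruction (an assumption matching the call above) decays the variable geometrically toward the target once per epoch:

import numpy as np

class AdjustVariable(object):
    # Hypothetical sketch: exponential decay of a Theano shared variable.
    def __init__(self, variable, target, half_life=20):
        self.variable = variable
        self.target = target
        self.half_life = half_life

    def __call__(self, nn, train_history):
        delta = self.variable.get_value() - self.target
        delta *= 0.5 ** (1.0 / self.half_life)
        self.variable.set_value(np.float32(self.target + delta))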
Example #6
    def test_initialization(self, NeuralNet):
        input, hidden1, hidden2, output = Mock(), Mock(), Mock(), Mock()
        nn = NeuralNet(
            layers=[
                (input, {'shape': (10, 10), 'name': 'input'}),
                (hidden1, {'some': 'param', 'another': 'param'}),
                (hidden2, {}),
                (output, {'name': 'output'}),
                ],
            input_shape=(10, 10),
            mock1_some='iwin',
            )
        out = nn.initialize_layers(nn.layers)

        input.assert_called_with(
            name='input', shape=(10, 10))
        assert nn.layers_['input'] is input.return_value

        hidden1.assert_called_with(
            incoming=input.return_value, name='mock1',
            some='iwin', another='param')
        assert nn.layers_['mock1'] is hidden1.return_value

        hidden2.assert_called_with(
            incoming=hidden1.return_value, name='mock2')
        assert nn.layers_['mock2'] is hidden2.return_value

        output.assert_called_with(
            incoming=hidden2.return_value, name='output')

        assert out is nn.layers_['output']
Example #7
    def test_initialization_with_layer_instance(self, NeuralNet):
        layer1 = InputLayer(shape=(128, 13))  # name will be assigned
        layer2 = DenseLayer(layer1, name='output', num_units=2)  # has name
        nn = NeuralNet(layers=layer2)
        out = nn.initialize_layers()
        assert nn.layers_['output'] == layer2 == out[0]
        assert nn.layers_['input0'] == layer1
Example #8
File: cw.py Project: YilinGUO/NLP
def train(x_train, y_train):
	clf_nn = NeuralNet(
		layers=[  # four layers: two hidden layers
			('input', layers.InputLayer),
			('hidden1', layers.DenseLayer),
			('hidden2', layers.DenseLayer),
			('output', layers.DenseLayer),
			],
		# layer parameters:
		input_shape=(None, 2538),  # 2538 input features per batch
		hidden1_num_units=100,  # number of units in hidden layer
		hidden2_num_units=100,
		output_nonlinearity=nonlinearities.softmax,  # softmax for classification
		output_num_units=10,  # 10 target values

		# optimization method:
		update=nesterov_momentum,
		update_learning_rate=0.01,
		update_momentum=0.9,
		
		max_epochs=50,  # we want to train this many epochs
		verbose=1,
		)
	clf_nn.fit(x_train, y_train)
	return clf_nn
Example #9
    def test_initialization_legacy(self, NeuralNet):
        input, hidden1, hidden2, output = Mock(), Mock(), Mock(), Mock()
        nn = NeuralNet(
            layers=[
                ('input', input),
                ('hidden1', hidden1),
                ('hidden2', hidden2),
                ('output', output),
                ],
            input_shape=(10, 10),
            hidden1_some='param',
            )
        out = nn.initialize_layers(nn.layers)

        input.assert_called_with(
            name='input', shape=(10, 10))
        assert nn.layers_['input'] is input.return_value

        hidden1.assert_called_with(
            incoming=input.return_value, name='hidden1', some='param')
        assert nn.layers_['hidden1'] is hidden1.return_value

        hidden2.assert_called_with(
            incoming=hidden1.return_value, name='hidden2')
        assert nn.layers_['hidden2'] is hidden2.return_value

        output.assert_called_with(
            incoming=hidden2.return_value, name='output')

        assert out is nn.layers_['output']
Example #10
    def test_initialization_with_tuples(self, NeuralNet):
        input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
        hidden1, hidden2, output = [
            Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(3)]
        nn = NeuralNet(
            layers=[
                (input, {'shape': (10, 10), 'name': 'input'}),
                (hidden1, {'some': 'param', 'another': 'param'}),
                (hidden2, {}),
                (output, {'name': 'output'}),
                ],
            input_shape=(10, 10),
            mock1_some='iwin',
            )
        out = nn.initialize_layers(nn.layers)

        input.assert_called_with(
            name='input', shape=(10, 10))
        assert nn.layers_['input'] is input.return_value

        hidden1.assert_called_with(
            incoming=input.return_value, name='mock1',
            some='iwin', another='param')
        assert nn.layers_['mock1'] is hidden1.return_value

        hidden2.assert_called_with(
            incoming=hidden1.return_value, name='mock2')
        assert nn.layers_['mock2'] is hidden2.return_value

        output.assert_called_with(
            incoming=hidden2.return_value, name='output')

        assert out[0] is nn.layers_['output']
Example #11
def test_clone():
    from nolearn.lasagne import NeuralNet
    from nolearn.lasagne import BatchIterator
    from nolearn.lasagne import objective

    params = dict(
        layers=[
            ('input', InputLayer),
            ('hidden', DenseLayer),
            ('output', DenseLayer),
            ],
        input_shape=(100, 784),
        output_num_units=10,
        output_nonlinearity=softmax,

        more_params={
            'hidden_num_units': 100,
            },
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,

        regression=False,
        objective=objective,
        objective_loss_function=categorical_crossentropy,
        batch_iterator_train=BatchIterator(batch_size=100),
        y_tensor_type=T.ivector,
        use_label_encoder=False,
        on_epoch_finished=None,
        on_training_finished=None,
        max_epochs=100,
        eval_size=0.1,  # BBB
        check_input=True,
        verbose=0,
        )
    nn = NeuralNet(**params)

    nn2 = clone(nn)
    params1 = nn.get_params()
    params2 = nn2.get_params()

    for ignore in (
        'batch_iterator_train',
        'batch_iterator_test',
        'output_nonlinearity',
        'loss',
        'objective',
        'train_split',
        'eval_size',
        'X_tensor_type',
        'on_epoch_finished',
        'on_batch_finished',
        'on_training_started',
        'on_training_finished',
        'custom_scores',
            ):
        for par in (params, params1, params2):
            par.pop(ignore, None)

    assert params == params1 == params2
Example #12
def train_net(X, y):
    net2 = NeuralNet(
    layers=[
        ('input', layers.InputLayer),
        ('ncaa', NCAALayer),
        ('dropout1', layers.DropoutLayer),
        ('hidden', layers.DenseLayer),
        ('dropout2', layers.DropoutLayer),
        ('output', layers.DenseLayer),
        ],
    input_shape = (None, num_features * 2),
    ncaa_num_units = 128,
    dropout1_p=0.2,
    hidden_num_units=128,
    dropout2_p=0.3,
    output_nonlinearity=nonlinearities.sigmoid,
    output_num_units=1,

    update=nesterov_momentum,
    update_learning_rate=theano.shared(float32(0.01)),
    update_momentum=theano.shared(float32(0.9)),

    regression=True,  # flag to indicate we're dealing with regression problem
    max_epochs=20,  # we want to train this many epochs
    verbose=1,
    )

    net2.fit(X, y)
    return net2
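The `float32` helper used for the shared learning rate and momentum here (and again in Examples #26, #34, and #42) is typically the one-liner from the Lasagne tutorials; reconstructed here as an assumption:

import numpy as np

def float32(k):
    return np.cast['float32'](k)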
Example #13
def fit(xTrain, yTrain, dense0_num=800, dropout_p=0.5, dense1_num=500, update_learning_rate=0.01,
        update_momentum=0.9, test_ratio=0.2, max_epochs=20):
        #update_momentum=0.9, test_ratio=0.2, max_epochs=20, train_fname='train.csv'):
    #xTrain, yTrain, encoder, scaler = load_train_data(train_fname)
    #xTest, ids = load_test_data('test.csv', scaler)

    num_features = len(xTrain[0, :])
    num_classes = 9
    print(num_features)

    layers0 = [('input', InputLayer),
           ('dense0', DenseLayer),
           ('dropout', DropoutLayer),
           ('dense1', DenseLayer),
           ('output', DenseLayer)]

    clf = NeuralNet(layers=layers0,
                 input_shape=(None, num_features),
                 dense0_num_units=dense0_num,
                 dropout_p=dropout_p,
                 dense1_num_units=dense1_num,
                 output_num_units=num_classes,
                 output_nonlinearity=softmax,
                 update=nesterov_momentum,
                 update_learning_rate=update_learning_rate,
                 update_momentum=update_momentum,
                 eval_size=test_ratio,
                 verbose=1,
                 max_epochs=max_epochs)

    clf.fit(xTrain, yTrain)
    ll_train = metrics.log_loss(yTrain, clf.predict_proba(xTrain))
    print(ll_train)

    return clf
Example #14
def nn_example(data):
    net1 = NeuralNet(
        layers=[('input', layers.InputLayer),
                ('hidden', layers.DenseLayer),
                ('output', layers.DenseLayer),
                ],
        # layer parameters:
        input_shape=(None, 28*28),
        hidden_num_units=100,  # number of units in 'hidden' layer
        output_nonlinearity=lasagne.nonlinearities.softmax,
        output_num_units=10,  # 10 target values for the digits 0, 1, 2, ..., 9

        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,

        max_epochs=10,
        verbose=1,
        )

    # Train the network
    net1.fit(data['X_train'], data['y_train'])

    # Try the network on new data
    print("Feature vector (100-110): %s" % data['X_test'][0][100:110])
    print("Label: %s" % str(data['y_test'][0]))
    print("Predicted: %s" % str(net1.predict([data['X_test'][0]])))
Example #15
    def fit(self, tr, add_feat_tr):
        ## if a trend exists, remove it
        if self.trend == 1:
            trend = self.est_trend(tr)
            tr = tr - np.asarray(trend)
        layers0 = [
            ## 2 layers with one hidden layer
            (InputLayer, {'shape': (None, 8, self.window_length)}),
            (DenseLayer, {'num_units': 8 * self.window_length}),
            (DropoutLayer, {'p': 0.3}),
            (DenseLayer, {'num_units': 8 * self.window_length // 3}),
            ## the output layer
            (DenseLayer, {'num_units': 1, 'nonlinearity': None}),
        ]
        feats = build_feat(tr, add_feat_tr, window_length=self.window_length)
        print(feats.shape)
        feat_target = get_target(tr, window_length=self.window_length)
        print(feat_target.shape)
        net0 = NeuralNet(
            layers=layers0,
            max_epochs=400,
            update=nesterov_momentum,
            update_learning_rate=0.01,
            update_momentum=0.9,
            verbose=1,
            regression=True,
        )
        net0.fit(feats[:-1], feat_target)
        return net0, feats, feat_target
Example #16
def createNet(X, Y, ln, loadFile = ""):
    net1 = NeuralNet(
        layers=[  # six layers: four hidden layers
            ('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            ('hidden1', layers.DenseLayer),
            ('hidden2', layers.DenseLayer),
            ('hidden3', layers.DenseLayer),
            ('output', layers.DenseLayer),
            ],
        # layer parameters: Best 400 400
        input_shape=(None, numInputs),  # 31 inputs
        hidden_num_units=400,  # number of units in hidden layer
        hidden1_num_units=400,
        hidden2_num_units=400,
        hidden3_num_units=400,
        output_nonlinearity=None,  # output layer uses identity function
        output_num_units=numOutputs,  # 4 outputs
    
        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=ln,
        update_momentum=0.9,
    
        regression=True,  # flag to indicate we're dealing with regression problem
        max_epochs=1500,  # we want to train this many epochs
        verbose=1,
        )
    #if (loadFile != ""):
        #net1.load_params_from(loadFile)
    net1.max_epochs = 50
    net1.update_learning_rate = ln

    return net1
Example #17
def loadNet(netName):
    if os.path.exists(netName):
        net = pickle.load(open(netName, "rb"))
    else:
        net = NeuralNet(
            layers=[  # three layers: one hidden layer
                      ('input', layers.InputLayer),
                      ('hidden', layers.DenseLayer),
                      ('output', layers.DenseLayer),
                      ],
            # layer parameters:
            input_shape=(None, 9216),  # 96x96 input pixels per batch
            hidden_num_units=100,  # number of units in hidden layer
            output_nonlinearity=None,  # output layer uses identity function
            output_num_units=30,  # 30 target values

            # optimization method:
            update=nesterov_momentum,
            update_learning_rate=0.01,
            update_momentum=0.9,

            regression=True,  # flag to indicate we're dealing with regression problem
            max_epochs=400,  # we want to train this many epochs
            verbose=1,
        )

        X, y = load()
        net.fit(X, y)

        print("X.shape == {}; X.min == {:.3f}; X.max == {:.3f}".format(X.shape, X.min(), X.max()))
        print("y.shape == {}; y.min == {:.3f}; y.max == {:.3f}".format(y.shape, y.min(), y.max()))

        pickle.dump(net, open(netName, 'wb'), -1)

    return net
Example #18
    def net_fitted(self, NeuralNet, X_train, y_train):
        nn = NeuralNet(
            layers=[
                ('input', InputLayer),
                ('conv1', Conv2DLayer),
                ('conv2', Conv2DLayer),
                ('pool2', MaxPool2DLayer),
                ('output', DenseLayer),
                ],
            input_shape=(None, 1, 28, 28),
            output_num_units=10,
            output_nonlinearity=softmax,

            more_params=dict(
                conv1_filter_size=(5, 5), conv1_num_filters=16,
                conv2_filter_size=(3, 3), conv2_num_filters=16,
                pool2_pool_size=(8, 8),
                hidden1_num_units=16,
                ),

            update=nesterov_momentum,
            update_learning_rate=0.01,
            update_momentum=0.9,

            max_epochs=3,
            )

        return nn.fit(X_train, y_train)
Example #19
File: net6.py Project: hustmonk/k21
    def train(self, X, y_train, X_test, ids_test, y_test, outfile, is_valid):
        X = np.array(X)
        encoder = LabelEncoder()
        y = encoder.fit_transform(y_train).astype(np.int32)
        num_classes = len(encoder.classes_)
        num_features = X.shape[1]

        layers0 = [('input', InputLayer),
                   ('dense1', DenseLayer),
                   ('dropout1', DropoutLayer),
                   ('dense2', DenseLayer),
                   ('dropout2', DropoutLayer),
                   ('output', DenseLayer)]

        net0 = NeuralNet(layers=layers0,
                 input_shape=(None, num_features),
                 dense1_num_units=3500,
                 dropout1_p=0.4,
                 dense2_num_units=2300,
                 dropout2_p=0.5,
                 output_num_units=num_classes,
                 output_nonlinearity=softmax,
                 #update=nesterov_momentum,
                 update=adagrad,
                 update_learning_rate=0.01,
                 #update_momentum=0.9,
                 #objective_loss_function=softmax,
                 objective_loss_function=categorical_crossentropy,
                 eval_size=0.2,
                 verbose=1,
                 max_epochs=20)
        net0.fit(X, y)
        X_test = np.array(X_test)
        self.make_submission(net0, X_test, ids_test, encoder)
Example #20
    def test_initialization_with_tuples(self, NeuralNet):
        input = Mock(__name__="InputLayer", __bases__=(InputLayer,))
        hidden1, hidden2, output = [Mock(__name__="MockLayer", __bases__=(Layer,)) for i in range(3)]
        nn = NeuralNet(
            layers=[
                (input, {"shape": (10, 10), "name": "input"}),
                (hidden1, {"some": "param", "another": "param"}),
                (hidden2, {}),
                (output, {"name": "output"}),
            ],
            input_shape=(10, 10),
            mock1_some="iwin",
        )
        out = nn.initialize_layers(nn.layers)

        input.assert_called_with(name="input", shape=(10, 10))
        assert nn.layers_["input"] is input.return_value

        hidden1.assert_called_with(incoming=input.return_value, name="mock1", some="iwin", another="param")
        assert nn.layers_["mock1"] is hidden1.return_value

        hidden2.assert_called_with(incoming=hidden1.return_value, name="mock2")
        assert nn.layers_["mock2"] is hidden2.return_value

        output.assert_called_with(incoming=hidden2.return_value, name="output")

        assert out is nn.layers_["output"]
Example #21
    def test_diamond(self, NeuralNet):
        input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
        hidden1, hidden2, concat, output = [
            Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(4)]
        nn = NeuralNet(
            layers=[
                ('input', input),
                ('hidden1', hidden1),
                ('hidden2', hidden2),
                ('concat', concat),
                ('output', output),
                ],
            input_shape=(10, 10),
            hidden2_incoming='input',
            concat_incomings=['hidden1', 'hidden2'],
            )
        nn.initialize_layers(nn.layers)

        input.assert_called_with(name='input', shape=(10, 10))
        hidden1.assert_called_with(incoming=input.return_value, name='hidden1')
        hidden2.assert_called_with(incoming=input.return_value, name='hidden2')
        concat.assert_called_with(
            incomings=[hidden1.return_value, hidden2.return_value],
            name='concat'
            )
        output.assert_called_with(incoming=concat.return_value, name='output')
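For contrast with the mocked test above, a minimal runnable diamond built from real layers (a sketch assuming the nolearn 0.6-style API, where `hidden2_incoming` reroutes a branch and `ConcatLayer` takes `incomings`):

from lasagne.layers import ConcatLayer, DenseLayer, InputLayer
from lasagne.nonlinearities import softmax
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet

diamond = NeuralNet(
    layers=[('input', InputLayer),
            ('hidden1', DenseLayer),
            ('hidden2', DenseLayer),
            ('concat', ConcatLayer),
            ('output', DenseLayer)],
    input_shape=(None, 10),
    hidden1_num_units=8,
    hidden2_num_units=8,
    hidden2_incoming='input',                # branch off the input, not hidden1
    concat_incomings=['hidden1', 'hidden2'],
    output_num_units=2,
    output_nonlinearity=softmax,
    update=nesterov_momentum,
    update_learning_rate=0.01,
    max_epochs=1,
)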
Example #22
def CompileNetwork(l_out, epochs, update, update_learning_rate, objective_l2,
                   earlystopping, patience, batch_size, verbose):

    update_fn = getattr(updates, update)
    earlystop = EarlyStopping(patience=patience, verbose=verbose)

    net = NeuralNet(
        l_out,
        max_epochs=epochs,

        update=update_fn,

        objective_l2=objective_l2,

        batch_iterator_train=BatchIterator(batch_size=batch_size),
        batch_iterator_test=BatchIterator(batch_size=batch_size),
        verbose=verbose,
        on_training_finished=[earlystop.load_best_weights],
    )

    if earlystopping:
        net.on_epoch_finished.append(earlystop)
    if update_learning_rate is not None:
        net.update_learning_rate = update_learning_rate

    return net
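A hypothetical call (the output layer `l_out` and the `EarlyStopping` class with its `load_best_weights` hook are assumed to be defined elsewhere in the project):

net = CompileNetwork(l_out, epochs=100, update='adam', update_learning_rate=1e-3,
                     objective_l2=1e-4, earlystopping=True, patience=10,
                     batch_size=128, verbose=1)
net.fit(X_train, y_train)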
Example #23
    def classif_no_valid(self, NeuralNet, X, y):
        from nolearn.lasagne import TrainSplit
        l = InputLayer(shape=(None, X.shape[1]))
        l = DenseLayer(l, num_units=len(np.unique(y)), nonlinearity=softmax)
        net = NeuralNet(
            l, update_learning_rate=0.01, train_split=TrainSplit(0))
        return net.fit(X, y)
Example #24
def NN(X, y):

	net1 = NeuralNet(
	    layers=[  # three layers: one hidden layer
	        ('input', layers.InputLayer),
	        ('hidden', layers.DenseLayer),
	        ('output', layers.DenseLayer),
	        ],
	    # layer parameters:
	    input_shape=(None, 9216),  # 96x96 input pixels per batch
	    hidden_num_units=100,  # number of units in hidden layer
	    output_nonlinearity=None,  # output layer uses identity function
	    output_num_units=30,  # 30 target values

	    # optimization method:
	    update=nesterov_momentum,
	    update_learning_rate=0.01,
	    update_momentum=0.9,

	    regression=True,  # flag to indicate we're dealing with regression problem
	    max_epochs=400,  # we want to train this many epochs
	    verbose=1,
	    )

	net1.fit(X, y)
Example #25
def neural_network(x_train, y_train):
    X, y, encoder, scaler = load_train_data(x_train, y_train)
    num_classes = len(encoder.classes_)
    num_features = X.shape[1]
    layers0 = [
        ("input", InputLayer),
        ("dropoutf", DropoutLayer),
        ("dense0", DenseLayer),
        ("dropout", DropoutLayer),
        ("dense1", DenseLayer),
        ("dropout2", DropoutLayer),
        ("output", DenseLayer),
    ]
    net0 = NeuralNet(
        layers=layers0,
        input_shape=(None, num_features),
        dropoutf_p=0.15,
        dense0_num_units=1000,
        dropout_p=0.25,
        dense1_num_units=500,
        dropout2_p=0.25,
        output_num_units=num_classes,
        output_nonlinearity=softmax,
        update=adagrad,
        update_learning_rate=0.005,
        eval_size=0.01,
        verbose=1,
        max_epochs=30,
    )
    net0.fit(X, y)
    return (net0, scaler)
Example #26
def train_network():
    layers0 = [('input', InputLayer),
               ('dense0', DenseLayer),
               ('dropout0', DropoutLayer),
               ('dense1', DenseLayer),
               ('dropout1', DropoutLayer),
               ('dense2', DenseLayer),
               ('output', DenseLayer)]

    es = EarlyStopping(patience=200)
    net0 = NeuralNet(layers=layers0,
        input_shape=(None, num_features),
        dense0_num_units=256,
        dropout0_p=0.5,
        dense1_num_units=128,
        dropout1_p=0.5,
        dense2_num_units=64,
        output_num_units=num_classes,
        output_nonlinearity=softmax,

        update=nesterov_momentum,
        update_learning_rate=theano.shared(float32(0.01)),
        update_momentum=theano.shared(float32(0.9)),

        eval_size=0.2,
        verbose=1,
        max_epochs=1000,
        on_epoch_finished=[
            AdjustVariable('update_learning_rate', start=0.01, stop=0.0001),
            AdjustVariable('update_momentum', start=0.9, stop=0.999),
            es
            ])

    net0.fit(X, y)
    return (es.best_valid, net0)
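Neither `AdjustVariable('param_name', start=..., stop=...)` nor `EarlyStopping` is defined in this snippet (or in Examples #34 and #42, which use the same pair). The widely circulated versions from Daniel Nouri's Lasagne tutorial look like the following; treat this as a hedged reconstruction rather than this author's exact code:

import numpy as np

class AdjustVariable(object):
    # Linearly anneal a named net parameter (a Theano shared variable) over max_epochs.
    def __init__(self, name, start=0.03, stop=0.001):
        self.name = name
        self.start, self.stop = start, stop
        self.ls = None

    def __call__(self, nn, train_history):
        if self.ls is None:
            self.ls = np.linspace(self.start, self.stop, nn.max_epochs)
        epoch = train_history[-1]['epoch']
        new_value = np.float32(self.ls[epoch - 1])
        getattr(nn, self.name).set_value(new_value)

class EarlyStopping(object):
    # Stop when validation loss hasn't improved for `patience` epochs,
    # restoring the best weights seen so far.
    def __init__(self, patience=100):
        self.patience = patience
        self.best_valid = np.inf
        self.best_valid_epoch = 0
        self.best_weights = None

    def __call__(self, nn, train_history):
        current_valid = train_history[-1]['valid_loss']
        current_epoch = train_history[-1]['epoch']
        if current_valid < self.best_valid:
            self.best_valid = current_valid
            self.best_valid_epoch = current_epoch
            self.best_weights = nn.get_all_params_values()
        elif self.best_valid_epoch + self.patience < current_epoch:
            print("Early stopping.")
            print("Best valid loss was {:.6f} at epoch {}.".format(
                self.best_valid, self.best_valid_epoch))
            nn.load_params_from(self.best_weights)
            raise StopIteration()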
Example #27
    def test_initialization_legacy(self, NeuralNet):
        input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
        hidden1, hidden2, output = [
            Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(3)]
        nn = NeuralNet(
            layers=[
                ('input', input),
                ('hidden1', hidden1),
                ('hidden2', hidden2),
                ('output', output),
                ],
            input_shape=(10, 10),
            hidden1_some='param',
            )
        out = nn.initialize_layers(nn.layers)

        input.assert_called_with(
            name='input', shape=(10, 10))
        assert nn.layers_['input'] is input.return_value

        hidden1.assert_called_with(
            incoming=input.return_value, name='hidden1', some='param')
        assert nn.layers_['hidden1'] is hidden1.return_value

        hidden2.assert_called_with(
            incoming=hidden1.return_value, name='hidden2')
        assert nn.layers_['hidden2'] is hidden2.return_value

        output.assert_called_with(
            incoming=hidden2.return_value, name='output')

        assert out[0] is nn.layers_['output']
Example #28
    def _create_nnet(self, input_dims, output_dims, learning_rate,
                     num_hidden_units=15, batch_size=32, max_train_epochs=1,
                     hidden_nonlinearity=nonlinearities.rectify,
                     output_nonlinearity=None, update_method=updates.sgd):
        """
        A subclass may override this if a different sort
        of network is desired.
        """
        nnlayers = [('input', layers.InputLayer),
                    ('hidden', layers.DenseLayer),
                    ('output', layers.DenseLayer)]
        nnet = NeuralNet(layers=nnlayers,

                           # layer parameters:
                           input_shape=(None, input_dims),
                           hidden_num_units=num_hidden_units,
                           hidden_nonlinearity=hidden_nonlinearity,
                           output_nonlinearity=output_nonlinearity,
                           output_num_units=output_dims,

                           # optimization method:
                           update=update_method,
                           update_learning_rate=learning_rate,

                           regression=True,  # flag to indicate we're dealing with regression problem
                           max_epochs=max_train_epochs,
                           batch_iterator_train=BatchIterator(batch_size=batch_size),
                           train_split=nolearn.lasagne.TrainSplit(eval_size=0),
                           verbose=0,
                         )
        nnet.initialize()
        return nnet
Example #29
def build_mlp(input_var=None):
	net1 = NeuralNet(
	layers=[  # four layers: two hidden layers
			('input', layers.InputLayer),
			('hidden1', layers.DenseLayer),
			('hidden2', layers.DenseLayer),
			('output', layers.DenseLayer),
		],
	# layer parameters:
	input_shape=(None, 14, 2177),  #  14 x 2177 input pixels per batch
	hidden1_num_units=100,  # number of units in hidden layer
	hidden2_num_units=100,
	output_nonlinearity=lasagne.nonlinearities.softmax,  # softmax for classification
	output_num_units=2,  # 2 target values

	# optimization method:
	update=nesterov_momentum,
	update_learning_rate=0.01,
	update_momentum=0.9,

	#regression=False,  # flag to indicate we're dealing with regression problem
	max_epochs=500,  # we want to train this many epochs
	verbose=1,
	)

	X, y = load_dataset()
	y = np.asanyarray(y,np.int32)
	print(X.shape)
	print(y.shape)
	net1.fit(X, y)
Example #30
class NN(object):
    
    def __init__(self, input_size, hidden_1_size, hidden_2_size=None):
        n_layers = [
            ('input', layers.InputLayer),
            ('hidden1', layers.DenseLayer),
            ('dropout1', layers.DropoutLayer)
        ]
        if hidden_2_size is not None:
            n_layers.extend(
                [('hidden2', layers.DenseLayer), ('dropout2', layers.DropoutLayer)]
            )
        n_layers.append(('output', layers.DenseLayer))
        
        self.model = NeuralNet(
            layers=n_layers,
            input_shape=(None, input_size),
            hidden1_num_units=hidden_1_size, dropout1_p=0.5,
    
            output_nonlinearity=tanh,
            output_num_units=1,
            regression=True,

            update=nesterov_momentum,
            update_learning_rate=0.01,
            update_momentum=0.9,
    
            eval_size=0.1,
            on_epoch_finished=[
                AdjustVariable('update_learning_rate', stop=0.0001, decrement=0.00001),
                AdjustVariable('update_momentum',      stop=0.999,  increment=0.0001),
                EarlyStopping(patience=100)
            ],
            
            max_epochs=5000,
            verbose=1
        )
        if hidden_2_size is not None:
            self.model.hidden2_num_units = hidden_2_size
            self.model.dropout2_p = 0.5
    
    def train(self, X, Y):
        self.model.fit(np.asarray(X, dtype=np.float32), np.asarray(Y, dtype=np.float32))
    
    def predict_continuous(self, X_test):
        return self.model.predict(np.asarray(X_test, dtype=np.float32))
    
    def predict_classes(self, X_test):
        Y_pred = self.predict_continuous(X_test)
        
        # threshold the continuous values to get the classes
        pos = Y_pred >= .33
        neg = Y_pred <= -0.33
        neu = np.logical_and(Y_pred < 0.33, Y_pred > -0.33)
        Y_pred[pos] = 1
        Y_pred[neg] = -1
        Y_pred[neu] = 0
        
        return Y_pred.reshape(-1)
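A hypothetical usage of this wrapper for a three-way (-1/0/1) classification task; the array names are illustrative:

nn = NN(input_size=X_train.shape[1], hidden_1_size=256, hidden_2_size=64)
nn.train(X_train, Y_train)            # Y_train in {-1, 0, 1}, matching the tanh output
classes = nn.predict_classes(X_test)  # thresholded back to {-1, 0, 1}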
Example #31
num_classes = len(encoder.classes_)
num_features = X.shape[1]

layers0 = [('input', InputLayer),
           ('dense0', DenseLayer),
           ('dropout', DropoutLayer),
           ('dense1', DenseLayer),
           ('output', DenseLayer)]

net0 = NeuralNet(layers=layers0,
                 
                 input_shape=(None, num_features),
                 dense0_num_units=200,
                 dropout_p=0.5,
                 dense1_num_units=200,
                 output_num_units=num_classes,
                 output_nonlinearity=softmax,
                 
                 update=nesterov_momentum,
                 update_learning_rate=0.01,
                 update_momentum=0.9,
                 
                 eval_size=0.2,
                 verbose=1,
                 max_epochs=20)

net0.fit(X, y)

make_submission(net0, X_test, ids, encoder)
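`make_submission` is not included in the snippet. The version from the Otto benchmark code that this example follows is commonly written as below (a reconstruction, so treat the filename and formatting as assumptions):

def make_submission(clf, X_test, ids, encoder, name='my_neural_net_submission.csv'):
    y_prob = clf.predict_proba(X_test)
    with open(name, 'w') as f:
        # header row: id plus one column per class label
        f.write('id,' + ','.join(encoder.classes_) + '\n')
        for id_, probs in zip(ids, y_prob):
            f.write(','.join([str(id_)] + [str(p) for p in probs]) + '\n')
    print("Wrote submission to file {}.".format(name))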

Example #32
def main(input_file, model_path):

    batch_size = 128
    nb_classes = 62  # A-Z, a-z and 0-9
    nb_epoch = 2

    # Input image dimensions
    img_rows, img_cols = 32, 32

    # Path of data files
    path = input_file

    ### PREDICTION ###

    # Load the model with the highest validation accuracy
    # model.load_weights("best.kerasModelWeights")

    # Load Kaggle test set
    X_test = np.load(path + "/testPreproc_" + str(img_rows) + "_" +
                     str(img_cols) + ".npy")

    print(X_test.shape)

    # Load the preprocessed data and labels
    X_train_all = np.load(path + "/trainPreproc_" + str(img_rows) + "_" +
                          str(img_cols) + ".npy")
    Y_train_all = np.load(path + "/labelsPreproc.npy")

    X_train, X_val, Y_train, Y_val = \
        train_test_split(X_train_all, Y_train_all, test_size=0.25, stratify=np.argmax(Y_train_all, axis=1))

    print(X_train.shape)

    Y_val = convert_(Y_val)

    X_train = X_train.reshape((-1, 1, 32, 32))
    #
    # # input shape for neural network

    # labels = labels.astype(np.uint8)

    X_val = X_val.reshape((-1, 1, 32, 32))
    #
    # # input shape for neural network

    Y_val = Y_val.astype(np.uint8)
    #
    input_image_vector_shape = (None, 1, 32, 32)

    net1 = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv2d1', layers.Conv2DLayer),
            ('maxpool1', layers.MaxPool2DLayer),
            ('conv2d2', layers.Conv2DLayer),
            ('maxpool2', layers.MaxPool2DLayer),
            ('conv2d3', layers.Conv2DLayer),
            ('maxpool3', layers.MaxPool2DLayer),
            # ('conv2d4', layers.Conv2DLayer),
            # ('maxpool4', layers.MaxPool2DLayer),
            ('dropout1', layers.DropoutLayer),
            ('dropout2', layers.DropoutLayer),
            ('dense', layers.DenseLayer),
            # ('dense2', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        input_shape=input_image_vector_shape,
        conv2d1_num_filters=128,
        conv2d1_filter_size=(3, 3),
        conv2d1_nonlinearity=lasagne.nonlinearities.tanh,
        conv2d1_W=lasagne.init.GlorotUniform(),
        conv2d1_pad=(2, 2),
        maxpool1_pool_size=(2, 2),
        conv2d2_num_filters=256,
        conv2d2_filter_size=(3, 3),
        conv2d2_nonlinearity=lasagne.nonlinearities.rectify,
        conv2d2_pad=(2, 2),
        maxpool2_pool_size=(2, 2),
        conv2d3_num_filters=512,
        conv2d3_filter_size=(3, 3),
        conv2d3_nonlinearity=lasagne.nonlinearities.rectify,
        conv2d3_pad=(2, 2),
        maxpool3_pool_size=(2, 2),
        dropout1_p=0.5,
        dropout2_p=0.5,
        dense_num_units=8192,
        dense_nonlinearity=lasagne.nonlinearities.rectify,

        # dense2_num_units = 16,
        # dense2_nonlinearity = lasagne.nonlinearities.rectify,
        output_nonlinearity=lasagne.nonlinearities.softmax,
        output_num_units=62,
        update=momentum,
        # 75.5% with tanh init, dense num = 256
        update_learning_rate=0.03,
        update_momentum=0.8,
        max_epochs=1000,
        verbose=1,
    )
    print("Loading Neural Net Parameters...")
    net1.initialize_layers()
    net1.load_weights_from('{}_weightfile.w'.format(model_path))
    net1.load_params_from('{}_paramfile.w'.format(model_path))

    from sklearn.metrics import classification_report, accuracy_score, confusion_matrix

    print('Testing...')
    y_true, y_pred = Y_val, net1.predict(X_val)  # Get our predictions
    print(classification_report(y_true,
                                y_pred))  # Classification on each digit

    print(net1.predict(X_val))
    print(Y_val)
    a = confusion_matrix(Y_val, net1.predict(X_val))
    b = np.trace(a)
    print('Training Accuracy: ' + str(float(b) / float(np.sum(a))))
Example #33
    (DenseLayer, {
        'num_units': 128
    }),

    # the output layer
    (DenseLayer, {
        'num_units': 10,
        'nonlinearity': softmax
    }),
]

net0 = NeuralNet(
    layers=layers0,
    max_epochs=50,
    update=adam,
    update_learning_rate=0.0002,
    #     update_momentum=0.9,
    objective_l2=0.0025,
    train_split=TrainSplit(eval_size=0.1),
    verbose=2,
)

# net0.initialize()
# layer_info = PrintLayerInfo()
# layer_info(net0)
net0.fit(X_train, y_train)
result = net0.predict(X_test)
np.save('cnn_res.npy', result)

#fName = "./features/cnn.pkl"
#print("Saving CNN to ./features/cnn.pkl")
#with open(fName, "w") as f:
Example #34
def main():
    print(" - Start.")
    t0 = time()
    trainpath = "../data/train.csv"
    testpath = "../data/test.csv"
    subpath = "sub.csv"

    X_all, y_all, ids_all = load_shuf_all_data_ids(trainpath)
    #    X_all = X_all[:20000]
    #    y_all = y_all[:20000]
    encoder = skl_pre.LabelEncoder()
    y_all = encoder.fit_transform(y_all).astype(np.int32)
    print('X_all:', X_all)
    print('y_all:', y_all)

    # new features
    #    f1=True ; f2=True ; f3=True ; f4=True ; str_opt = '_f1234'
    f1 = False
    f2 = False
    f3 = False
    f4 = False
    str_opt = '_'
    X_all = F_addnewfeats(X_all, f1, f2, f3, f4)

    # to log scale
    LOG = True
    if LOG:
        print('\n log(1+x) to all features...')
        X_all = np.log1p(X_all)
        str_opt = str_opt + 'l'

    # scaling
    SCA = True
    if SCA:
        print('\n Scaling features...')
        scaler = skl_pre.StandardScaler().fit(X_all)
        X_all = scaler.transform(X_all)
        str_opt = str_opt + 'st'

    # PCA
    PCA = False
    if PCA:
        print('\n PCA...')
        pca = skl_dec.PCA(n_components='mle').fit(X_all)
        print('   ... num components: %i , variance retained: %.2f' %
              (len(pca.components_), sum(pca.explained_variance_ratio_)))
        X_all = pca.transform(X_all)
        print('\n X_all[0]:', X_all[0])
        str_opt = str_opt + ('p%.2f' % sum(pca.explained_variance_ratio_))

    # Prepare neural network:
    num_classes = len(encoder.classes_)
    num_features = X_all.shape[1]

    layers0 = [
        ('input', las_lay.InputLayer),
        ('dropout0', las_lay.DropoutLayer),
        ('hidden1', las_lay.DenseLayer),
        ('dropout1', las_lay.DropoutLayer),
        ('hidden2', las_lay.DenseLayer),
        ('dropout2', las_lay.DropoutLayer),
        #               ('hidden3', las_lay.DenseLayer),
        ('output', las_lay.DenseLayer)
    ]

    NNargs = dict(
        layers=layers0,
        input_shape=(None, num_features),
        dropout0_p=0.15,
        hidden1_num_units=1000,
        dropout1_p=0.25,
        hidden2_num_units=500,
        dropout2_p=0.25,
        #                  hidden3_num_units=150,
        #                  hidden*_nonlinearity=rectifier by default, try softmax
        output_num_units=num_classes,
        output_nonlinearity=softmax,
        #
        update=adagrad,  # nesterov_momentum, # adagrad, rmsprop
        update_learning_rate=theano.shared(float32(0.03)),  # 0.01,
        # update_momentum=theano.shared(float32(0.9)), # 0.9, ONLY USE WITH nesterov_momentum
        on_epoch_finished=[
            AdjustVariable('update_learning_rate', start=0.03, stop=0.0001),
            #                                     AdjustVariable('update_momentum', start=0.9, stop=0.999),  # ONLY USE WITH nesterov_momentum
            EarlyStopping(patience=10),
        ],
        #
        eval_size=0.2,  # this fraction of the set is used for validation (test)
        verbose=1,
        max_epochs=150)  #,
    #		  objective = MyObjective)
    global GLOBrealnumepochs
    GLOBrealnumepochs = NNargs["max_epochs"]
    clf = NeuralNet(**NNargs)

    multiclass_log_loss = skl_met.make_scorer(score_func=logloss_mc,
                                              greater_is_better=False,
                                              needs_proba=True)
    num_cores = multiprocessing.cpu_count()

    # single run:
    SINGLE = False
    if SINGLE:
        clf.fit(X_all, y_all)
        train_loss = np.array([i["train_loss"] for i in clf.train_history_])
        valid_loss = np.array([i["valid_loss"] for i in clf.train_history_])
        pyplot.plot(train_loss, linewidth=3, label="train")
        pyplot.plot(valid_loss, linewidth=3, label="valid")
        pyplot.grid()
        pyplot.legend()
        pyplot.xlabel("epoch")
        pyplot.ylabel("loss")
        pyplot.ylim(0.4, 0.8)
        pyplot.yscale("log")
        #        pyplot.show()
        pyplot.savefig('learningcurves.png')

    # cv score on 1 classifier
    CV = False
    if CV:
        scores = skl_cv.cross_val_score(clf,
                                        X_all,
                                        y_all,
                                        cv=3,
                                        scoring=multiclass_log_loss,
                                        n_jobs=3)
        print('\n\n ... done in %.0fs\n\n' % (time() - t0))
        print('\n cv scores of 1 classifier:', scores)
        print('\n mean score =', np.mean(scores))
        sys.stdout.flush()

    # grid
    GRID = False
    if GRID:
        print('\n\n ** GRID SEARCH ** \n\n')
        grid_clf = skl_grid.GridSearchCV(
            clf,
            param_grid={
                'hidden1_num_units':
                [3000, 4000],  #[100, 150, 200, 250, 300, 400, 600],
                'dropout1_p': [0.6, 0.8],  #, 0.5, 0.7],
                'hidden2_num_units': [300, 500]
            },
            scoring=multiclass_log_loss,
            cv=3,
            n_jobs=3)
        print(' ... grid created')

        print('\n Fitting grid...')
        tfit = time()
        sys.stdout.flush()
        grid_clf.fit(X_all, y_all)
        print(' ... grid fitted in %.2fs' % (time() - tfit))
        print("\n Grid scores:")
        for params, mean_score, scores in grid_clf.grid_scores_:
            print("%0.5f (+/-%0.03f) for %r" %
                  (mean_score, scores.std() / 2, params))
        print("\n Best estimator:")
        print(grid_clf.best_estimator_)
        print("\n Best params:")
        print(grid_clf.best_params_)
        print("\n Best score:")
        print(grid_clf.best_score_)

    CV4ENS = True
    if CV4ENS:
        # writing the predictions of 5 CV in files for future testing of ensemble
        n_folds = 5
        kf = skl_cv.KFold(len(X_all), n_folds=n_folds, random_state=13)
        scores = []
        ids_total = np.array([])
        y_prob_total = np.empty((1, num_classes))
        for train, test in kf:
            X_train, X_test, y_train, y_test = X_all[train], X_all[
                test], y_all[train], y_all[test]
            ids_total = np.append(ids_total, ids_all[test])
            BAG = True
            if BAG:
                print(
                    "\n\n Fit and predict for different realizations of same architecture (diff seeds)"
                )
                num_bags = 5
                best_max_epochs = {}
                for i in range(num_bags):
                    best_max_epochs[i] = 0
                #                best_max_epochs = { 0: 1, 1:1, 2:1, 3:1, 4:1} #68, 1: 57, 2: 46} # zeros for bags you already dont know best n_epochs
                assert len(best_max_epochs) == num_bags
                probs_bags = Parallel(n_jobs=num_cores)(
                    delayed(calc_prob_bag)(i, best_max_epochs[i], NNargs,
                                           X_all, y_all, X_test)
                    for i in range(num_bags))
                y_prob = sum(probs_bags) / num_bags
                print('y_prob:', np.shape(y_prob), y_prob)
                y_prob_total = np.concatenate((y_prob_total, y_prob), axis=0)
            else:
                clf = NeuralNet(**NNargs)
                clf.fit(X_train, y_train)
                y_prob = clf.predict_proba(X_test)
                y_prob_total = np.concatenate((y_prob_total, y_prob), axis=0)
        y_prob_total = np.delete(y_prob_total, 0,
                                 0)  # removes first row (created by np.empty)
        #        subfile = 'LNN4' + str_opt + '_e%i_h%i_d%.1f_h%i_CVALL.csv' % (NNargs["max_epochs"],NNargs["hidden1_num_units"],
        #                                                                       NNargs["dropout1_p"], NNargs["hidden2_num_units"])
        subfile = 'LNN4sbag%i%s_d%.2f_h%i_d%.2f_h%i_d%.2f_CVALL.csv' % (
            num_bags, str_opt, NNargs["dropout0_p"],
            NNargs["hidden1_num_units"], NNargs["dropout1_p"],
            NNargs["hidden2_num_units"], NNargs["dropout2_p"])
        #        subfile = 'LNN5' + str_opt + '_e%i_h%i_d%.1f_h%i_d%.1f_h%i_CVALL.csv' % (NNargs["max_epochs"], NNargs["hidden1_num_units"],
        #                                                                NNargs["dropout1_p"], NNargs["hidden2_num_units"],
        #                                                               NNargs["dropout2_p"], NNargs["hidden3_num_units"])

        write_submission(ids_total, encoder, y_prob_total, path=subfile)

    SUB = False
    if SUB:
        print("\n\n  Starting submission process...")
        #        encoder = skl_pre.LabelEncoder()
        #        y_true = encoder.fit_transform(y_all)
        #        assert (encoder.classes_ == clf_final.classes_).all()
        X_test, ids = load_test_data(path=testpath)
        X_test = F_addnewfeats(X_test, f1, f2, f3, f4)
        if LOG: X_test = np.log1p(X_test)
        if SCA: X_test = scaler.transform(X_test)
        if PCA: X_test = pca.transform(X_test)

        if GRID:
            print('\n      Setting NN params to best values in the grid...')
            NNargs["hidden1_num_units"] = grid_clf.best_params_[
                'hidden1_num_units']
            NNargs["dropout1_p"] = grid_clf.best_params_['dropout1_p']
            NNargs["hidden2_num_units"] = grid_clf.best_params_[
                'hidden2_num_units']

        RECALCEPOC = False
        if RECALCEPOC:
            print('\n      Refitting to find the optimal number of epochs (with smaller eval_size and more patience)...')
            # just a small eval set, to derive an optimal number of epochs
            # closer to the final training on the full set
            NNargs["eval_size"] = 0.05
            if len(NNargs["on_epoch_finished"]) == 3:
                NNargs["on_epoch_finished"][-1] = EarlyStopping(
                    patience=25)  # more patience for final sub
            clf_final = NeuralNet(**NNargs)
            clf_final.fit(X_all, y_all)
            print('        ... done refitting to obtain GLOBrealnumepochs:', GLOBrealnumepochs)

        BAG = True
        if BAG:
            # see https://www.kaggle.com/c/otto-group-product-classification-challenge/forums/t/13851/lasagne-with-2-hidden-layers
            print(
                "\n\n Fit and predict for different realizations of same architecture (diff seeds)"
            )
            num_bags = 5
            best_max_epochs = {i: 0 for i in range(num_bags)}
            # e.g. {0: 68, 1: 57, 2: 46}; zeros for bags whose best n_epochs is not yet known
            assert len(best_max_epochs) == num_bags
            probs_bags = Parallel(n_jobs=num_cores)(delayed(calc_prob_bag)(
                i, best_max_epochs[i], NNargs, X_all, y_all, X_test)
                                                    for i in range(num_bags))
            probs_final = sum(probs_bags) / num_bags

            subfile = 'LNN4bag%i%s_d%.2f_h%i_d%.2f_h%i_d%.2f.csv' % (
                num_bags, str_opt, NNargs["dropout0_p"],
                NNargs["hidden1_num_units"], NNargs["dropout1_p"],
                NNargs["hidden2_num_units"], NNargs["dropout2_p"])
            print("\n writing submission to file: %s" % subfile)
            write_submission(ids, encoder, probs_final, path=subfile)
            sys.exit()

        # re-set properties to train with all set
        NNargs["eval_size"] = 0.0001
        if len(NNargs["on_epoch_finished"]) == 3:
            del NNargs["on_epoch_finished"][-1]  # removes the EarlyStopping
        NNargs["max_epochs"] = GLOBrealnumepochs

        clf_final = NeuralNet(**NNargs)

        print("\n  Writing submission file for best estimator")
        #        subfile = 'LNN4' + str_opt + '_e%i_h%i_d%.1f_h%i.csv' % (NNargs["max_epochs"], NNargs["hidden1_num_units"],
        #                                                                NNargs["dropout1_p"], NNargs["hidden2_num_units"])
        subfile = 'LNN5' + str_opt + '_e%i_h%i_d%.1f_h%i_d%.1f_h%i.csv' % (
            NNargs["max_epochs"], NNargs["hidden1_num_units"],
            NNargs["dropout1_p"], NNargs["hidden2_num_units"],
            NNargs["dropout2_p"], NNargs["hidden3_num_units"])
        print('\n name:', subfile, '\n')

        print("   re-fitting with all training set...")
        clf_final.fit(X_all, y_all)

        make_submission(X_test, ids, clf_final, encoder, path=subfile)
Example #35
    (DropoutLayer, {'p': 0.5}),
    (DenseLayer, {'num_units': 512}),

    # the output layer
    (DropoutLayer, {'p': 0.5}),
    (DenseLayer, {'num_units': 2, 'nonlinearity': softmax}),
]

# Network parameters
net0 = NeuralNet(
    layers=layers0,
    max_epochs=100,
    batch_iterator_train = BatchIterator(batch_size=100, shuffle=True),

    update=nesterov_momentum,
    update_learning_rate=0.01,

    objective_l2=0.001,

    train_split=TrainSplit(eval_size=0.2),
    verbose=2,
)

# Train
net0.fit(x_train, y_train)

# Plot learning curve
plot_loss(net0)

# Plot learned filters
#plot_conv_weights(net0.layers_[1], figsize=(4, 4))  # Layer 1 (conv1)
Example #36
net1 = NeuralNet(
    layers=[("input", layers.InputLayer),
            ("conv2d1", layers.Conv2DLayer),
            ("maxpool1", layers.MaxPool2DLayer),
            ("conv2d2", layers.Conv2DLayer),
            ("maxpool2", layers.MaxPool2DLayer),
            ("dropout1", layers.DropoutLayer),
            ("dense", layers.DenseLayer),
            ("dropout2", layers.DropoutLayer),
            ("output", layers.DenseLayer)],
    # input layer
    input_shape=(None, 1, 28, 28),
    # layer conv2d1
    conv2d1_num_filters=32,
    conv2d1_filter_size=(5, 5),
    conv2d1_nonlinearity=lasagne.nonlinearities.rectify,
    conv2d1_W=lasagne.init.GlorotUniform(),
    # layer maxpool1
    maxpool1_pool_size=(2, 2),
    # layer conv2d2
    conv2d2_num_filters=32,
    conv2d2_filter_size=(5, 5),
    conv2d2_nonlinearity=lasagne.nonlinearities.rectify,
    # layer maxpool2
    maxpool2_pool_size=(2, 2),
    # dropout1
    dropout1_p=0.5,
    # dense
    dense_num_units=256,
    dense_nonlinearity=lasagne.nonlinearities.rectify,
    # dropout2
    dropout2_p=0.5,
    # output
    output_nonlinearity=lasagne.nonlinearities.softmax,
    output_num_units=10,
    # optimization method params
    update=nesterov_momentum,
    update_learning_rate=0.01,
    update_momentum=0.9,
    max_epochs=10,
    verbose=1,
)
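This is the familiar MNIST convnet configuration; a hypothetical fit/predict round trip (array names and dtypes are assumptions):

# X_train: float32 array of shape (N, 1, 28, 28); y_train: int32 labels in 0..9
net1.fit(X_train, y_train)
preds = net1.predict(X_test)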
Example #37
    else:
        y = None

    return X, y


net1 = NeuralNet(
    layers=[  # three layers: one hidden layer
        ('input', layers.InputLayer),
        ('hidden', layers.DenseLayer),
        ('output', layers.DenseLayer),
    ],
    # layer parameters:
    input_shape=(None, 9216),  # 96x96 input pixels per batch
    hidden_num_units=100,  # number of units in hidden layer
    output_nonlinearity=None,  # output layer uses identity function
    output_num_units=30,  # output layer has 30 units (target values)

    # optimization method:
    update=nesterov_momentum,
    update_learning_rate=0.01,
    update_momentum=0.9,
    regression=True,  # flag to indicate we're dealing with a regression problem
    max_epochs=400,  # we want to train this many epochs
    verbose=1,
)

X, y = load()
print("X.shape == {}; X.min == {:.3f}; X.max == {:.3f}".format(
    X.shape, X.min(), X.max()))
print("y.shape == {}; y.min == {:.3f}; y.max == {:.3f}".format(
    y.shape, y.min(), y.max()))
Example #38
    def test_legacy_eval_size(self, NeuralNet):
        net = NeuralNet([], eval_size=0.3, max_epochs=0)
        assert net.train_split.eval_size == 0.3
Example #39
    def test_initialization_with_layer_instance_bad_params(self, NeuralNet):
        layer = DenseLayer(InputLayer(shape=(128, 13)), num_units=2)
        nn = NeuralNet(layers=layer, dense1_num_units=3)
        with pytest.raises(ValueError):
            nn.initialize_layers()
Example #40
net = NeuralNet(
    layers=[
        ('input', layers.InputLayer),
        ('conv2d1', layers.Conv2DLayer),
        ('maxpool1', layers.MaxPool2DLayer),
        # ('dropout1', layers.DropoutLayer),
        ('conv2d2', layers.Conv2DLayer),
        ('maxpool2', layers.MaxPool2DLayer),
        ('conv2d3', layers.Conv2DLayer),
        ('maxpool3', layers.MaxPool2DLayer),
        ('conv2d4', layers.Conv2DLayer),
        ('maxpool4', layers.MaxPool2DLayer),
        ('dense1', layers.DenseLayer),
        ('dropout2', layers.DropoutLayer),
        ('output', layers.DenseLayer)
    ],
    # input layer descriptors
    input_shape=(None, 1, PIXELS, PIXELS),

    # convolution layer descriptors
    conv2d1_num_filters=16,
    conv2d1_filter_size=(5, 5),
    conv2d1_nonlinearity=lasagne.nonlinearities.rectify,
    conv2d1_W=lasagne.init.GlorotUniform(),

    # maxpool layer descriptors
    maxpool1_pool_size=(2, 2),

    # dropout

    # dropout1_p = 0.5,

    # convolution layer descriptors
    conv2d2_num_filters=32,
    conv2d2_filter_size=(3, 3),
    conv2d2_nonlinearity=lasagne.nonlinearities.rectify,
    conv2d2_W=lasagne.init.GlorotUniform(),

    # maxpool layer descriptors
    maxpool2_pool_size=(2, 2),

    # convolution layer descriptors
    conv2d3_num_filters=64,
    conv2d3_filter_size=(3, 3),
    conv2d3_nonlinearity=lasagne.nonlinearities.rectify,
    conv2d3_W=lasagne.init.GlorotUniform(),
    maxpool3_pool_size=(2, 2),

    # convolution layer descriptors
    conv2d4_num_filters=128,
    conv2d4_filter_size=(3, 3),
    conv2d4_nonlinearity=lasagne.nonlinearities.rectify,
    conv2d4_W=lasagne.init.GlorotUniform(),

    # maxpool layer descriptors
    maxpool4_pool_size=(2, 2),
    dense1_num_units=128,

    # dropout layer descriptors
    dropout2_p=0.5,

    # output layer descriptors
    output_nonlinearity=lasagne.nonlinearities.softmax,
    output_num_units=10,

    #optimization parameters
    update=nesterov_momentum,
    update_learning_rate=0.01,
    max_epochs=max_epochs,
    verbose=1000000)
Example #41
def train():
    weather = load_weather()
    training = load_training()

    X = assemble_X(training, weather)
    mean, std = normalize(X)
    y = assemble_y(training)

    input_size = len(X[0])

    learning_rate = theano.shared(np.float32(0.1))

    net = NeuralNet(
        layers=[
            ('input', InputLayer),
            ('hidden1', DenseLayer),
            ('dropout1', DropoutLayer),
            ('hidden2', DenseLayer),
            ('dropout2', DropoutLayer),
            ('hidden3', DenseLayer),
            ('dropout3', DropoutLayer),
            ('output', DenseLayer),
        ],
        # layer parameters:
        input_shape=(None, input_size),
        hidden1_num_units=325,
        dropout1_p=0.4,
        hidden2_num_units=325,
        dropout2_p=0.4,
        hidden3_num_units=160,
        dropout3_p=0.4,
        output_nonlinearity=sigmoid,
        output_num_units=1,

        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=learning_rate,
        update_momentum=0.9,

        # Decay the learning rate
        on_epoch_finished=[
            AdjustVariable(learning_rate, target=0, half_life=4),
        ],

        # This is silly, but we don't want a stratified K-Fold here
        # To compensate we need to pass in the y_tensor_type and the loss.
        regression=True,
        y_tensor_type=T.imatrix,
        objective_loss_function=binary_crossentropy,
        max_epochs=75,
        eval_size=0.1,
        verbose=1,
    )

    X, y = shuffle(X, y, random_state=123)
    net.fit(X, y)

    _, X_valid, _, y_valid = net.train_test_split(X, y, net.eval_size)
    probas = net.predict_proba(X_valid)[:, 0]
    print("ROC score", metrics.roc_auc_score(y_valid, probas))

    return net, mean, std
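
AdjustVariable is a user-defined epoch callback, not part of nolearn; a plausible sketch matching the call signature above (exponential decay of the shared learning rate toward target with the given half-life):

import numpy as np

class AdjustVariable(object):
    def __init__(self, variable, target=0, half_life=20):
        self.variable = variable
        self.target = target
        self.half_life = half_life

    def __call__(self, nn, train_history):
        # Halve the distance to the target every `half_life` epochs.
        delta = self.variable.get_value() - self.target
        delta *= 0.5 ** (1.0 / self.half_life)
        self.variable.set_value(np.float32(self.target + delta))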
Example #42
net8 = NeuralNet(layers=[('input', layers.InputLayer),
                         ('conv1', layers.Conv2DLayer),
                         ('pool1', layers.MaxPool2DLayer),
                         ('dropout1', layers.DropoutLayer),
                         ('conv2', layers.Conv2DLayer),
                         ('pool2', layers.MaxPool2DLayer),
                         ('dropout2', layers.DropoutLayer),
                         ('conv3', layers.Conv2DLayer),
                         ('pool3', layers.MaxPool2DLayer),
                         ('dropout3', layers.DropoutLayer),
                         ('hidden4', layers.DenseLayer),
                         ('dropout4', layers.DropoutLayer),
                         ('hidden5', layers.DenseLayer),
                         ('output', layers.DenseLayer)],
                 input_shape=(None, 1, 96, 96),
                 conv1_num_filters=32,
                 conv1_filter_size=(3, 3),
                 pool1_pool_size=(2, 2),
                 dropout1_p=0.1,
                 conv2_num_filters=64,
                 conv2_filter_size=(2, 2),
                 pool2_pool_size=(2, 2),
                 dropout2_p=0.2,
                 conv3_num_filters=128,
                 conv3_filter_size=(2, 2),
                 pool3_pool_size=(2, 2),
                 dropout3_p=0.3,
                 hidden4_num_units=1000,
                 dropout4_p=0.5,
                 hidden5_num_units=1000,
                 output_num_units=30,
                 output_nonlinearity=None,
                 update_learning_rate=theano.shared(float32(0.03)),
                 update_momentum=theano.shared(float32(0.9)),
                 regression=True,
                 batch_iterator_train=FlipBatchIterator(batch_size=128),
                 on_epoch_finished=[
                     AdjustVariable('update_learning_rate',
                                    start=0.03,
                                    stop=0.0001),
                     AdjustVariable('update_momentum', start=0.9, stop=0.999),
                     EarlyStopping(patience=200)
                 ],
                 max_epochs=10000,
                 verbose=1)
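
EarlyStopping here is likewise user code; a common implementation (a sketch along the lines of Daniel Nouri's facial-keypoints tutorial) remembers the best weights seen and aborts training once the validation loss has not improved for `patience` epochs:

import numpy as np

class EarlyStopping(object):
    def __init__(self, patience=100):
        self.patience = patience
        self.best_valid = np.inf
        self.best_valid_epoch = 0
        self.best_weights = None

    def __call__(self, nn, train_history):
        current_valid = train_history[-1]['valid_loss']
        current_epoch = train_history[-1]['epoch']
        if current_valid < self.best_valid:
            self.best_valid = current_valid
            self.best_valid_epoch = current_epoch
            self.best_weights = nn.get_all_params_values()
        elif self.best_valid_epoch + self.patience < current_epoch:
            print("Early stopping; best valid loss {:.6f} at epoch {}.".format(
                self.best_valid, self.best_valid_epoch))
            nn.load_params_from(self.best_weights)
            raise StopIteration()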
Example #43
from nolearn.lasagne import NeuralNet
from nnet.prepare import normalize_data
from settings import NCLASSES, VERBOSITY
from utils.shuffling import shuffle

X, y, features = normalize_data()

net = NeuralNet(
    layers=[
        ('input', layers.InputLayer),
        ('hidden', layers.DenseLayer),
        ('output', layers.DenseLayer),
    ],
    input_shape=(128, 93),
    hidden_num_units=25,
    output_nonlinearity=softmax,
    output_num_units=NCLASSES,
    update=nesterov_momentum,
    update_learning_rate=0.0001,
    update_momentum=0.9,
    regression=False,
    max_epochs=500,
    verbose=bool(VERBOSITY),
)

B, j, key = shuffle(data=X, classes=y)
j -= 1
print(B.shape)
out = net.fit(B, j)

prediction = net.predict(B)
Example #44
net2_aug = NeuralNet(
    layers=[
        ('input', layers.InputLayer),
        ('conv1', layers.Conv2DLayer),
        ('pool1', layers.MaxPool2DLayer),
        ('conv2', layers.Conv2DLayer),
        ('pool2', layers.MaxPool2DLayer),
        ('conv3', layers.Conv2DLayer),
        ('pool3', layers.MaxPool2DLayer),
        ('hidden4', layers.DenseLayer),
        ('hidden5', layers.DenseLayer),
        ('output', layers.DenseLayer),
    ],
    input_shape=(None, 1, 96, 96),
    conv1_num_filters=32,
    conv1_filter_size=(3, 3),
    pool1_pool_size=(2, 2),
    conv2_num_filters=64,
    conv2_filter_size=(2, 2),
    pool2_pool_size=(2, 2),
    conv3_num_filters=128,
    conv3_filter_size=(2, 2),
    pool3_pool_size=(2, 2),
    hidden4_num_units=500,
    hidden5_num_units=500,
    output_num_units=30,
    output_nonlinearity=None,
    update_learning_rate=0.01,
    update_momentum=0.9,
    regression=True,
    batch_iterator_train=FlipBatchIterator(batch_size=128),
    max_epochs=1000,
    verbose=1,
)
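
Both net8 (Example #42) and net2_aug above train through a FlipBatchIterator. A sketch of the idea, in the same tutorial lineage; the original version also swaps the paired left/right keypoint columns in y, which is elided here:

import numpy as np
from nolearn.lasagne import BatchIterator

class FlipBatchIterator(BatchIterator):
    def transform(self, Xb, yb):
        Xb, yb = super(FlipBatchIterator, self).transform(Xb, yb)
        # Mirror a random half of each batch along the width axis.
        bs = Xb.shape[0]
        indices = np.random.choice(bs, bs // 2, replace=False)
        Xb[indices] = Xb[indices, :, :, ::-1]
        if yb is not None:
            # Keypoint x-coordinates are scaled to [-1, 1]; negate them.
            yb[indices, ::2] = yb[indices, ::2] * -1
        return Xb, yb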
Example #45
    ('output', DenseLayer),
]

ae = NeuralNet(
    layers=layers,
    max_epochs=50,
    update=nesterov_momentum,
    update_learning_rate=0.01,
    update_momentum=0.975,
    input_shape=(None, num_features),
    dense_num_units=64,
    narrow_num_units=48,
    denseReverse1_num_units=64,
    denseReverse2_num_units=128,
    output_num_units=128,

    #input_nonlinearity = None, #nonlinearities.sigmoid,
    #dense_nonlinearity = nonlinearities.tanh,
    narrow_nonlinearity=nonlinearities.softplus,
    #denseReverse1_nonlinearity = nonlinearities.tanh,
    denseReverse2_nonlinearity=nonlinearities.softplus,
    output_nonlinearity=nonlinearities.linear,  #nonlinearities.softmax,

    #dropout0_p=0.1,
    dropout1_p=0.01,
    dropout2_p=0.001,
    regression=True,
    verbose=1)

ae.initialize()
PrintLayerInfo()(ae)
Example #46
    def classif(self, NeuralNet, X, y):
        l = InputLayer(shape=(None, X.shape[1]))
        l = DenseLayer(l, num_units=len(np.unique(y)), nonlinearity=softmax)
        net = NeuralNet(l, update_learning_rate=0.01)
        return net.fit(X, y)
Example #47
def NN():

    net = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv1', layers.Conv2DLayer),  # 2C 1MP
            ('conv2', layers.Conv2DLayer),
            ('pool1', layers.MaxPool2DLayer),
            ('cnndrop1', layers.DropoutLayer),
            ('conv3', layers.Conv2DLayer),
            ('conv4', layers.Conv2DLayer),  # 2C 1 MP
            ('pool2', layers.MaxPool2DLayer),
            ('cnndrop2', layers.DropoutLayer),
            ('conv5', layers.Conv2DLayer),
            ('conv6', layers.Conv2DLayer),  # 2C 1MP
            ('conv7', layers.Conv2DLayer),
            ('pool3', layers.MaxPool2DLayer),
            ('cnndrop3', layers.DropoutLayer),
            ('conv8', layers.Conv2DLayer),
            ('conv9', layers.Conv2DLayer),  # 3C 1MP
            ('conv10', layers.Conv2DLayer),
            ('pool4', layers.MaxPool2DLayer),
            ('cnndrop4', layers.DropoutLayer),
            ('conv11', layers.Conv2DLayer),
            ('conv12', layers.Conv2DLayer),
            ('pool5', layers.MaxPool2DLayer),  # 2C 1MP
            ('dropout1', layers.DropoutLayer),
            ('hidden1', layers.DenseLayer),
            ('maxout1', layers.pool.FeaturePoolLayer),
            ('dropout2', layers.DropoutLayer),
            ('hidden2', layers.DenseLayer),
            ('maxout2', layers.pool.FeaturePoolLayer),
            ('output', layers.DenseLayer)
        ],
        input_shape=(None, 1, 512, 512),
        conv1_num_filters=16,
        conv1_filter_size=(5, 5),
        conv1_stride=(2, 2),
        conv1_nonlinearity=lasagne.nonlinearities.LeakyRectify(0.01),
        conv1_pad=2,
        conv2_num_filters=16,
        conv2_filter_size=(3, 3),
        conv2_stride=(1, 1),
        conv2_nonlinearity=lasagne.nonlinearities.LeakyRectify(0.01),
        conv2_pad=1,
        pool1_pool_size=(2, 2),
        cnndrop1_p=0.1,
        conv3_num_filters=32,
        conv3_filter_size=(3, 3),
        conv3_stride=(2, 2),
        conv3_nonlinearity=lasagne.nonlinearities.LeakyRectify(0.01),
        conv3_pad=1,
        conv4_num_filters=32,
        conv4_filter_size=(3, 3),
        conv4_stride=(1, 1),
        conv4_nonlinearity=lasagne.nonlinearities.LeakyRectify(0.01),
        conv4_pad=1,
        pool2_pool_size=(2, 2),
        cnndrop2_p=0.2,
        conv5_num_filters=48,
        conv5_filter_size=(3, 3),
        conv5_stride=(1, 1),
        conv5_nonlinearity=lasagne.nonlinearities.LeakyRectify(0.01),
        conv5_pad=1,
        conv6_num_filters=48,
        conv6_filter_size=(3, 3),
        conv6_stride=(1, 1),
        conv6_nonlinearity=lasagne.nonlinearities.LeakyRectify(0.01),
        conv6_pad=1,
        conv7_num_filters=48,
        conv7_filter_size=(3, 3),
        conv7_stride=(1, 1),
        conv7_nonlinearity=lasagne.nonlinearities.LeakyRectify(0.01),
        conv7_pad=1,
        pool3_pool_size=(2, 2),
        cnndrop3_p=0.3,
        conv8_num_filters=64,
        conv8_filter_size=(3, 3),
        conv8_stride=(1, 1),
        conv8_nonlinearity=lasagne.nonlinearities.LeakyRectify(0.01),
        conv8_pad=1,
        conv9_num_filters=64,
        conv9_filter_size=(3, 3),
        conv9_stride=(1, 1),
        conv9_nonlinearity=lasagne.nonlinearities.LeakyRectify(0.01),
        conv9_pad=1,
        conv10_num_filters=64,
        conv10_filter_size=(3, 3),
        conv10_stride=(1, 1),
        conv10_nonlinearity=lasagne.nonlinearities.LeakyRectify(0.01),
        conv10_pad=1,
        pool4_pool_size=(2, 2),
        cnndrop4_p=0.4,
        conv11_num_filters=128,
        conv11_filter_size=(3, 3),
        conv11_stride=(1, 1),
        conv11_nonlinearity=lasagne.nonlinearities.LeakyRectify(0.01),
        conv11_pad=1,
        conv12_num_filters=128,
        conv12_filter_size=(3, 3),
        conv12_stride=(1, 1),
        conv12_nonlinearity=lasagne.nonlinearities.LeakyRectify(0.01),
        conv12_pad=1,
        pool5_pool_size=(2, 2),
        dropout1_p=0.5,
        hidden1_num_units=128,
        hidden1_nonlinearity=lasagne.nonlinearities.LeakyRectify(0.01),
        maxout1_pool_size=2,
        dropout2_p=0.5,
        hidden2_num_units=128,
        hidden2_nonlinearity=lasagne.nonlinearities.LeakyRectify(0.01),
        maxout2_pool_size=2,

        #regression = True,
        # output_nonlinearity=None,
        output_num_units=5,
        output_nonlinearity=lasagne.nonlinearities.softmax,
        update=nesterov_momentum,
        update_learning_rate=theano.shared(np.cast['float32'](0.003)),
        #update_learning_rate = 0.003,
        update_momentum=0.9,
        on_epoch_finished=[updateRate.AdjustVariable('update_learning_rate')],

        # objective_l2 = 0.0005,
        max_epochs=300,
        verbose=1,
    )

    return net
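
A hypothetical driver for NN(), assuming single-channel 512x512 images already loaded as a float32 array (every name below is illustrative):

net = NN()
X = images.reshape(-1, 1, 512, 512).astype(np.float32)  # images: (n, 512, 512)
y = labels.astype(np.int32)                              # five classes, 0-4
net.fit(X, y)
probas = net.predict_proba(X[:32])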
Example #48
    def regr(self, NeuralNet, X, y):
        l = InputLayer(shape=(None, X.shape[1]))
        l = DenseLayer(l, num_units=y.shape[1], nonlinearity=None)
        net = NeuralNet(l, regression=True, update_learning_rate=0.01)
        return net.fit(X, y)
Example #49
net1 = NeuralNet(
    # defining the layers: three in total
    layers=[
        ('input', layers.InputLayer),
        ('hidden', layers.DenseLayer),
        ('output', layers.DenseLayer),
    ],

    input_shape=(None, 9216),  # 96*96 input pixels per batch
    hidden_num_units=100,  # number of units in hidden layer
    output_nonlinearity=None,  # output layer uses the identity function
    output_num_units=30,  # 30 target values

    # optimization method
    # update=nesterov_momentum,
    update=adagrad,
    update_learning_rate=0.01,
    # update_momentum=0.9,

    regression=True,  # flag to indicate we're dealing with a regression problem
    max_epochs=50,  # we want to train this many epochs
    verbose=1,
)
Example #50
                Xb[i, c][mask] = 0.0
        if yb is not None:
            yb = yb.astype(np.uint8)
        return Xb, yb


test_iterator = RadialBatchIterator(batch_size=1)

net = phf.build_GoogLeNet(PATCH_SIZE, PATCH_SIZE)

nn = NeuralNet(
    net['softmax'],
    max_epochs=1,
    update=adam,
    update_learning_rate=0.00014,  # start with a really low learning rate
    #objective_l2=0.0001,

    # batch iteration params
    batch_iterator_test=test_iterator,
    train_split=TrainSplit(eval_size=0.2),
    verbose=3,
)

nn.load_params_from(netfile)

nuclei_area = 0.0
mitosis_area = 0.0
num = 0

features = []

for img in images:
Example #51
def load_dbn(path='models/oulu_ae.mat'):
    """
    load a pretrained dbn from path
    :param path: path to the .mat dbn
    :return: pretrained deep belief network
    """
    # create the network using weights from pretrain_nn.mat
    nn = sio.loadmat(path)
    w1 = nn['w1']
    w2 = nn['w2']
    w3 = nn['w3']
    w4 = nn['w4']
    w5 = nn['w5']
    w6 = nn['w6']
    w7 = nn['w7']
    w8 = nn['w8']
    b1 = nn['b1'][0]
    b2 = nn['b2'][0]
    b3 = nn['b3'][0]
    b4 = nn['b4'][0]
    b5 = nn['b5'][0]
    b6 = nn['b6'][0]
    b7 = nn['b7'][0]
    b8 = nn['b8'][0]

    layers = [
        (InputLayer, {
            'name': 'input',
            'shape': (None, 1144)
        }),
        (DenseLayer, {
            'name': 'l1',
            'num_units': 2000,
            'nonlinearity': sigmoid,
            'W': w1,
            'b': b1
        }),
        (DenseLayer, {
            'name': 'l2',
            'num_units': 1000,
            'nonlinearity': sigmoid,
            'W': w2,
            'b': b2
        }),
        (DenseLayer, {
            'name': 'l3',
            'num_units': 500,
            'nonlinearity': sigmoid,
            'W': w3,
            'b': b3
        }),
        (DenseLayer, {
            'name': 'l4',
            'num_units': 50,
            'nonlinearity': linear,
            'W': w4,
            'b': b4
        }),
        (DenseLayer, {
            'name': 'l5',
            'num_units': 500,
            'nonlinearity': sigmoid,
            'W': w5,
            'b': b5
        }),
        (DenseLayer, {
            'name': 'l6',
            'num_units': 1000,
            'nonlinearity': sigmoid,
            'W': w6,
            'b': b6
        }),
        (DenseLayer, {
            'name': 'l7',
            'num_units': 2000,
            'nonlinearity': sigmoid,
            'W': w7,
            'b': b7
        }),
        (DenseLayer, {
            'name': 'output',
            'num_units': 1144,
            'nonlinearity': linear,
            'W': w8,
            'b': b8
        }),
    ]

    dbn = NeuralNet(
        layers=layers,
        max_epochs=30,
        objective_loss_function=squared_error,
        update=nesterov_momentum,
        regression=True,
        verbose=1,
        update_learning_rate=0.001,
        update_momentum=0.05,
        objective_l2=0.005,
    )
    return dbn
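
A hypothetical way to exercise the pretrained autoencoder, where X stands for an (n, 1144) float32 feature matrix:

dbn = load_dbn('models/oulu_ae.mat')
dbn.initialize()        # builds the network with the pretrained W and b values
recon = dbn.predict(X)  # reconstruction of the 1144-dim inputs
dbn.fit(X, X)           # optional fine-tuning: the target is the input itself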
Example #52
    (Conv2DLayer, {'num_filters': 128, 'filter_size': 3}),
    (MaxPool2DLayer, {'pool_size': 2}),

    (DenseLayer, {'num_units': 64}),
    (DropoutLayer, {}),
    (DenseLayer, {'num_units': 64}),

    (DenseLayer, {'num_units': 10, 'nonlinearity': softmax}),
]

# net
net0 = NeuralNet(
    layers = layers0,
    max_epochs = 10,
    update = adam, # For 'adam', a small learning rate is best
    update_learning_rate = 0.0002,
    objective_l2 = 0.0025, # L2 regularization
    train_split = TrainSplit(eval_size = 0.25),
    verbose = 1
)
net0.fit(X_train, y_train)


# visualization
from nolearn.lasagne.visualize import draw_to_notebook, plot_loss
from nolearn.lasagne.visualize import plot_conv_weights, plot_conv_activity
from nolearn.lasagne.visualize import plot_occlusion, plot_saliency

draw_to_notebook(net0)
plot_loss(net0)
# plot helps determine if we are overfitting
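
A sketch of the remaining imported helpers; the dict-style layer spec above leaves layers auto-named, so the layer is looked up by position here, and the figure sizes are arbitrary:

layer1 = list(net0.layers_.values())[1]   # the first Conv2DLayer, assumed
plot_conv_weights(layer1, figsize=(4, 4))
plot_conv_activity(layer1, X_train[0:1])  # one example, shape (1, c, h, w)
plot_occlusion(net0, X_train[:3], y_train[:3])
plot_saliency(net0, X_train[:3])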
Example #53
net = NeuralNet(
    layers=[
        ('input', InputLayer),
        ('hidden1', DenseLayer),
        ('dropout1', DropoutLayer),
        ('hidden2', DenseLayer),
        ('dropout2', DropoutLayer),
        ('output', DenseLayer),
    ],
    # layer parameters:
    input_shape=(None, input_size),
    hidden1_num_units=850,
    dropout1_p=0.1,
    hidden2_num_units=200,
    dropout2_p=0.10,
    output_nonlinearity=sigmoid,
    output_num_units=1,

    # optimization method:
    update=nesterov_momentum,
    update_learning_rate=learning_rate,
    update_momentum=0.91,

    # Decay the learning rate
    on_epoch_finished=[
        AdjustVariable(learning_rate, target=0, half_life=4),
    ],

    # This is silly, but we don't want a stratified K-Fold here
    # To compensate we need to pass in the y_tensor_type and the loss.
    regression=True,
    y_tensor_type=T.imatrix,
    objective_loss_function=binary_crossentropy,
    max_epochs=50,
    eval_size=0.2,
    verbose=1,
)
Example #54
net = NeuralNet(
    layers=[
        ('input', InputLayer),
        ('dropout0', DropoutLayer),
        ('dense1', DenseLayer),
        ('dropout1', DropoutLayer),
        ('dense2', DenseLayer),
        ('dropout2', DropoutLayer),
        ('dense3', DenseLayer),
        ('dropout3', DropoutLayer),
        ('output', DenseLayer),
    ],
    update=nesterov_momentum,  # TODO: optimize
    loss=None,
    objective=Objective,
    regression=False,
    max_epochs=1000,
    eval_size=0.1,
    #on_epoch_finished = None,
    #on_training_finished = None,
    verbose=bool(VERBOSITY),
    input_shape=(None, train.shape[1]),
    output_num_units=NCLASSES,
    dense1_num_units=500,
    dense2_num_units=500,
    dense3_num_units=400,
    dense1_nonlinearity=LeakyRectify(leakiness=0.1),
    dense2_nonlinearity=LeakyRectify(leakiness=0.1),
    dense3_nonlinearity=LeakyRectify(leakiness=0.1),
    output_nonlinearity=softmax,
    dense1_W=HeUniform(),
    dense2_W=HeUniform(),
    dense3_W=HeUniform(),
    dense1_b=Constant(0.),
    dense2_b=Constant(0.),
    dense3_b=Constant(0.),
    output_b=Constant(0.),
    dropout0_p=0.1,
    dropout1_p=0.6,
    dropout2_p=0.6,
    dropout3_p=0.6,
    update_learning_rate=shared(float32(0.02)),
    update_momentum=shared(float32(0.9)),
    batch_iterator_train=BatchIterator(batch_size=128),
    batch_iterator_test=BatchIterator(batch_size=128),
)
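
Several of these snippets (here and in Examples #42, #55, #56 and #57) lean on a small float32 helper so that Theano shared variables keep a GPU-friendly dtype; a sketch:

import numpy as np

def float32(k):
    # Cast a Python scalar so theano.shared(...) infers dtype float32.
    return np.cast['float32'](k)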
Example #55
net2 = NeuralNet(
    layers=[
        ('input', layers.InputLayer),
        ('conv1', Conv2DLayer),
        ('pool1', MaxPool2DLayer),
        ('conv2', Conv2DLayer),
        ('conv3', Conv2DLayer),
        ('pool3', MaxPool2DLayer),
        ('hidden1', layers.DenseLayer),
        ('dropout1', layers.DropoutLayer),
        ('hidden2', layers.DenseLayer),
        ('dropout2', layers.DropoutLayer),
        ('hidden3', layers.DenseLayer),
        ('dropout3', layers.DropoutLayer),
        ('output', layers.DenseLayer),
        ],
    input_shape=(None, 1, IMG_SIZE, IMG_SIZE),

    conv1_num_filters=128, 
    conv1_filter_size=(5, 5), 
    conv1_pad=2, 
    conv1_strides=(4,4), 
    conv1_nonlinearity=lasagne.nonlinearities.rectify, 

    pool1_ds=(3, 3),
    pool1_strides=(2,2),

    conv2_num_filters=128, 
    conv2_filter_size=(3, 3), 
    conv2_pad=2, 
    conv2_nonlinearity=lasagne.nonlinearities.rectify,

    conv3_num_filters=256, 
    conv3_filter_size=(3, 3),
    conv3_pad=1, 
    conv3_nonlinearity=lasagne.nonlinearities.rectify, 

    pool3_ds=(3, 3),
    pool3_strides=(2,2),

    hidden1_num_units=512,
    hidden1_nonlinearity=lasagne.nonlinearities.rectify,

    dropout1_p=0.3,

    hidden2_num_units=1024,
    hidden2_nonlinearity=lasagne.nonlinearities.rectify, 

    dropout2_p=0.5,

    hidden3_num_units=1024,
    hidden3_nonlinearity=lasagne.nonlinearities.rectify, 

    dropout3_p=0.5,

    output_num_units=121, 
    output_nonlinearity=lasagne.nonlinearities.softmax,

    update_learning_rate=theano.shared(float32(0.01)),
    update_momentum=theano.shared(float32(0.9)),

    regression=False,
    #loss=lasagne.objectives.multinomial_nll,
    use_label_encoder=True,
    batch_iterator_train=FlipBatchIterator(batch_size=256),
    
    on_epoch_finished=[
        AdjustVariable('update_learning_rate', start=0.01, stop=0.0001),
        AdjustVariable('update_momentum', start=0.9, stop=0.999),
        EarlyStopping(patience=50),
        ],
    max_epochs=500,
    verbose=2,
    test_size=0.1
    )
Example #56
net = NeuralNet(eval_size=0.05,
                layers=[
                    ('input', layers.InputLayer),
                    ('l1c', Conv2DCCLayer),
                    ('l1p', MaxPool2DCCLayer),
                    ('l1d', layers.DropoutLayer),
                    ('l2c', Conv2DCCLayer),
                    ('l2p', MaxPool2DCCLayer),
                    ('l2d', layers.DropoutLayer),
                    ('l3c', Conv2DCCLayer),
                    ('l3d', layers.DropoutLayer),
                    ('l4c', Conv2DCCLayer),
                    ('l4d', layers.DropoutLayer),
                    ('l5f', layers.DenseLayer),
                    ('l5p', layers.FeaturePoolLayer),
                    ('l5d', layers.DropoutLayer),
                    ('l6f', layers.DenseLayer),
                    ('l6p', layers.FeaturePoolLayer),
                    ('l6d', layers.DropoutLayer),
                    ('l7f', layers.DenseLayer),
                    ('l7p', layers.FeaturePoolLayer),
                    ('l7d', layers.DropoutLayer),
                    ('output', layers.DenseLayer),
                ],
                input_shape=(None, 1, 48, 48),
                l1c_num_filters=64,
                l1c_filter_size=(3, 3),
                l1p_ds=(2, 2),
                l1d_p=0.2,
                l2c_num_filters=128,
                l2c_filter_size=(3, 3),
                l2p_ds=(2, 2),
                l2d_p=0.3,
                l3c_num_filters=256,
                l3c_filter_size=(3, 3),
                l3d_p=0.4,
                l4c_num_filters=512,
                l4c_filter_size=(3, 3),
                l4d_p=0.4,
                l5f_num_units=2048,
                l5p_ds=2,
                l5d_p=0.5,
                l6f_num_units=2048,
                l6p_ds=2,
                l6d_p=0.5,
                l7f_num_units=2048,
                l7p_ds=2,
                l7d_p=0.5,
                output_num_units=121,
                output_nonlinearity=softmax,
                loss=multinomial_nll,
                batch_iterator_train=train_iterator,
                batch_iterator_test=test_iterator,
                update=rmsprop,
                update_learning_rate=theano.shared(float32(5e-5)),
                update_rho=0.9,
                update_epsilon=1e-6,
                regression=False,
                on_epoch_finished=[
                    StepDecay('update_learning_rate', start=5e-5, stop=1e-7),
                    EarlyStopping(patience=100)
                ],
                max_epochs=1500,
                verbose=1)
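
StepDecay is again a user-side on_epoch_finished handler; one plausible implementation matching the start/stop call signature used here, a linear schedule over max_epochs (names assumed):

import numpy as np

class StepDecay(object):
    def __init__(self, name, start=1e-4, stop=1e-7):
        self.name = name
        self.start, self.stop = start, stop
        self.schedule = None

    def __call__(self, nn, train_history):
        if self.schedule is None:
            self.schedule = np.linspace(self.start, self.stop, nn.max_epochs)
        epoch = train_history[-1]['epoch']
        new_value = np.float32(self.schedule[min(epoch, nn.max_epochs) - 1])
        getattr(nn, self.name).set_value(new_value)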
Example #57
net = NeuralNet(
    eval_size=0.05,
    layers=[
        ('input', layers.InputLayer),
        ('l1c', Conv2DCCLayer),
        ('l1p', MaxPool2DCCLayer),
        ('l1d', layers.DropoutLayer),
        ('l2c', Conv2DCCLayer),
        ('l2p', MaxPool2DCCLayer),
        ('l2d', layers.DropoutLayer),
        ('l3c', Conv2DCCLayer),
        ('l3d', layers.DropoutLayer),
        ('l5f', layers.DenseLayer),
        ('l5d', layers.DropoutLayer),
        ('l6f', layers.DenseLayer),
        ('l6d', layers.DropoutLayer),
        ('l7f', layers.DenseLayer),
        ('l7d', layers.DropoutLayer),
        ('output', layers.DenseLayer),
    ],
    input_shape=(None, 1, 48, 48),
    l1c_num_filters=64,
    l1c_filter_size=(5, 5),
    l1p_ds=(2, 2),
    l1d_p=0.2,
    l2c_num_filters=128,
    l2c_filter_size=(5, 5),
    l2p_ds=(2, 2),
    l2d_p=0.2,
    l3c_num_filters=256,
    l3c_filter_size=(3, 3),
    l3d_p=0.2,
    l5f_num_units=1024,
    l5d_p=0.5,
    l6f_num_units=1024,
    l6d_p=0.5,
    l7f_num_units=1024,
    l7d_p=0.5,
    output_num_units=121,
    output_nonlinearity=softmax,
    loss=multinomial_nll,
    batch_iterator_train=BatchIterator(batch_size=128),
    batch_iterator_test=BatchIterator(batch_size=128),
    update=adadelta,
    update_learning_rate=theano.shared(float32(1e-0)),
    update_rho=0.95,
    update_epsilon=1e-6,
    regression=False,
    on_epoch_finished=[
        # StepDecay('update_learning_rate', start=1e-5, stop=1e-7),
        StepDecay('update_learning_rate', start=1e-0, stop=1e-2),
        EarlyStopping(patience=100)
    ],
    max_epochs=1000,
    verbose=1)
Example #58
def build_nn(x_train, y_train, x_test, y_test):
    """
    Construct a classification neural network model from the input dataframes

    :param x_train: features dataframe for model training
    :param y_train: target dataframe for model training
    :param x_test: features dataframe for model testing
    :param y_test: target dataframe for model testing
    :return: None
    """

    # Create classification model
    net = NeuralNet(layers=[('input', InputLayer), ('hidden0', DenseLayer),
                            ('hidden1', DenseLayer), ('output', DenseLayer)],
                    input_shape=(None, x_train.shape[1]),
                    hidden0_num_units=NODES,
                    hidden0_nonlinearity=nonlinearities.softmax,
                    hidden1_num_units=NODES,
                    hidden1_nonlinearity=nonlinearities.softmax,
                    output_num_units=len(np.unique(y_train)),
                    output_nonlinearity=nonlinearities.softmax,
                    update_learning_rate=0.1,
                    verbose=1,
                    max_epochs=100)

    param_grid = {
        'hidden0_num_units': [1, 4, 17, 25],
        'hidden0_nonlinearity':
        [nonlinearities.sigmoid, nonlinearities.softmax],
        'hidden1_num_units': [1, 4, 17, 25],
        'hidden1_nonlinearity':
        [nonlinearities.sigmoid, nonlinearities.softmax],
        'update_learning_rate': [0.01, 0.1, 0.5]
    }
    grid = sklearn.grid_search.GridSearchCV(net,
                                            param_grid,
                                            verbose=0,
                                            n_jobs=3,
                                            cv=3)
    grid.fit(x_train, y_train)

    y_pred = grid.predict(x_test)
    # Mean absolute error regression loss
    mean_abs = sklearn.metrics.mean_absolute_error(y_test, y_pred)
    # Mean squared error regression loss
    mean_sq = sklearn.metrics.mean_squared_error(y_test, y_pred)
    # Median absolute error regression loss
    median_abs = sklearn.metrics.median_absolute_error(y_test, y_pred)
    # R^2 (coefficient of determination) regression score function
    r2 = sklearn.metrics.r2_score(y_test, y_pred)
    # Explained variance regression score function
    exp_var_score = sklearn.metrics.explained_variance_score(y_test, y_pred)
    # Accuracy prediction score
    accuracy = sklearn.metrics.accuracy_score(y_test, y_pred)

    with open(NN_PICKLE, 'wb') as results:
        pickle.dump(grid, results, pickle.HIGHEST_PROTOCOL)
        pickle.dump(net, results, pickle.HIGHEST_PROTOCOL)
        pickle.dump(mean_abs, results, pickle.HIGHEST_PROTOCOL)
        pickle.dump(mean_sq, results, pickle.HIGHEST_PROTOCOL)
        pickle.dump(median_abs, results, pickle.HIGHEST_PROTOCOL)
        pickle.dump(r2, results, pickle.HIGHEST_PROTOCOL)
        pickle.dump(exp_var_score, results, pickle.HIGHEST_PROTOCOL)
        pickle.dump(accuracy, results, pickle.HIGHEST_PROTOCOL)
        pickle.dump(y_pred, results, pickle.HIGHEST_PROTOCOL)

    return
Example #59
def define_net():
    define_net_specific_parameters()
    io = ImageIO()

    # Read pandas csv labels
    y = util.load_labels()

    if params.SUBSET != 0:
        y = y[:params.SUBSET]

    X = np.arange(y.shape[0])

    mean, std = io.load_mean_std(circularized=params.CIRCULARIZED_MEAN_STD)
    keys = y.index.values

    if params.AUGMENT:
        train_iterator = AugmentingParallelBatchIterator(keys,
                                                         params.BATCH_SIZE,
                                                         std,
                                                         mean,
                                                         y_all=y)
    else:
        train_iterator = ParallelBatchIterator(keys,
                                               params.BATCH_SIZE,
                                               std,
                                               mean,
                                               y_all=y)

    test_iterator = ParallelBatchIterator(keys,
                                          params.BATCH_SIZE,
                                          std,
                                          mean,
                                          y_all=y)

    if params.REGRESSION:
        y = util.float32(y)
        y = y[:, np.newaxis]

    if 'gpu' in theano.config.device:
        # Half of coma does not support cuDNN, check whether we can use it on this node
        # If not, use cuda_convnet bindings
        from theano.sandbox.cuda.dnn import dnn_available
        if dnn_available() and not params.DISABLE_CUDNN:
            from lasagne.layers import dnn
            Conv2DLayer = dnn.Conv2DDNNLayer
            MaxPool2DLayer = dnn.MaxPool2DDNNLayer
        else:
            from lasagne.layers import cuda_convnet
            Conv2DLayer = cuda_convnet.Conv2DCCLayer
            MaxPool2DLayer = cuda_convnet.MaxPool2DCCLayer
    else:
        Conv2DLayer = layers.Conv2DLayer
        MaxPool2DLayer = layers.MaxPool2DLayer

    Maxout = layers.pool.FeaturePoolLayer

    net = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv0', Conv2DLayer),
            ('pool0', MaxPool2DLayer),
            ('conv1', Conv2DLayer),
            ('pool1', MaxPool2DLayer),
            ('conv2', Conv2DLayer),
            ('pool2', MaxPool2DLayer),
            ('conv3', Conv2DLayer),
            ('pool3', MaxPool2DLayer),
            ('conv4', Conv2DLayer),
            ('pool4', MaxPool2DLayer),
            ('dropouthidden1', layers.DropoutLayer),
            ('hidden1', layers.DenseLayer),
            ('maxout1', Maxout),
            ('dropouthidden2', layers.DropoutLayer),
            ('hidden2', layers.DenseLayer),
            ('maxout2', Maxout),
            ('dropouthidden3', layers.DropoutLayer),
            ('output', layers.DenseLayer),
        ],
        input_shape=(None, params.CHANNELS, params.PIXELS, params.PIXELS),
        conv0_num_filters=32,
        conv0_filter_size=(5, 5),
        conv0_stride=(2, 2),
        pool0_pool_size=(2, 2),
        pool0_stride=(2, 2),
        conv1_num_filters=64,
        conv1_filter_size=(3, 3),
        conv1_border_mode='same',
        pool1_pool_size=(2, 2),
        pool1_stride=(2, 2),
        conv2_num_filters=128,
        conv2_filter_size=(3, 3),
        conv2_border_mode='same',
        pool2_pool_size=(2, 2),
        pool2_stride=(2, 2),
        conv3_num_filters=192,
        conv3_filter_size=(3, 3),
        conv3_border_mode='same',
        pool3_pool_size=(2, 2),
        pool3_stride=(2, 2),
        conv4_num_filters=256,
        conv4_filter_size=(3, 3),
        conv4_border_mode='same',
        pool4_pool_size=(2, 2),
        pool4_stride=(2, 2),
        hidden1_num_units=1024,
        hidden2_num_units=1024,
        dropouthidden1_p=0.5,
        dropouthidden2_p=0.5,
        dropouthidden3_p=0.5,
        maxout1_pool_size=2,
        maxout2_pool_size=2,
        output_num_units=1 if params.REGRESSION else 5,
        output_nonlinearity=None
        if params.REGRESSION else nonlinearities.softmax,
        update_learning_rate=theano.shared(
            util.float32(params.START_LEARNING_RATE)),
        update_momentum=theano.shared(util.float32(params.MOMENTUM)),
        custom_score=('kappa', quadratic_kappa),
        regression=params.REGRESSION,
        batch_iterator_train=train_iterator,
        batch_iterator_test=test_iterator,
        on_epoch_finished=[
            AdjustVariable('update_learning_rate',
                           start=params.START_LEARNING_RATE),
            stats.Stat(),
            ModelSaver()
        ],
        max_epochs=500,
        verbose=1,

        # Only relevant when create_validation_split = True
        eval_size=0.1,

    # Need to specify splits manually as indicated below!
        create_validation_split=params.SUBSET > 0,
    )

    # It is recommended to use the same training/validation split every model for ensembling and threshold optimization
    #
    # To set specific training/validation split:
    net.X_train = np.load(params.IMAGE_SOURCE + "/X_train.npy")
    net.X_valid = np.load(params.IMAGE_SOURCE + "/X_valid.npy")
    net.y_train = np.load(params.IMAGE_SOURCE + "/y_train.npy")
    net.y_valid = np.load(params.IMAGE_SOURCE + "/y_valid.npy")

    return net, X, y
Example #60
    num_classes = len(encoder.classes_)
    num_features = X.shape[1]

    net1 = NeuralNet(
        layers=[  # three layers: one hidden layer
            ('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            ('dropout', layers.DropoutLayer),
            ('output', layers.DenseLayer),
        ],
        # layer parameters:
        input_shape=(None, num_features),
        hidden_num_units=200,  # number of units in hidden layer #!200-600
        output_nonlinearity=lasagne.nonlinearities.softmax,  # output layer
        output_num_units=num_classes,  # one output unit per target class
        dropout_p=0.2,
        #!dropout 0.2-0.7

        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.01,  #!0.001-0.01
        update_momentum=0.9,  #!0.6-0.9
        regression=False,  # flag to indicate this is a classification problem
        max_epochs=500,  # we want to train this many epochs
        verbose=1,
    )

    random_search = RandomizedSearchCV(
        net1, {
            'hidden_num_units': sp_randint(200, 600),