Example #1
def nn_example(data):
    net1 = NeuralNet(
        layers=[('input', layers.InputLayer),
                ('hidden', layers.DenseLayer),
                ('output', layers.DenseLayer),
                ],
        # layer parameters:
        input_shape=(None, 28*28),
        hidden_num_units=100,  # number of units in 'hidden' layer
        output_nonlinearity=lasagne.nonlinearities.softmax,
        output_num_units=10,  # 10 target values for the digits 0, 1, 2, ..., 9

        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,

        max_epochs=10,
        verbose=1,
        )

    # Train the network
    net1.fit(data['X_train'], data['y_train'])

    # Try the network on new data
    print("Feature vector (100-110): %s" % data['X_test'][0][100:110])
    print("Label: %s" % str(data['y_test'][0]))
    print("Predicted: %s" % str(net1.predict([data['X_test'][0]])))
Example #2
def fit_model(train_x, y, test_x):
    """Feed-forward neural network for the Kaggle digit recognizer competition.

    Network size and optimization time (max_epochs=10) are intentionally
    limited to meet runtime restrictions.
    """
    print("\n\nRunning Conventional Net.  Optimization progress below\n\n")
    net1 = NeuralNet(
        layers=[  #list the layers here
            ('input', layers.InputLayer),
            ('hidden1', layers.DenseLayer),
            ('output', layers.DenseLayer),
            ],

        # layer parameters:
        input_shape=(None, train_x.shape[1]),
        hidden1_num_units=200, hidden1_nonlinearity=rectify,  #params of first layer
        output_nonlinearity=softmax,  # softmax for classification problems
        output_num_units=10,  # 10 target values

        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.05,
        update_momentum=0.7,

        regression=False,
        max_epochs=10,  # Intentionally limited for execution speed
        verbose=1,
        )

    net1.fit(train_x, y)
    predictions = net1.predict(test_x)
    return predictions
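A hedged usage sketch for fit_model; the CSV layout (a 'label' column plus 784 pixel columns) is assumed from the Kaggle digit-recognizer docstring and is not shown in the original:

import numpy as np
import pandas as pd

train = pd.read_csv('train.csv')  # Kaggle digit-recognizer training file
y = train['label'].values.astype(np.int32)
train_x = (train.drop('label', axis=1).values / 255.0).astype(np.float32)
test_x = (pd.read_csv('test.csv').values / 255.0).astype(np.float32)

predictions = fit_model(train_x, y, test_x)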
Example #3
def test_nolearn():
    iris = load_iris()
    X = iris.data.astype(np.float32)  #data is the key
    y_true = iris.target.astype(np.int32)  #target is the key

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y_true,
                                                        random_state=14)

    nn_layers = [('input', layers.InputLayer), ('hidden', layers.DenseLayer),
                 ('output', layers.DenseLayer)]

    net1 = NeuralNet(layers=nn_layers,  # renamed so the lasagne `layers` module is not shadowed
                     input_shape=(None, X.shape[1]),  # leave the batch dimension open
                     hidden_num_units=100,
                     output_num_units=3,  # iris has 3 classes
                     hidden_nonlinearity=sigmoid,
                     output_nonlinearity=softmax,
                     hidden_b=np.zeros((100, ), dtype=np.float32),
                     update=updates.momentum,
                     update_learning_rate=0.9,
                     update_momentum=0.1,
                     max_epochs=1000)

    net1.fit(X_train, y_train)

    y_pred = net1.predict(X_test)
    assert len(y_pred) == len(X_test)
    print(f1_score(y_test, y_pred, average='weighted'))
Example #4
    def gridsearch_alpha(self, learning_rate, index, params=None):

        self.l_in = ls.layers.InputLayer(shape=(None, n_input),
                                         input_var=None)  # InputLayer has no weights, so params is unused here
        self.l_hidden = ls.layers.DenseLayer(
            self.l_in, num_units=15, nonlinearity=ls.nonlinearities.rectify)
        self.network = l_out = ls.layers.DenseLayer(self.l_hidden, num_units=1)
        list_results = np.zeros(learning_rate.shape[0], dtype=np.float64)
        for i, item in enumerate(learning_rate):
            # Init neural net
            net1 = NeuralNet(
                layers=self.network,
                # optimization method:
                update=nesterov_momentum,
                update_learning_rate=item,
                update_momentum=0.9,
                regression=True,  # flag to indicate we're dealing with a regression problem
                max_epochs=800,  # we want to train this many epochs
                # verbose=1,
                eval_size=0.4)
            net1.fit(self.X_training, self.y_training)
            self.pred = net1.predict(self.n_sample2)
            name_file = "GeneticParams/saveNeuralNetwork_%s_%s.tdn" % (item, index)
            net1.save_params_to(name_file)
            score_nn = net1.score(self.n_sample2, self.n_test2)
            list_results[i] = score_nn  # index by position, not by the learning-rate value
            print("index=%s,item=%f,score=%f" % (index, item, score_nn))
        return list_results
Example #5
    def gridsearch_alpha(self, learning_rate, index, params=None):
        hidden_unit = ((index + 1) * 2) / 3
        self.l_in = ls.layers.InputLayer(shape=(None, n_input), input_var=None)
        self.l_hidden = ls.layers.DenseLayer(
            self.l_in, num_units=15, nonlinearity=ls.nonlinearities.rectify)
        self.network = l_out = ls.layers.DenseLayer(self.l_hidden, num_units=1)
        list_results = np.zeros(learning_rate.shape[0], dtype=np.float64)
        for i, item in enumerate(learning_rate):
            # Init neural net
            net1 = NeuralNet(
                layers=self.network,
                # optimization method:
                update=nesterov_momentum,
                update_learning_rate=item,
                update_momentum=0.9,
                regression=True,  # flag to indicate we're dealing with a regression problem
                max_epochs=800,  # we want to train this many epochs
                # verbose=1,
                eval_size=0.4
            )
            net1.fit(self.X_training, self.y_training)
            self.pred = net1.predict(self.n_sample2)
            name_file = "Params/saveNeuralNetwork_%s_%s.tdn" % (item, index)
            net1.save_params_to(name_file)
            score_nn = net1.score(self.n_sample2, self.n_test2)
            list_results[i] = score_nn  # index by position, not by the learning-rate value
            print("index=%s,item=%f,score=%f" % (index, item, score_nn))
        return list_results
Example #6
def nn_example(data):
    net1 = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        # layer parameters:
        input_shape=(None, 28 * 28),
        hidden_num_units=100,  # number of units in 'hidden' layer
        output_nonlinearity=lasagne.nonlinearities.softmax,
        output_num_units=10,  # 10 target values for the digits 0, 1, 2, ..., 9

        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        max_epochs=10,
        verbose=1,
    )

    # Train the network
    net1.fit(data['X_train'], data['y_train'])
    with open('net.pickle', 'wb') as f:
        pickle.dump(net1, f, -1)

    # Try the network on new data
    print("Feature vector (100-110): %s" % data['X_test'][0][100:110])
    print("Label: %s" % str(data['y_test'][0]))
    print("Predicted: %s" % str(net1.predict([data['X_test'][0]])))
Example #7
class NN:
    def __init__(self):
        self.nn = None
        self.scaler = MinMaxScaler(feature_range = (-1, 1))
        self.y_scaler = MinMaxScaler(feature_range = (-1,1))

    def fit(self, X, y):
        
        """incremental online fitting"""

        X = np.asarray(X).reshape(1, -1).astype(np.float32)
        y = np.asarray(y).reshape(-1, 1).astype(np.float32)

        self.scaler = self.scaler.partial_fit(X)
        self.y_scaler = self.y_scaler.partial_fit(y)

        self.nn = NeuralNet(
                layers=[
                    ('input', layers.InputLayer),
                    ('hidden', layers.DenseLayer),
                    ('output', layers.DenseLayer),
                    ],
                # layer parameters:
                input_shape=(None, len(X[0])),
                hidden_num_units=15,  # number of units in hidden layer

                output_nonlinearity=None,  # output layer uses identity function
                output_num_units=1,  # 1 target value

                # optimization method:
                update=nesterov_momentum,
                update_learning_rate=0.01,
                update_momentum=0.9,

                regression=True,  # flag to indicate we're dealing with regression problem
                max_epochs=2,  # TRY 50 and 46 epochs!
                verbose=3,
                eval_size=0.0
                )

        print(self.scaler.transform(X), '|', self.y_scaler.transform(y))
        self.nn.fit(self.scaler.transform(X), self.y_scaler.transform(y))
        return self

    def predict(self, X):
        preds = self.nn.predict(X)
        print(preds)
        return preds
Example #8
class NN(object):
    
    def __init__(self, input_size, hidden_1_size, hidden_2_size=None):
        n_layers = [
            ('input', layers.InputLayer),
            ('hidden1', layers.DenseLayer),
            ('dropout1', layers.DropoutLayer)
        ]
        if hidden_2_size is not None:
            n_layers.extend(
                [('hidden2', layers.DenseLayer), ('dropout2', layers.DropoutLayer)]
            )
        n_layers.append(('output', layers.DenseLayer))
        
        self.model = NeuralNet(
            layers=n_layers,
            input_shape=(None, input_size),
            hidden1_num_units=hidden_1_size, dropout1_p=0.5,
    
            output_nonlinearity=tanh,
            output_num_units=1,
            regression=True,

            update=nesterov_momentum,
            update_learning_rate=0.01,
            update_momentum=0.9,
    
            eval_size=0.1,
            on_epoch_finished=[
                AdjustVariable('update_learning_rate', stop=0.0001, decrement=0.00001),
                AdjustVariable('update_momentum',      stop=0.999,  increment=0.0001),
                EarlyStopping(patience=100)
            ],
            
            max_epochs=5000,
            verbose=1
        )
        if hidden_2_size is not None:
            # setattr registers the extra layer params just as __init__ kwargs would
            setattr(self.model, 'hidden2_num_units', hidden_2_size)
            setattr(self.model, 'dropout2_p', 0.5)
    
    def train(self, X, Y):
        self.model.fit(np.asarray(X, dtype=np.float32), np.asarray(Y, dtype=np.float32))
    
    def predict_continuous(self, X_test):
        return self.model.predict(np.asarray(X_test, dtype=np.float32))
    
    def predict_classes(self, X_test):
        Y_pred = self.predict_continuous(X_test)
        
        # threshold the continuous values to get the classes
        pos = Y_pred >= 0.33
        neg = Y_pred <= -0.33
        neu = np.logical_and(Y_pred < 0.33, Y_pred > -0.33)
        Y_pred[pos] = 1
        Y_pred[neg] = -1
        Y_pred[neu] = 0
        
        return Y_pred.reshape(-1)
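AdjustVariable and EarlyStopping are used above but never defined in this listing. A sketch of both in the style popularized by the nolearn tutorials follows; the increment/decrement handling is an assumption, and AdjustVariable only takes effect when the hyperparameter is a theano.shared value (as in Example #24):

import numpy as np

class AdjustVariable(object):
    """Step a shared hyperparameter each epoch until it reaches `stop`.
    Assumed reconstruction, not the original."""
    def __init__(self, name, stop, increment=None, decrement=None):
        self.name = name
        self.stop = stop
        self.step = increment if increment is not None else -decrement

    def __call__(self, nn, train_history):
        shared = getattr(nn, self.name)  # assumed to be a theano.shared value
        new_value = float(shared.get_value()) + self.step
        new_value = min(new_value, self.stop) if self.step > 0 else max(new_value, self.stop)
        shared.set_value(np.float32(new_value))

class EarlyStopping(object):
    """Stop when valid_loss has not improved for `patience` epochs,
    restoring the best weights seen so far."""
    def __init__(self, patience=100):
        self.patience = patience
        self.best_valid = np.inf
        self.best_valid_epoch = 0
        self.best_weights = None

    def __call__(self, nn, train_history):
        current_valid = train_history[-1]['valid_loss']
        current_epoch = train_history[-1]['epoch']
        if current_valid < self.best_valid:
            self.best_valid = current_valid
            self.best_valid_epoch = current_epoch
            self.best_weights = nn.get_all_params_values()
        elif self.best_valid_epoch + self.patience < current_epoch:
            print("Early stopping: best valid loss {:.6f} at epoch {}.".format(
                self.best_valid, self.best_valid_epoch))
            nn.load_params_from(self.best_weights)
            raise StopIteration()  # nolearn treats this as "stop fitting"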
Example #9
def nn_example():
    global nChannels
    X_train, y_train, X_val, y_val, X_test, y_test = load_data()
    net1 = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('aug1', layers.LocalResponseNormalization2DLayer),
            ('aug2', layers.GaussianNoiseLayer),
            ('bn1', layers.BatchNormLayer),
            ('conv2d1', layers.Conv2DLayer),
            ('bn2', layers.BatchNormLayer),
            ('maxpool1', layers.MaxPool2DLayer),
            ('conv2d2', layers.Conv2DLayer),
            ('bn3', layers.BatchNormLayer),
            ('maxpool2', layers.MaxPool2DLayer),
            ('dropout1', layers.DropoutLayer),
            ('dense', layers.DenseLayer),
            ('bn4', layers.BatchNormLayer),
            ('dropout2', layers.DropoutLayer),
            ('output', layers.DenseLayer),
        ],
        # input layer
        input_shape=(None, nChannels, patchSize, patchSize),
        # layer conv2d1
        conv2d1_num_filters=32,
        conv2d1_filter_size=(3, 3),
        conv2d1_nonlinearity=lasagne.nonlinearities.rectify,
        conv2d1_W=lasagne.init.GlorotUniform(),
        # layer maxpool1
        maxpool1_pool_size=(2, 2),
        # layer conv2d2
        conv2d2_num_filters=32,
        conv2d2_filter_size=(3, 3),
        conv2d2_nonlinearity=lasagne.nonlinearities.rectify,
        # layer maxpool2
        maxpool2_pool_size=(2, 2),
        # dropout1
        dropout1_p=0.5,
        # dense
        dense_num_units=256,
        dense_nonlinearity=lasagne.nonlinearities.rectify,
        # dropout2
        dropout2_p=0.5,
        # output
        output_nonlinearity=lasagne.nonlinearities.softmax,
        output_num_units=nOutPutClasses,
        # optimization method params
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        max_epochs=100,
        verbose=2,
    )

    net1.fit(X_train, y_train)
    print("Test Accuracy : %s" %
          str(np.mean(net1.predict(X_test) == y_test)))
    sys.setrecursionlimit(6000)
    pickle.dump(net1, open(modelPath + sys.argv[2], 'wb'))
Example #10
def train_nn_model():
    imageSize = 400 # 20 x 20 pixels

    from lasagne import layers
    from lasagne.updates import nesterov_momentum, sgd
    from nolearn.lasagne import NeuralNet

    model = NeuralNet(layers=[('input', layers.InputLayer),
                              ('hidden', layers.DenseLayer),
                              ('output', layers.DenseLayer),],
        # layer parameters:
        input_shape=(None, 400),  # 20x20 input pixels per batch
        hidden_num_units=100,  # number of units in hidden layer
        output_nonlinearity=None,  # output layer uses identity function
        output_num_units=62,  # 62 target values (digits and letters)

        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,

        regression=True,  # flag to indicate we're dealing with regression problem
        max_epochs=400,  # we want to train this many epochs
        verbose=1,)

    xTrain, yTrain, xTest, labelsInfoTest = load_train_test_data(nn_ytrain=True)

    print(xTrain.shape, yTrain.shape)
    print(xTrain.dtype, yTrain.dtype)

    model.fit(xTrain, yTrain)
    ytrain_pred = model.predict(xTrain)  # training-set predictions
    print(model.score(xTrain, yTrain))
    print(accuracy_score(ytrain_pred, yTrain))

    yTest = model.predict(xTest)

    print(labelsInfoTest.shape, yTest.shape)

    yTest2 = transform_to_class(yTest)

    submit_df = labelsInfoTest
    submit_df['Class'] = yTest2
    submit_df.to_csv('submission.csv', index=False)

    return model
Example #11
    def predict(self, X):
        X = np.array(X,dtype=np.float32)
        preds = NeuralNet.predict(self,X)

        preds = np.argmax(preds,axis=1)
        preds = self.label_encoder.inverse_transform(preds)

        return preds
Example #12
class network:
    """
    a base class for a neural network
    """

    name = 'baseclass'
    network = []

    # this variable is read after each epoch
    again = True

    def __init__(self):
        """
        set up a network
        """

        self.network = NeuralNet(layers=[])

    def fit(self, X, y):
        """
        use the training set to get a model
        """

        # handle the interrupt signal gracefully
        # (by stopping after the current epoch)
        for instance in self.network.on_epoch_finished:
            if isinstance(instance, checkAgain):
                signal.signal(signal.SIGINT, self.handle_break)
                break

        print('\nusing network {}\n'.format(self.name))

        return self.network.fit(X,y)

    def predict(self, X):
        """
        predict the targets after the network is fitted
        """

        return self.network.predict(X)

    def handle_break(self, signum, frame):
        """
        handle SIGINT by setting the variable 'again' to False
        """

        if self.again:
            # first signal - soft stop
            print(
                "\ninterrupt signal received. Stopping after the current epoch")
            self.again = False
        else:
            # second signal - break immediately
            print("\nsecond interrupt signal received. Goodbye")
            sys.exit(1)
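The checkAgain callback referenced in fit is not part of this excerpt. A minimal sketch of what it plausibly looks like (an assumption): an on_epoch_finished hook that ends training once the owning network object's 'again' flag has been cleared by the signal handler.

class checkAgain(object):
    def __init__(self, owner):
        self.owner = owner  # the network instance whose `again` flag we watch

    def __call__(self, nn, train_history):
        if not self.owner.again:
            raise StopIteration()  # stop after the current epoch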
Example #13
class RegressionNN(RegressionBase.RegressionBase):
    def __init__(self, isTrain, isNN):
        super(RegressionNN, self).__init__(isTrain, isNN)
        # data preprocessing
        #self.dataPreprocessing()

        self.net1 = NeuralNet(
                        layers=[  # three layers: one hidden layer
                            ('input', layers.InputLayer),
                            ('hidden', layers.DenseLayer),
                            #('hidden2', layers.DenseLayer),
                            #('hidden3', layers.DenseLayer),
                            ('output', layers.DenseLayer),
                            ],
                        # layer parameters:
                        input_shape=(None, 13),  # input dimension is 13
                        hidden_num_units=6,  # number of units in hidden layer
                        #hidden2_num_units=8,  # number of units in hidden layer
                        #hidden3_num_units=4,  # number of units in hidden layer
                        output_nonlinearity=None,  # output layer uses the identity function
                        output_num_units=1,  # output dimension is 1

                        # objective function
                        objective_loss_function=lasagne.objectives.squared_error,

                        # optimization method:
                        update=lasagne.updates.nesterov_momentum,
                        update_learning_rate=0.002,
                        update_momentum=0.4,

                        # use 20% as validation
                        train_split=TrainSplit(eval_size=0.2),

                        regression=True,  # flag to indicate we're dealing with regression problem
                        max_epochs=100,  # we want to train this many epochs
                        verbose=0,
                        )

    def dataPreprocessing(self):
        # In our experiments, standardization did not help the optimization,
        # so it is not used.
        #self.Standardization()
        pass

    def training(self):
        # train the NN model
        self.net1.fit(self.X_train, self.y_train)

    def predict(self):
        # predict the test data
        self.y_pred = self.net1.predict(self.X_test)

        # print MSE
        mse = mean_squared_error(self.y_pred, self.y_test)
        print "MSE: {}".format(mse)
Example #16
def test_lasagne_functional_regression(boston):
    from nolearn.lasagne import NeuralNet

    X, y = boston

    layer1 = InputLayer(shape=(128, 13))
    layer2 = DenseLayer(layer1, num_units=100)
    output = DenseLayer(layer2, num_units=1, nonlinearity=identity)

    nn = NeuralNet(
        layers=output,
        update_learning_rate=0.01,
        update_momentum=0.1,
        regression=True,
        max_epochs=50,
    )

    nn.fit(X[:300], y[:300])
    assert mean_absolute_error(nn.predict(X[300:]), y[300:]) < 3.0
    assert r2_score(nn.predict(X[300:]), y[300:]) == nn.score(X[300:], y[300:])
Example #18
def cancer(X, y, X_valid, y_valid):
    l = InputLayer(shape=(None, X.shape[1]))
    l = DenseLayer(l, num_units=len(np.unique(y)), nonlinearity=softmax)
    net = NeuralNet(l, update_learning_rate=0.01, max_epochs=1000)
    net.fit(X, y)
    print(net.score(X, y))
    y_pred = net.predict(X_valid)
    print(y_valid)
    print(y_pred)
    plot_loss(net)
    plt.title('Cancer')
    plt.show()
Example #19
class Classifier(BaseEstimator):
    def __init__(self):
        self.net = None
        self.label_encoder = None

    def fit(self, X, y):
        layers0 = [('input', InputLayer), ('dropoutf', DropoutLayer),
                   ('dense0', DenseLayer), ('dropout0', DropoutLayer),
                   ('dense1', DenseLayer), ('dropout1', DropoutLayer),
                   ('dense2', DenseLayer), ('dropout2', DropoutLayer),
                   ('dense3', DenseLayer), ('dropout3', DropoutLayer),
                   ('output', DenseLayer)]

        X = X.astype(theano.config.floatX)
        self.label_encoder = LabelEncoder()
        y = self.label_encoder.fit_transform(y).astype(np.int32)
        self.scaler = StandardScaler()
        X = self.scaler.fit_transform(X)
        num_classes = len(self.label_encoder.classes_)
        num_features = X.shape[1]
        self.net = NeuralNet(layers=layers0,
                             input_shape=(None, num_features),
                             dropoutf_p=0.15,
                             dense0_num_units=1024,
                             dropout0_p=0.5,
                             dense0_nonlinearity=rectify,
                             dense1_num_units=1024,
                             dropout1_p=0.15,
                             dense1_nonlinearity=rectify,
                             dense2_num_units=1024,
                             dropout2_p=0.15,
                             dense2_nonlinearity=rectify,
                             dense3_num_units=1024,
                             dropout3_p=0.15,
                             dense3_nonlinearity=rectify,
                             output_num_units=num_classes,
                             update=adagrad,
                             update_learning_rate=0.01,
                             eval_size=0.2,
                             verbose=1,
                             max_epochs=150)
        self.net.fit(X, y)
        return self

    def predict(self, X):
        X = X.astype(theano.config.floatX)
        X = self.scaler.transform(X)  # reuse the scaler fitted during training
        return self.label_encoder.inverse_transform(self.net.predict(X))

    def predict_proba(self, X):
        X = X.astype(theano.config.floatX)
        X = self.scaler.transform(X)  # reuse the scaler fitted during training
        return self.net.predict_proba(X)
Example #20
def test_lasagne_functional_mnist(mnist):
    # Run a full example on the mnist dataset
    from nolearn.lasagne import NeuralNet

    X, y = mnist
    X_train, y_train = X[:60000], y[:60000]
    X_test, y_test = X[60000:], y[60000:]

    epochs = []

    def on_epoch_finished(nn, train_history):
        epochs[:] = train_history
        if len(epochs) > 1:
            raise StopIteration()

    nn = NeuralNet(
        layers=[
            ('input', InputLayer),
            ('hidden1', DenseLayer),
            ('dropout1', DropoutLayer),
            ('hidden2', DenseLayer),
            ('dropout2', DropoutLayer),
            ('output', DenseLayer),
        ],
        input_shape=(None, 784),
        output_num_units=10,
        output_nonlinearity=softmax,
        more_params=dict(
            hidden1_num_units=512,
            hidden2_num_units=512,
        ),
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        max_epochs=5,
        on_epoch_finished=on_epoch_finished,
    )

    nn.fit(X_train, y_train)
    assert len(epochs) == 2
    assert epochs[0]['valid_accuracy'] > 0.85
    assert epochs[1]['valid_accuracy'] > epochs[0]['valid_accuracy']
    assert sorted(epochs[0].keys()) == [
        'epoch',
        'train_loss',
        'valid_accuracy',
        'valid_loss',
    ]

    y_pred = nn.predict(X_test)
    assert accuracy_score(y_pred, y_test) > 0.85
Example #22
def nnet(pipe):
    pipe.features = pipe.features.astype(np.float32)
    pipe.labels = pipe.labels.astype(np.int32)
    pipe.features = StandardScaler().fit_transform(pipe.features)
    X_train, X_test, y_train, y_test = train_test_split(
        pipe.features, pipe.labels)
    nnet = NeuralNet(
        # Specify the layers
        layers=[('input', layers.InputLayer), ('hidden1', layers.DenseLayer),
                ('hidden2', layers.DenseLayer), ('hidden3', layers.DenseLayer),
                ('output', layers.DenseLayer)],

        # Input Layer
        input_shape=(None, pipe.features.shape[1]),

        # Hidden Layer 1
        hidden1_num_units=512,
        hidden1_nonlinearity=rectify,

        # Hidden Layer 2
        hidden2_num_units=512,
        hidden2_nonlinearity=rectify,

        # # Hidden Layer 3
        hidden3_num_units=512,
        hidden3_nonlinearity=rectify,

        # Output Layer
        output_num_units=2,
        output_nonlinearity=softmax,

        # Optimization
        update=nesterov_momentum,
        update_learning_rate=0.001,
        update_momentum=0.3,
        max_epochs=30,

        # Others,
        regression=False,
        verbose=1,
    )

    nnet.fit(X_train, y_train)
    y_predict = nnet.predict(X_test)

    print "precision for nnet:", precision_score(y_test, y_predict)
    print "recall for nnet:", recall_score(y_test, y_predict)
    print "f1 for nnet:", f1_score(y_test, y_predict, average='weighted')
    pickle.dump(nnet,
                open("model.pkl", "wb"),
                protocol=cPickle.HIGHEST_PROTOCOL)
Example #23
def nn_example(data):
    net1 = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        # layer parameters:
        input_shape=(None, 28 * 28),
        hidden_num_units=100,  # number of units in 'hidden' layer
        output_nonlinearity=lasagne.nonlinearities.softmax,
        output_num_units=10,  # 10 target values for the digits 0, 1, 2, ..., 9

        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        max_epochs=30,
        verbose=1,
    )

    # Train the network
    net1.fit(numpy.array(data['X_train']), numpy.array(data['y_train']))

    # Try the network on new data
    # print("Feature vector (100-110): %s" % data['X_test'][0][100:110])
    print("Actual Label: %s" % str(data['y_test'][9000]))
    print("Predicted: %s" % str(net1.predict([data['X_test'][9000]])))

    preds = net1.predict(data['X_test'])

    cm = confusion_matrix(data['y_test'], preds)
    plt.matshow(cm)
    plt.title('Confusion matrix')
    plt.colorbar()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
Example #24
class network(object):
    def __init__(self,X_train, Y_train):
        #self.__hidden=0

        self.__hidden = int(math.ceil(2 * (X_train.shape[1] + 1) / 3.0))
        self.net= NeuralNet(
            layers=[
                ('input', layers.InputLayer),
                ('hidden', layers.DenseLayer),
                ('output', layers.DenseLayer)
            ],
            input_shape=(None, X_train.shape[1]),
            hidden_num_units=self.__hidden,
            #hidden_nonlinearity=nonlinearities.tanh,
            output_nonlinearity=None,
            batch_iterator_train=BatchIterator(batch_size=256),
            output_num_units=1,

            on_epoch_finished=[EarlyStopping(patience=50)],
            update=momentum,
            update_learning_rate=theano.shared(np.float32(0.03)),
            update_momentum=theano.shared(np.float32(0.8)),
            regression=True,
            max_epochs=1000,
            verbose=1,
        )

        self.net.fit(X_train,Y_train)

    def predict(self,X):
        return self.net.predict(X)

    def showMetrics(self):
        train_loss = np.array([i["train_loss"] for i in self.net.train_history_])
        valid_loss = np.array([i["valid_loss"] for i in self.net.train_history_])
        pyplot.plot(train_loss, linewidth=3, label="training")
        pyplot.plot(valid_loss, linewidth=3, label="validation")
        pyplot.grid()
        pyplot.legend()
        pyplot.xlabel("epoch")
        pyplot.ylabel("loss")
        # pyplot.ylim(1e-3, 1e-2)
        pyplot.yscale("log")
        pyplot.show()

    def saveNet(self,fname):
        self.net.save_params_to(fname)

    def loadNet(self,fname):
        self.net.load_params_from(fname)
Example #27
def main():
    xtrain, ytrain, xval, yval, xtest, ytest = loaddata()

    # <codecell>
    conv_filters = 32
    deconv_filters = 32
    filter_sizes = 7
    epochs = 20
    encode_size = 40
    ae = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv', layers.Conv2DLayer),
            ('pool', layers.MaxPool2DLayer),
            ('flatten', ReshapeLayer),  # output_dense
            ('encode_layer', layers.DenseLayer),
            ('hidden', layers.DenseLayer),  # output_dense
            ('unflatten', ReshapeLayer),
            ('unpool', Unpool2DLayer),
            ('deconv', layers.Conv2DLayer),
            ('output_layer', ReshapeLayer),
            ],
        input_shape=(None, 1, 80, 80),
        conv_num_filters=conv_filters,
        conv_filter_size=(filter_sizes, filter_sizes),
        conv_nonlinearity=None,
        pool_pool_size=(2, 2),
        flatten_shape=(([0], -1)), # not sure if necessary?
        encode_layer_num_units=encode_size,
        # 80 matches the 80x80 input (the MNIST original used 28 here);
        # integer division keeps the shapes exact
        hidden_num_units=deconv_filters * (80 + filter_sizes - 1) ** 2 // 4,
        unflatten_shape=(([0], deconv_filters,
                          (80 + filter_sizes - 1) // 2,
                          (80 + filter_sizes - 1) // 2)),
        unpool_ds=(2, 2),
        deconv_num_filters=1,
        deconv_filter_size=(filter_sizes, filter_sizes),
        # deconv_border_mode="valid",
        deconv_nonlinearity=None,
        output_layer_shape=(([0],-1)),
        update_learning_rate=0.01,
        update_momentum=0.975,
        batch_iterator_train=FlipBatchIterator(batch_size=128),
        regression=True,
        max_epochs=epochs,
        verbose=1,
        )
    ae.fit(xtrain, ytrain)

    X_train_pred = ae.predict(xtrain).reshape(-1, 80, 80)
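Unpool2DLayer and FlipBatchIterator come from outside this listing. A sketch of an unpooling layer consistent with the unpool_ds=(2, 2) parameter above, assuming the common repeat-upscaling variant (the original implementation may differ):

from lasagne import layers

class Unpool2DLayer(layers.Layer):
    """Upscale (b, c, h, w) input to (b, c, h * ds[0], w * ds[1]) by
    repeating values, a rough inverse of max-pooling."""
    def __init__(self, incoming, ds, **kwargs):
        super(Unpool2DLayer, self).__init__(incoming, **kwargs)
        self.ds = ds

    def get_output_shape_for(self, input_shape):
        return (input_shape[0], input_shape[1],
                input_shape[2] * self.ds[0], input_shape[3] * self.ds[1])

    def get_output_for(self, input, **kwargs):
        return input.repeat(self.ds[0], axis=2).repeat(self.ds[1], axis=3)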
Example #28
def regressNN(X, y):
    layers_all = [('input', InputLayer), ('dense', DenseLayer),
                  ('output', DenseLayer)]
    np.random.shuffle(X)

    print(X.shape, y.shape)
    #net.fit(X,y)
    folds = 3
    skf = KFold(X.shape[0], n_folds=folds)
    for train_index, test_index in skf:
        net = NeuralNet(layers=layers_all,
                        input_shape=(None, X.shape[1]),
                        dense_num_units=2,
                        dense_nonlinearity=None,
                        regression=True,
                        update_momentum=0.9,
                        update_learning_rate=0.001,
                        output_nonlinearity=None,
                        output_num_units=1,
                        max_epochs=100)
        Xtrain, Xtest = X[train_index], X[test_index]
        ytrain, ytest = y[train_index], y[test_index]

        Xtrain = np.array(Xtrain, dtype='float64')
        Xtest = np.array(Xtest, dtype='float64')
        #Xtrain[np.isinf(Xtrain)] = 0
        net.fit(Xtrain, ytrain)

        error = 0
        errorList = []
        predictions = []
        for i in range(0, Xtest.shape[0]):
            a = np.transpose(Xtest[i, :].reshape(Xtest[i, :].shape[0], 1))

            pr = net.predict(a)
            temp_err = np.absolute(pr - ytest[i]) * 60
            errorList.append(temp_err)
            predictions.append(pr)
            error += temp_err

        print('Average error in minutes: {0}'.format(error / Xtest.shape[0]))
        print('Max/min/median error: {0} , {1} , {2}'.format(
            max(errorList), min(errorList), np.median(errorList)))
        del errorList[:]
        del predictions[:]
Example #30
def lasagne_model(train, y_train, test):
    layers = [('input', InputLayer),
            ('dense0', DenseLayer),
            ('dropout0', DropoutLayer),
            ('dense1', DenseLayer),
            ('dropout1', DropoutLayer),
            ('dense2', DenseLayer),
            ('dropout2', DropoutLayer),
            ('output', DenseLayer)]

    num_features = len(train[0])
    num_classes = 1

    model = NeuralNet(layers=layers,
            input_shape=(None, num_features),
            objective_loss_function=squared_error,
            dense0_num_units=6,
            dropout0_p=0.4, #0.1,
            dense1_num_units=4,
            dropout1_p=0.4, #0.1,
            dense2_num_units=2,
            dropout2_p=0.4, #0.1,
            output_num_units=num_classes,
            output_nonlinearity=tanh,
            regression=True,
            update=nesterov_momentum, #adagrad,
            update_momentum=0.9,
            update_learning_rate=0.004,
            eval_size=0.2,
            verbose=1,
            max_epochs=5) #15)

    x_train = np.array(train).astype(np.float32)
    x_test = np.array(test).astype(np.float32)

    model.fit(x_train, y_train)
    pred_val = model.predict(x_test)
    print(pred_val.shape)
    test_probs = np.array(pred_val).reshape(len(pred_val),)
    print(test_probs.shape)

    indices = test_probs < 0
    test_probs[indices] = 0
    return test_probs
Example #31
def regr(X, y, X_valid, y_valid):
    l = InputLayer(shape=(None, X.shape[1]))
    l = DenseLayer(l, num_units=100, nonlinearity=softmax)
    # l = DenseLayer(l, num_units=40, nonlinearity=softmax)
    l = DenseLayer(l, num_units=len(np.unique(y)), nonlinearity=softmax)
    net = NeuralNet(l,
                    update=adam,
                    update_learning_rate=0.01,
                    max_epochs=2000,
                    objective_loss_function=squared_error,
                    regression=True)
    net.fit(X, y)
    print(net.score(X, y))
    y_pred = net.predict(X_valid)
    print(y_valid)
    print(y_pred)
    plot_loss(net)
    plt.title('Digits')
    plt.show()
Example #32
def fit_model(reshaped_train_x, y, image_width, image_height, reshaped_test_x):
    net = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv1', layers.Conv2DLayer),
            ('pool1', layers.MaxPool2DLayer),
            ('dropout1', layers.DropoutLayer),
            ('conv2', layers.Conv2DLayer),
            ('pool2', layers.MaxPool2DLayer),
            ('dropout2', layers.DropoutLayer),
            ('conv3', layers.Conv2DLayer),
            ('hidden4', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        input_shape=(None, 1, 32, 32),
        conv1_num_filters=32,
        conv1_filter_size=(5, 5),
        pool1_pool_size=(2, 2),
        dropout1_p=0.2,
        conv2_num_filters=64,
        conv2_filter_size=(5, 5),
        pool2_pool_size=(2, 2),
        dropout2_p=0.2,
        conv3_num_filters=128,
        conv3_filter_size=(5, 5),
        hidden4_num_units=500,
        output_num_units=62,
        output_nonlinearity=softmax,
        update_learning_rate=0.01,
        update_momentum=0.9,
        batch_iterator_train=BatchIterator(batch_size=100),
        batch_iterator_test=BatchIterator(batch_size=100),
        use_label_encoder=True,
        regression=False,
        max_epochs=100,
        verbose=1,
    )

    net.fit(reshaped_train_x, y)
    prediction = net.predict(reshaped_test_x)

    return prediction
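A hedged usage sketch (the flat array names are assumptions): nolearn's Conv2DLayer expects 4-D input, so flat 1024-pixel rows must be reshaped to match input_shape=(None, 1, 32, 32) before fitting.

import numpy as np

reshaped_train_x = flat_train_x.reshape(-1, 1, 32, 32).astype(np.float32)
reshaped_test_x = flat_test_x.reshape(-1, 1, 32, 32).astype(np.float32)
prediction = fit_model(reshaped_train_x, y, 32, 32, reshaped_test_x)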
Example #33
class CNN(object):
	__metaclass__ = Singleton
	channels = 3
	image_size = [64,64]
	layers = [ 
		# layer dealing with the input data
		(InputLayer, {'shape': (None, channels, image_size[0], image_size[1])}),
		# first stage of our convolutional layers 
		(Conv2DLayer, {'num_filters': 32, 'filter_size': 9}),
		(Conv2DLayer, {'num_filters': 32, 'filter_size': 5}),
		(MaxPool2DLayer, {'pool_size': 2}),
		# second stage of our convolutional layers
		(Conv2DLayer, {'num_filters': 32, 'filter_size': 5}),
		(Conv2DLayer, {'num_filters': 32, 'filter_size': 3}),
		(MaxPool2DLayer, {'pool_size': 2}),
		# two dense layers with dropout
		(DenseLayer, {'num_units': 256}),
		(DropoutLayer, {}),
		(DenseLayer, {'num_units': 256}),
		# the output layer
		(DenseLayer, {'num_units': 2, 'nonlinearity': softmax}),
	]
	def __init__(self):
		logger = logging.getLogger(__name__)
		logger.info("Initializing neural net...")
		self.net = NeuralNet(layers=self.layers, update_learning_rate=0.0002 )
		self.net.load_params_from("conv_params")
		logger.info("Finished loading parameters")
	
	def resize(self, infile):
		try:
			im = Image.open(infile)
			resized_im = np.array(ImageOps.fit(im, (self.image_size[0], self.image_size[1]), Image.ANTIALIAS), dtype=np.uint8)
			rgb = np.array([resized_im[:,:,0], resized_im[:,:,1], resized_im[:,:,2]])
			return rgb.reshape(1,self.channels,self.image_size[0],self.image_size[1])

		except IOError:
			return "cannot create thumbnail for '%s'" % infile

	def predict(self, X):
		is_match = self.net.predict(X)[0] == 1
		return "true" if is_match else "false"
Example #34
def find_digits(X, y, X_valid, y_valid):
    max_hidden_layers = 4
    max_neuron_units = 110

    loss = []
    kf = KFold(n_splits=5)
    for i in range(1, max_hidden_layers):
        for j in range((64 + 10) // 2 // i, max_neuron_units // i, 10 // i):
            print('=' * 40)
            print('%s hidden layers' % i)
            print('%s neurons' % j)
            print('=' * 40)
            l = InputLayer(shape=(None, X.shape[1]))
            for k in range(i):
                l = DenseLayer(l, num_units=j, nonlinearity=softmax)
            l = DenseLayer(l,
                           num_units=len(np.unique(y)),
                           nonlinearity=softmax)
            net = NeuralNet(l,
                            update=adam,
                            update_learning_rate=0.01,
                            max_epochs=500)

            k_loss = []
            y_data = np.array([y]).transpose()
            data = np.concatenate((X, y_data), axis=1)
            for train_index, test_index in kf.split(data):
                X_train, X_test = X[train_index], X[test_index]
                y_train, y_test = y[train_index], y[test_index]

                net.fit(X_train, y_train)
                y_pred = net.predict(X_test)
                loss_error = mean_squared_error(y_test, y_pred)
                k_loss.append(loss_error)
                print(loss_error)

            loss_net = (i, j, np.array(k_loss).mean())
            print(loss_net)
            loss.append(loss_net)
            print('=' * 40)

    print(min(loss, key=lambda x: x[2]))
Example #35
    def classifyNN_nolearn(self):

        utils.mkdir_p(self.outDir)
        self.readDataset()
        nn = NeuralNet(
            layers=[  # network
                ('input', InputLayer),
                ('fc1', DenseLayer), ('fc2', DenseLayer), ('fc3', DenseLayer),
                ('fc4', DenseLayer), ('fc5', DenseLayer), ('fc6', DenseLayer),
                ('output', DenseLayer)
            ],
            # layer params
            input_shape=(None, self.X_train.shape[1]),
            fc1_num_units=108,
            fc2_num_units=216,
            fc3_num_units=432,
            fc4_num_units=864,
            fc5_num_units=1728,
            fc6_num_units=3456,
            output_num_units=7,
            # non-linearities
            fc1_nonlinearity=nl.tanh,
            fc2_nonlinearity=nl.tanh,
            fc3_nonlinearity=nl.tanh,
            fc4_nonlinearity=nl.tanh,
            fc5_nonlinearity=nl.tanh,
            fc6_nonlinearity=nl.tanh,
            output_nonlinearity=nl.softmax,
            # update params
            update=upd.momentum,
            update_learning_rate=0.01,
            update_momentum=0.9,
            train_split=TrainSplit(eval_size=0.2),
            verbose=1,
            max_epochs=5000)

        nn.fit(self.X_train.astype(np.float32),
               self.y_train.astype(np.int32) - 1)
        print('Prediction.....................................................')
        y_test = nn.predict(self.X_test.astype(np.float32))
        self.save_sub(self.outDir, y_test + 1)
Example #36
def operate(data):
    # data = [0.0592330098, 0.140761971, 0.0757750273, 0.119381011, 0.0651519895, 0.120247006, 0.0454769731]

    batch_size = len(data)

    thres = 0.4

    net = NeuralNet(
        layers=[('input', layers.InputLayer),
                ('hidden', layers.DenseLayer),
                ('output', layers.DenseLayer),
                ],
        # layer parameters:
        input_shape=(None, batch_size),
        hidden_num_units=batch_size,  # number of units in 'hidden' layer
        hidden_nonlinearity=lasagne.nonlinearities.sigmoid,
        output_nonlinearity=lasagne.nonlinearities.elu,
        output_num_units=batch_size,  # autoencoder: the output reconstructs the input vector

        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.1,
        update_momentum=0.9,

        max_epochs=2000,
        verbose=1,

        regression=True,
        objective_loss_function=lasagne.objectives.squared_error
        # custom_score=("validation score", lambda x, y: np.mean(np.abs(x - y)))
    )
    net.load_params_from("/home/loringit/Bulat/neuron/bulik_nn")

    net_answer = net.predict([data])
    result = np.linalg.norm(data - net_answer)
    # return result < thres
    if result < thres:
        return "true"
    else:
        return "false"
Example #37
def classify(X, y, X_test, y_test):
    layers0 = [('input', InputLayer),
           ('dense0', DenseLayer),
           ('dropout0', DropoutLayer),  
           ('dense1', DenseLayer),
           ('dropout1', DropoutLayer),  
           ('output', DenseLayer)]
               
    net = NeuralNet(layers=layers0,
                     input_shape=(None, X.shape[1]),
                     dense0_num_units=300,
                     dropout0_p=0.075,
                     dropout1_p=0.1,
                     dense1_num_units=750,
                     output_num_units=3,
                     output_nonlinearity=softmax,
                     update=nesterov_momentum,
                     update_learning_rate=0.001,
                     update_momentum=0.99,
                 
                     eval_size=0.2,
                     verbose=1,
                     max_epochs=15)

    net.fit(X, y)
    print(net.score(X, y))
    
    preds = net.predict(X_test)
    print(classification_report(y_test, preds))
    cm = confusion_matrix(y_test, preds)
    plt.matshow(cm)
    plt.title('Confusion matrix')
    plt.colorbar()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.savefig('confmatrix.png')
    plt.show()
    
    print(cm)
Example #38
def train_nolearn_model(X, y):
    '''
        NeuralNet with nolearn
    '''
    X = X.astype(np.float32)
    y = y.astype(np.int32)

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.2,
                                                        random_state=5)
    X_train, X_test = impute_nan(X_train, X_test)
    X_train, X_test = normalize_features(X_train, X_test)

    lays = [
        ('input', layers.InputLayer),
        ('hidden', layers.DenseLayer),
        ('output', layers.DenseLayer),
    ]

    net = NeuralNet(
        layers=lays,
        input_shape=(None, 23),
        hidden_num_units=10,
        objective_loss_function=lasagne.objectives.categorical_crossentropy,
        output_nonlinearity=lasagne.nonlinearities.sigmoid,
        output_num_units=10,
        update=nesterov_momentum,
        update_learning_rate=0.001,
        update_momentum=0.9,
        max_epochs=10,
        verbose=1,
    )
    net.fit(X_train, y_train)
    test_score = net.score(X_test, y_test)  # score (not predict) takes the true labels
    train_score = net.score(X_train, y_train)
    return train_score, test_score
Example #39
def test_lasagne_functional_regression(boston):
    from nolearn.lasagne import NeuralNet

    X, y = boston

    nn = NeuralNet(
        layers=[
            ('input', InputLayer),
            ('hidden1', DenseLayer),
            ('output', DenseLayer),
        ],
        input_shape=(128, 13),
        hidden1_num_units=100,
        output_nonlinearity=identity,
        output_num_units=1,
        update_learning_rate=0.01,
        update_momentum=0.1,
        regression=True,
        max_epochs=50,
    )

    nn.fit(X[:300], y[:300])
    assert mean_absolute_error(nn.predict(X[300:]), y[300:]) < 3.0
Example #42
CNN = NeuralNet(
    # the opening of this snippet was truncated; the layer list below is
    # inferred from the parameter names that follow
    layers=[
        ('input', layers.InputLayer),
        ('conv2d1', layers.Conv2DLayer),
        ('maxpool1', layers.MaxPool2DLayer),
        ('conv2d2', layers.Conv2DLayer),
        ('maxpool2', layers.MaxPool2DLayer),
        ('dropout1', layers.DropoutLayer),
        ('dense', layers.DenseLayer),
        ('dropout2', layers.DropoutLayer),
        ('output', layers.DenseLayer),
    ],
    input_shape=(None, 1, 28, 28),
    conv2d1_num_filters=32,
    conv2d1_filter_size=(5, 5),
    conv2d1_nonlinearity=lasagne.nonlinearities.rectify,
    conv2d1_W=lasagne.init.GlorotUniform(),
    maxpool1_pool_size=(2, 2),
    conv2d2_num_filters=32,
    conv2d2_filter_size=(5, 5),
    conv2d2_nonlinearity=lasagne.nonlinearities.rectify,
    maxpool2_pool_size=(2, 2),
    dropout1_p=0.5,
    dense_num_units=256,
    dense_nonlinearity=lasagne.nonlinearities.rectify,
    dropout2_p=0.5,
    output_nonlinearity=lasagne.nonlinearities.softmax,
    output_num_units=10,
    update=nesterov_momentum,
    update_learning_rate=0.01,
    update_momentum=0.9,
    max_epochs=10,
    verbose=1,
)

nn = CNN.fit(X_train, y_train)

prediction = CNN.predict(X_test)

visualize.plot_conv_weights(CNN.layers_['conv2d1'])
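# The net above expects float32 images of shape (n_samples, 1, 28, 28) and
# int32 labels, matching input_shape=(None, 1, 28, 28). A hedged preparation
# sketch (array names are assumptions):
#
# X = X.reshape(-1, 1, 28, 28).astype(np.float32)
# y = y.astype(np.int32)
# X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)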
Example #43
X, y = load()
net1.fit(X, y)


with open('net1.pickle', 'wb') as f:
    pickle.dump(net1, f, -1)
# net1 = pickle.load(open('net1.pickle', 'rb'))

def plot_sample(x, y, axis):
    img = x.reshape(96, 96)
    axis.imshow(img, cmap='gray')
    axis.scatter(y[0::2] * 48 + 48, y[1::2] * 48 + 48, marker='x', s=10)

X, _ = load(test=True)
y_pred = net1.predict(X)
# Alternative: predict a single image instead of the whole test set
# X = np.vstack([img])
# y_pred = net1.predict(X)

fig = pyplot.figure(figsize=(6, 6))
fig.subplots_adjust(
    left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)

for i in range(1):
    ax = fig.add_subplot(4, 4, i + 1, xticks=[], yticks=[])
    plot_sample(X[i], y_pred[i], ax)

pyplot.show()
Example #44
    y_tensor_type = T.imatrix,                   
    objective_loss_function = binary_crossentropy,
    #batch_iterator_train = BatchIterator(batch_size = 256),
    max_epochs=40, 
    eval_size=0.1,
    #train_split =0.0,
    verbose=2,
    )


seednumber=int(num)
np.random.seed(seednumber)
net.fit(train, labels)


preds = net.predict(test)[:,0] 


submission = pd.read_csv('../cv/good/xgb4.csv')
submission["PredictedProb"] = preds
submission.to_csv('nn2cv/nn2cv_%s.csv'%num, index=False)
score=str(llfun(submission['real'],submission["PredictedProb"]))
print('score', score)
"""
import subprocess
cmd='cp nn2cv/nn2cv_0.csv tmp/nn2cv%s.csv'%score
subprocess.call(cmd,shell=True)
cmd='cp nn2cv.py tmp/nn2cv%s.py'%score
subprocess.call(cmd,shell=True)
"""
Example #45
    conv2d1_W=lasagne.init.GlorotUniform(),  
    # layer maxpool1
    maxpool1_pool_size=(2, 2),    
    # layer conv2d2
    conv2d2_num_filters=100,
    conv2d2_filter_size=(5, 5),
    conv2d2_nonlinearity=lasagne.nonlinearities.rectify,
    # layer maxpool2
    maxpool2_pool_size=(2, 2),
    # dropout1
    dropout1_p=0.5,    
    # dense
    dense_num_units=256,
    dense_nonlinearity=lasagne.nonlinearities.rectify,    
    # dropout2
    dropout2_p=0.5,    
    # output
    output_nonlinearity=lasagne.nonlinearities.softmax,
    output_num_units=10,
    # optimization method params
    update=nesterov_momentum,
    update_learning_rate=0.01,
    update_momentum=0.9,
    max_epochs=10,
    verbose=1,
    )

# Train the network
nn = net1.fit(X_train, y_train)
print(net1.predict(X_train))
Example #46
  dropout_p=0.5,
  output_num_units=num_classes, output_nonlinearity=lasagne.nonlinearities.softmax,
  output_W = GlorotUniform(gain = 1.0),

  # ----------------------- ConvNet Params -------------------------------------------
  update = nesterov_momentum,
  update_learning_rate = learning_rate,
  update_momentum = momentum,
  max_epochs = num_epochs,
  verbose = 1,

)

tic = time.time()
for i in range(12):
  convNet.fit(dataset['X_train'], dataset['Y_train'])
  fl = './model1/saved_model_data' + str(i+1) + '.npz'
  convNet.save_weights_to(fl)
  print('Model saved to file :- ', fl)
toc = time.time()

fl = './model1/saved_model_data' + str(6) + '.npz'
convNet.load_weights_from(fl)
y_pred = convNet.predict(dataset['X_test'])
print(classification_report(Y_test, y_pred))
print(accuracy_score(Y_test, y_pred))
print('Time taken to train the data :- ', toc - tic, 'seconds')


with open('models/net1.pickle', 'wb') as f:
  pickle.dump(net1, f, -1)


#save plot to file

try:
  os.mkdir('plots')
except:
  pass

train_loss = np.array([i["train_loss"] for i in net1.train_history_])
valid_loss = np.array([i["valid_loss"] for i in net1.train_history_])

plot(train_loss, linewidth=3, label='train')
plot(valid_loss, linewidth=3, label='valid')
yscale("log")
legend()
savefig('plots/{method}.png'.format(method=method))

#predicting
predictions = net1.predict(X_test_reshaped)

try:
  os.mkdir('predictions')
except:
  pass

pd.DataFrame({"ImageId": range(1, len(predictions) + 1), "Label": predictions}).to_csv('predictions/' + method +'.csv',
                                                                                       index=False,
                                                                                       header=True)
Example #48
    update_learning_rate=0.01,
    update_momentum=0.9,

    regression=True,
    max_epochs=1000,
    verbose=1,
    )

X, y = load2d()
net.fit(X, y)
with open('netvol.pickle', 'wb') as f:
    pickle.dump(net, f, -1)


X,_ = load2d(test=True)
y_pred2 = net.predict(X)


fig = pyplot.figure(figsize=(6, 6))
fig.subplots_adjust(
    left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
'''
for i in range(16):
    ax = fig.add_subplot(4, 4, i + 1, xticks=[], yticks=[])
    plot_sample(X[i], y_pred2[i], ax)
    fig.savefig(str(i + 2000) + '.jpg')
'''


train_loss = np.array([i["train_loss"] for i in net.train_history_])
valid_loss = np.array([i["valid_loss"] for i in net.train_history_])
Example #49
class NNet(BaseEstimator, ClassifierMixin):
    def __init__(
        self,
        name='nameless_net',  # used for saving, so maybe make it unique
        dense1_size=60,
        dense1_nonlinearity='tanh',
        dense1_init='orthogonal',
        dense2_size=None,
        dense2_nonlinearity=None,  # inherits dense1
        dense2_init=None,  # inherits dense1
        dense3_size=None,
        dense3_nonlinearity=None,  # inherits dense2
        dense3_init=None,  # inherits dense2
        learning_rate=0.001,
        learning_rate_scaling=100,
        momentum=0.9,
        momentum_scaling=100,
        max_epochs=3000,
        epoch_steps=None,
        dropout0_rate=0,  # this is the input layer
        dropout1_rate=None,
        dropout2_rate=None,  # inherits dropout1_rate
        dropout3_rate=None,  # inherits dropout2_rate
        weight_decay=0,
        adaptive_weight_decay=False,
        batch_size=128,
        output_nonlinearity='softmax',
        auto_stopping=True,
        save_snapshots_stepsize=None,
    ):
        """
			Create the network with the selected parameters.

			:param name: Name for save files
			:param dense1_size: Number of neurons for first hidden layer
			:param dense1_nonlinearity: The activation function for the first hidden layer
			:param dense1_init: The weight initialization for the first hidden layer
			:param learning_rate: The (initial) learning rate (how fast the network learns)
			:param learning_rate_scaling: The total factor to gradually decrease the learning rate by
			:param momentum: The (initial) momentum
			:param momentum_scaling: Similar to learning_rate_scaling
			:param max_epochs: Total number of epochs (at most)
			:param dropout1_rate: Percentage of connections dropped each step for first hidden layer
			:param weight_decay: Penalizes the weights by L2 norm (regularizes but can decrease results)
			:param adaptive_weight_decay: Should the weight decay adapt automatically?
			:param batch_size: How many samples to send through the network at a time
			:param auto_stopping: Stop early if the network seems to stop performing well
			:param pretrain: Filepath of the previous weights to start at (or None)
			:return:
		"""
        """
			Input argument storage: automatically store all locals, which should be exactly the arguments at this point, but storing a little too much is not a big problem.
		"""
        params = locals()
        del params['self']
        #self.__dict__.update(params)
        self.parameter_names = sorted(params.keys())
        """
			Check the parameters and update some defaults (will be done for 'self', no need to store again).
		"""
        self.set_params(**params)

    def init_net(self,
                 feature_count,
                 class_count=NCLASSES,
                 verbosity=VERBOSITY >= 2):
        """
			Initialize the network (needs to be done when data is available in order to set dimensions).
		"""
        if VERBOSITY >= 1:
            print('initializing network {0:s} {1:d}x{2:d}x{3:d}'.format(
                self.name, self.dense1_size or 0, self.dense2_size or 0,
                self.dense3_size or 0))
            if VERBOSITY >= 2:
                print('parameters: ' + ', '.join(
                    '{0:s} = {1:}'.format(k, v)
                    for k, v in self.get_params(deep=False).items()))
        self.feature_count = feature_count
        self.class_count = class_count
        """
			Create the layers and their settings.
		"""
        self.layers = [
            ('input', InputLayer),
        ]
        self.params = {
            'dense1_num_units': self.dense1_size,
            'dense1_nonlinearity': nonlinearities[self.dense1_nonlinearity],
            'dense1_W': initializers[self.dense1_init],
            'dense1_b': Constant(0.),
        }
        if self.dropout0_rate:
            self.layers += [('dropout0', DropoutLayer)]
            self.params['dropout0_p'] = self.dropout0_rate
        self.layers += [
            ('dense1', DenseLayer),
        ]
        if self.dropout1_rate:
            self.layers += [('dropout1', DropoutLayer)]
            self.params['dropout1_p'] = self.dropout1_rate
        if self.dense2_size:
            self.layers += [('dense2', DenseLayer)]
            self.params.update({
                'dense2_num_units':
                self.dense2_size,
                'dense2_nonlinearity':
                nonlinearities[self.dense2_nonlinearity],
                'dense2_W':
                initializers[self.dense2_init],
                'dense2_b':
                Constant(0.),
            })
        else:
            assert not self.dense3_size, 'There cannot be a third dense layer without a second one'
        if self.dropout2_rate:
            assert self.dense2_size is not None, 'There cannot be a second dropout layer without a second dense layer.'
            self.layers += [('dropout2', DropoutLayer)]
            self.params['dropout2_p'] = self.dropout2_rate
        if self.dense3_size:
            self.layers += [('dense3', DenseLayer)]
            self.params.update({
                'dense3_num_units':
                self.dense3_size,
                'dense3_nonlinearity':
                nonlinearities[self.dense3_nonlinearity],
                'dense3_W':
                initializers[self.dense3_init],
                'dense3_b':
                Constant(0.),
            })
        if self.dropout3_rate:
            assert self.dense3_size is not None, 'There cannot be a third dropout layer without a third dense layer.'
            self.layers += [('dropout3', DropoutLayer)]
            self.params['dropout3_p'] = self.dropout3_rate
        self.layers += [('output', DenseLayer)]
        self.params.update({
            'output_nonlinearity':
            nonlinearities[self.output_nonlinearity],
            'output_W':
            GlorotUniform(),
            'output_b':
            Constant(0.),
        })
        """
			Create meta parameters and special handlers.
		"""
        if VERBOSITY >= 3:
            print('learning rate: {0:.6f} -> {1:.6f}'.format(
                abs(self.learning_rate),
                abs(self.learning_rate) / float(self.learning_rate_scaling)))
            print('momentum:      {0:.6f} -> {1:.6f}'.format(
                abs(self.momentum),
                1 - ((1 - abs(self.momentum)) / float(self.momentum_scaling))))
        self.step_handlers = [
            LinearVariable('update_learning_rate',
                           start=abs(self.learning_rate),
                           stop=abs(self.learning_rate) /
                           float(self.learning_rate_scaling)),
            LinearVariable(
                'update_momentum',
                start=abs(self.momentum),
                stop=1 -
                ((1 - abs(self.momentum)) / float(self.momentum_scaling))),
            StopNaN(),
        ]
        self.end_handlers = [
            SnapshotEndSaver(base_name=self.name),
            TrainProgressPlotter(base_name=self.name),
        ]
        snapshot_name = 'nn_' + params_name(self.params, prefix=self.name)[0]
        if self.save_snapshots_stepsize:
            self.step_handlers += [
                SnapshotStepSaver(every=self.save_snapshots_stepsize,
                                  base_name=snapshot_name),
            ]
        if self.auto_stopping:
            self.step_handlers += [
                StopWhenOverfitting(loss_fraction=0.9,
                                    base_name=snapshot_name),
                StopAfterMinimum(patience=40, base_name=self.name),
            ]
        weight_decay = shared(float32(abs(self.weight_decay)), 'weight_decay')
        if self.adaptive_weight_decay:
            self.step_handlers += [
                AdaptiveWeightDecay(weight_decay),
            ]
        if self.epoch_steps:
            self.step_handlers += [
                BreakEveryN(self.epoch_steps),
            ]
        """
			Create the actual nolearn network with information from __init__.
		"""
        self.net = NeuralNet(
            layers=self.layers,
            objective=partial(WeightDecayObjective, weight_decay=weight_decay),
            input_shape=(None, feature_count),
            output_num_units=class_count,
            update=nesterov_momentum,  # todo: make parameter
            update_learning_rate=shared(float32(self.learning_rate)),
            update_momentum=shared(float32(self.momentum)),
            on_epoch_finished=self.step_handlers,
            on_training_finished=self.end_handlers,
            regression=False,
            max_epochs=self.max_epochs,
            verbose=verbosity,
            batch_iterator_train=BatchIterator(batch_size=self.batch_size),
            batch_iterator_test=BatchIterator(batch_size=self.batch_size),
            eval_size=0.1,

            #custom_score = ('custom_loss', categorical_crossentropy),
            **self.params)
        self.net.parent = self

        self.net.initialize()

        return self.net

    def get_params(self, deep=True):
        return OrderedDict(
            (name, getattr(self, name)) for name in self.parameter_names)

    def set_params(self, **params):
        """
			Set all the parameters.
		"""
        for name, val in params.items():
            assert name in self.parameter_names, '"{0:s}" is not a valid parameter name (known parameters: "{1:s}")'.format(
                name, '", "'.join(self.parameter_names))
            setattr(self, name, val)
        """
			Arguments checks.
		"""
        assert self.dropout1_rate is None or 0 <= self.dropout1_rate < 1, 'Dropout rate 1 should be a value between 0 and 1 (value: {0})'.format(
            self.dropout1_rate)
        assert self.dropout2_rate is None or 0 <= self.dropout2_rate < 1, 'Dropout rate 2 should be a value between 0 and 1, or None for inheritance (value: {0})'.format(
            self.dropout2_rate)
        assert self.dropout3_rate is None or 0 <= self.dropout3_rate < 1, 'Dropout rate 3 should be a value between 0 and 1, or None for inheritance (value: {0})'.format(
            self.dropout3_rate)
        assert self.dense1_nonlinearity in nonlinearities, 'Linearity 1 should be one of "{0}", got "{1}" instead.'.format(
            '", "'.join(nonlinearities.keys()), self.dense1_nonlinearity)
        assert self.dense2_nonlinearity in list(nonlinearities.keys()) + [
            None
        ], 'Linearity 2 should be one of "{0}", got "{1}" instead.'.format(
            '", "'.join(nonlinearities.keys()), self.dense2_nonlinearity)
        assert self.dense3_nonlinearity in list(nonlinearities.keys()) + [
            None
        ], 'Linearity 3 should be one of "{0}", got "{1}" instead.'.format(
            '", "'.join(nonlinearities.keys()), self.dense3_nonlinearity)
        assert self.dense1_init in initializers, 'Initializer 1 should be one of "{0}", got "{1}" instead.'.format(
            '", "'.join(initializers.keys()), self.dense1_init)
        assert self.dense2_init in list(initializers.keys()) + [
            None
        ], 'Initializer 2 should be one of "{0}", got "{1}" instead.'.format(
            '", "'.join(initializers.keys()), self.dense2_init)
        assert self.dense3_init in list(initializers.keys()) + [
            None
        ], 'Initializer 3 should be one of "{0}", got "{1}" instead.'.format(
            '", "'.join(initializers.keys()), self.dense3_init)
        """
			Argument defaults.
		"""
        if self.dense2_nonlinearity is None:
            self.dense2_nonlinearity = self.dense1_nonlinearity
        if self.dense2_init is None:
            self.dense2_init = self.dense1_init
        if self.dense3_nonlinearity is None:
            self.dense3_nonlinearity = self.dense2_nonlinearity
        if self.dense3_init is None:
            self.dense3_init = self.dense2_init
        if self.dropout2_rate is None and self.dense2_size:
            self.dropout2_rate = self.dropout1_rate
        if self.dropout3_rate is None and self.dense3_size:
            self.dropout3_rate = self.dropout2_rate

    def fit(self, X, y, random_sleep=None):
        if random_sleep:
            sleep(random_sleep *
                  random())  # this is to prevent compiler lock problems
        labels = y - y.min()
        #todo: don't use labels.max(), occasionally (rarely) it will not have the highest class
        self.init_net(feature_count=X.shape[1], class_count=labels.max() + 1)
        net = self.net.fit(X, labels)
        self.save()
        return net

    def interrupted_fit(self, X, y):
        """ DEPRECATED """
        labels = y - y.min()
        self.init_net(feature_count=X.shape[1], class_count=labels.max() + 1)
        knowledge = get_knowledge(self.net)
        for epoch in range(0, self.max_epochs, self.epoch_steps):
            set_knowledge(self.net, knowledge)
            self.init_net(feature_count=X.shape[1],
                          class_count=labels.max() + 1)
            print('epoch {0:d}: learning {1:d} epochs'.format(
                epoch, self.epoch_steps))
            self.net.fit(X, labels)
            ratio = mean([d['valid_loss'] for d in self.net.train_history_[-self.epoch_steps:]]) / \
              mean([d['train_loss'] for d in self.net.train_history_[-self.epoch_steps:]])
            if ratio < 0.85:
                self.weight_decay *= 1.3
            if ratio > 0.95:
                self.weight_decay /= 1.2
            self.init_net(feature_count=X.shape[1],
                          class_count=labels.max() + 1)
            knowledge = get_knowledge(self.net)
        exit()
        net = self.net.fit(X, labels)
        self.save()
        return net

    def predict_proba(self, X):
        probs = self.net.predict_proba(X)
        if not isfinite(probs).all():
            errmsg = 'network "{0:s}" predicted infinite/NaN probabilities'.format(
                self.name)
            stderr.write(errmsg)
            raise DivergenceError(errmsg)
        return probs

    def predict(self, X):
        return self.net.predict(X)

    def score(self, X, y, **kwargs):
        return self.net.score(X, y)

    def save(self, filepath=None):
        assert hasattr(
            self, 'net'
        ), 'Cannot save a network that is not initialized; .fit(X, y) something first [or use net.initialize(..) for random initialization].'
        parameters = self.get_params(deep=False)
        filepath = filepath or join(NNET_STATE_DIR, self.name)
        if VERBOSITY >= 1:
            print('saving network to "{0:s}.net.npz|json"'.format(filepath))
        with open(filepath + '.net.json', 'w+') as fh:
            dump([parameters, self.feature_count, self.class_count],
                 fp=fh,
                 indent=2)
        save_knowledge(self.net, filepath + '.net.npz')

    @classmethod
    def load(cls, filepath=None, name=None):
        """
			:param filepath: The base path (without extension) to load the file from, OR:
			:param name: The name of the network to load (if filename is not given)
			:return: The loaded network
		"""
        filepath = filepath or join(NNET_STATE_DIR, name)
        if VERBOSITY >= 1:
            print('loading network from "{0:s}.net.npz|json"'.format(filepath))
        with open(filepath + '.net.json', 'r') as fh:
            [parameters, feature_count, class_count] = load(fp=fh)
        nnet = cls(**parameters)
        nnet.init_net(feature_count=feature_count, class_count=class_count)
        load_knowledge(nnet.net, filepath + '.net.npz')
        return nnet
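# A hedged usage sketch for the NNet wrapper above (data and sizes are
# hypothetical; NCLASSES, nonlinearities and initializers come from the
# surrounding project):
#
# net = NNet(name='demo_net', dense1_size=128, dense1_nonlinearity='tanh',
#            dense2_size=64, dropout1_rate=0.3, max_epochs=500)
# net.fit(X_train, y_train)
# probs = net.predict_proba(X_test)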
Example #50
y_train=extract_y(dftrain)

net1.fit(X_train,y_train)


# Prediction section:
#
# Read test file
dftest = pd.read_csv(FTEST,header=0)

preprocess(dftest)

X_test=extract_X(dftest)

y_predict=net1.predict(X_test)
# 
# SimpleModelPredict(dftest)
# 
# # =================================================
# # Export of results in expected submission format
# # =================================================
#         
# listExport=[\
#     'ID',
#     'Name',
#     'Adoption',
#     'Died',
#     'Euthanasia',
#     'Return_to_owner',
#     'Transfer'
class ClassificationNN(ClassficationBase.ClassificationBase):
    def __init__(self, isTrain, isOutlierRemoval, isNN=1):
        super(ClassificationNN, self).__init__(isTrain,
                                               isOutlierRemoval,
                                               isNN=1)
        # data preprocessing
        self.dataPreprocessing()

        self.net1 = NeuralNet(
            layers=[  # three layers: one hidden layer
                ('input', layers.InputLayer),
                ('hidden', layers.DenseLayer),
                #('hidden2', layers.DenseLayer),
                ('output', layers.DenseLayer),
            ],
            # layer parameters:
            input_shape=(None, 12),  # input dimension is 12
            hidden_num_units=6,  # number of units in hidden layer
            #hidden2_num_units=3,  # number of units in hidden layer
            output_nonlinearity=lasagne.nonlinearities.sigmoid,  # output layer uses sigmoid function
            output_num_units=1,  # output dimension is 1

            # optimization method:
            update=nesterov_momentum,
            update_learning_rate=0.002,
            update_momentum=0.9,
            regression=True,  # flag to indicate we're dealing with a regression problem
            max_epochs=25,  # we want to train this many epochs
            verbose=0,
        )

    def dataPreprocessing(self):
        # normalize different currency units == already normalized!
        #self.priceNormalize()

        # deal with unbalanced data
        self.dealingUnbalancedData()

        # Standardization
        self.Standardization()

    def training(self):
        # train the NN model
        self.net1.fit(self.X_train, self.y_train)

    def predict(self):
        # predict the test data
        y_pred_train = self.net1.predict(self.X_train)
        self.y_pred = self.net1.predict(self.X_test)

        # 1 for buy, 0 for wait
        median = np.median(y_pred_train)
        mean = np.mean(y_pred_train)
        self.y_pred[self.y_pred >= median] = 1  # change this threshold
        self.y_pred[self.y_pred < median] = 0

        print "Number of buy: {}".format(np.count_nonzero(self.y_pred))
        print "Number of wait: {}".format(np.count_nonzero(1 - self.y_pred))
        #print np.concatenate((y_test, y_pred), axis=1)
        #print y_pred.T.tolist()[0]
        #print map(round, y_pred.T.tolist()[0])
        #print len(y_pred.T.tolist())

        # print the error rate
        self.y_pred = self.y_pred.reshape((self.y_pred.shape[0], 1))
        err = 1 - np.sum(
            self.y_test == self.y_pred) * 1.0 / self.y_pred.shape[0]
        print "Error rate: {}".format(err)

        return self.X_test, self.y_pred
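# A hedged usage sketch (data loading, outlier removal and standardization
# are handled by the ClassficationBase parent class, which is not shown):
#
# clf = ClassificationNN(isTrain=True, isOutlierRemoval=False)
# clf.training()
# X_test, y_pred = clf.predict()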
Example #52
class EmotionClassifier:
    def __init__(self,
                 face_size=192,
                 epochs=100,
                 learning_rate=theano.shared(np.cast['float32'](0.1))):
        self.network = NeuralNet(
            layers=[('input', InputLayer), ('conv1', Conv2DLayer),
                    ('conv2', Conv2DLayer), ('pool1', MaxPool2DLayer),
                    ('conv3', Conv2DLayer), ('conv4', Conv2DLayer),
                    ('pool2', MaxPool2DLayer), ('conv5', Conv2DLayer),
                    ('conv6', Conv2DLayer), ('pool3', MaxPool2DLayer),
                    ('conv7', Conv2DLayer), ('conv8', Conv2DLayer),
                    ('pool4', MaxPool2DLayer), ('hidden1', DenseLayer),
                    ('hidden2', DenseLayer), ('output', DenseLayer)],
            input_shape=(None, 1, face_size, face_size),
            conv1_num_filters=32,
            conv1_filter_size=(3, 3),
            conv1_nonlinearity=lasagne.nonlinearities.rectify,
            conv1_W=lasagne.init.GlorotUniform(),
            conv2_num_filters=32,
            conv2_filter_size=(3, 3),
            conv2_nonlinearity=lasagne.nonlinearities.rectify,
            conv2_W=lasagne.init.GlorotUniform(),
            pool1_pool_size=(2, 2),
            conv3_num_filters=32,
            conv3_filter_size=(3, 3),
            conv3_nonlinearity=lasagne.nonlinearities.rectify,
            conv3_W=lasagne.init.GlorotUniform(),
            conv4_num_filters=32,
            conv4_filter_size=(3, 3),
            conv4_nonlinearity=lasagne.nonlinearities.rectify,
            conv4_W=lasagne.init.GlorotUniform(),
            pool2_pool_size=(2, 2),
            conv5_num_filters=64,
            conv5_filter_size=(3, 3),
            conv5_nonlinearity=lasagne.nonlinearities.rectify,
            conv5_W=lasagne.init.GlorotUniform(),
            conv6_num_filters=32,
            conv6_filter_size=(3, 3),
            conv6_nonlinearity=lasagne.nonlinearities.rectify,
            conv6_W=lasagne.init.GlorotUniform(),
            pool3_pool_size=(2, 2),
            conv7_num_filters=32,
            conv7_filter_size=(3, 3),
            conv7_nonlinearity=lasagne.nonlinearities.rectify,
            conv7_W=lasagne.init.GlorotUniform(),
            conv8_num_filters=32,
            conv8_filter_size=(3, 3),
            conv8_nonlinearity=lasagne.nonlinearities.rectify,
            conv8_W=lasagne.init.GlorotUniform(),
            pool4_pool_size=(2, 2),
            hidden1_num_units=4096,
            hidden1_nonlinearity=lasagne.nonlinearities.rectify,
            hidden2_num_units=2048,
            output_nonlinearity=lasagne.nonlinearities.softmax,
            output_num_units=8,
            regression=False,
            update=adadelta,
            # update_momentum=theano.shared(np.cast['float32'](0.9)),
            # on_epoch_finished=[
            #     EarlyStopping(patience=20)
            #     AdjustVariable('update_learning_rate', start=learning_start, stop=learning_end),
            #     AdjustVariable('update_momentum', start=0.9, stop=0.999),
            # ],
            # batch_iterator_train=ShufflingBatchIteratorMixin,
            # batch_iterator_train=BatchIterator(251, shuffle=True),
            max_epochs=epochs,
            verbose=2)

    def train(self, x_train, y_train, epoch=0):
        """
        Fits training data to the Convolutional Neural Network
        :param epoch: number of epochs
        :param x_train: Training x values
        :param y_train: Training y values
        """
        if epoch == 0:
            self.network.fit(x_train, y_train)
        else:
            self.network.fit(x_train, y_train, epoch)

    def predict(self, image):
        return self.network.predict(image)

    def save_network_state(self, paramsname="params.npz"):
        self.network.save_params_to(paramsname)

    def load_network_state(self, paramsname="params.npz"):
        self.network.load_params_from(paramsname)
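# A hedged usage sketch: inputs should be float32 arrays of shape
# (n_samples, 1, face_size, face_size) and labels int32 in the range 0-7,
# since the output layer has 8 softmax units (variable names hypothetical):
#
# clf = EmotionClassifier(face_size=192, epochs=50)
# clf.train(x_train, y_train)
# clf.save_network_state('emotion_params.npz')
# y_pred = clf.predict(x_test)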
Example #53
###  expect training / val error of about 0.087 with these parameters
###  if your GPU not fast enough, reduce the number of filters in the conv/deconv step

# <codecell>

import pickle
import sys
sys.setrecursionlimit(10000)

pickle.dump(ae, open('mnist/conv_ae.pkl', 'wb'))
#ae = pickle.load(open('mnist/conv_ae.pkl', 'rb'))
ae.save_weights_to('mnist/conv_ae.np')

# <codecell>

X_train_pred = ae.predict(X_train).reshape(-1, 28, 28) * sigma + mu
X_pred = np.rint(X_train_pred).astype(int)
X_pred = np.clip(X_pred, a_min=0, a_max=255)
X_pred = X_pred.astype('uint8')
print(X_pred.shape, X.shape)

# <codecell>

###  show random inputs / outputs side by side


def get_picture_array(X, index):
    array = X[index].reshape(28, 28)
    array = np.clip(array, a_min=0, a_max=255)
    return array.repeat(4, axis=0).repeat(4, axis=1).astype(np.uint8)
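# <codecell>

# A sketch of the promised side-by-side display using get_picture_array
# (assumes PIL's Image is imported and X / X_pred hold flattened images):
#
# index = np.random.randint(len(X_pred))
# pair = np.hstack([get_picture_array(X, index),
#                   get_picture_array(X_pred.reshape(len(X_pred), -1), index)])
# Image.fromarray(pair).save('mnist/compare_{}.png'.format(index))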
Example #54
pyplot.grid()
pyplot.legend()
pyplot.xlabel("epoch")
pyplot.ylabel("loss")
pyplot.ylim(1e-3, 1e-2)
pyplot.yscale("log")
#pyplot.show()
fig = pyplot.gcf()
fig.savefig("signal.png")



def plot_sample(x, y, axis):
    img = x.reshape(96, 96)
    axis.imshow(img, cmap='gray')
    axis.scatter(y[0::2] * 48 + 48, y[1::2] * 48 + 48, marker='x', s=10)

X, _ = load(test=True)
y_pred = net1.predict(X)

fig = pyplot.figure(figsize=(6, 6))
fig.subplots_adjust(
    left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)

for i in range(16):
    ax = fig.add_subplot(4, 4, i + 1, xticks=[], yticks=[])
    plot_sample(X[i], y_pred[i], ax)

#pyplot.show()
fig.savefig("imageTest.png")
Example #55
#These can be used to check my zscore calc to numpy
#print(X_train_z)
#print(scipy.stats.mstats.zscore(X_train))

# Provide our own validation set
def my_split(self, X, y, eval_size):
    return X_train_z,X_validate_z,y_train,y_validate

net0.train_test_split = types.MethodType(my_split, net0)

# Train the network
net0.fit(X_train_z,y_train)

# Predict the validation set
pred_y = net0.predict(X_validate_z)

# Display predictions and count the number of incorrect predictions.
species_names = ['setosa','versicolour','virginica']

count = 0
wrong = 0
for element in zip(X_validate,y_validate,pred_y):
    print("Input: sepal length: {}, sepal width: {}, petal length: {}, petal width: {}; Expected: {}; Actual: {}".format(
        element[0][0],element[0][1],element[0][2],element[0][3],
        species_names[element[1]],
        species_names[element[2]]))
    if element[1] != element[2]:
        wrong = wrong + 1
    count = count + 1
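# The loop above tallies count and wrong but the snippet stops before
# reporting them; a natural closing line:
print("Correct: {}/{} ({:.1f}% accuracy)".format(
    count - wrong, count, 100.0 * (count - wrong) / count))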
Example #56
netfile='./cuisine_net.pkl.gz'
with gzip.open(netfile, 'wb') as file:
    pkl.dump(net, file, -1)

print('Network saved as ' + netfile)

# Load network
with gzip.open(netfile, 'rb') as f:
    net_pretrain = pkl.load(f)



#================#
##  VALIDATION  ##
#================#
y_pred = net.predict(x_valid)

acc = accuracy_score(y_valid,y_pred)
print('Total Accuracy: {0:2.4}%'.format(acc*100))
# cm = confusion_matrix(y_valid, y_pred)
# cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# print(cm_norm)


## Fit to validation 
# print('fitting to validation data ...')
# net.fit(x_valid,y_valid,epochs=50)
# with gzip.open('./cuisine_net2.pkl', 'wb') as file:
#     pkl.dump(net, file, -1)

#==========================#
def main():
    # my code here
    y_train=load_train()
    y_test =load_test()
    X_train=load_images("/home/pratik/Desktop/Skinzy Code SVM/Datasets2/scabies_train.gz")
    X_test=load_images("/home/pratik/Desktop/Skinzy Code SVM/Datasets2/scabies_test.gz")
    X_train,y_train= shuffle(X_train, y_train, random_state=0)
    X_val=X_train[80:]
    X_train=X_train[:80]
    y_val=y_train[80:]
    y_train=y_train[:80]
    y_train =np.array(y_train)
    y_test=np.array(y_test)
    y_val=np.array(y_val)
    y_test = y_test.astype(np.uint8)
    y_train = y_train.astype(np.uint8)
    y_val = y_val.astype(np.uint8)
    X_train,y_train= shuffle(X_train, y_train, random_state=0)
    #print("Plotting the graph")
    #plt.imshow(X_train[0][0])
   
    
    net1 = NeuralNet(
    layers=[('input', layers.InputLayer),
            ('conv2d1', layers.Conv2DLayer),
            ('maxpool1', layers.MaxPool2DLayer),
            ('conv2d2', layers.Conv2DLayer),
            ('maxpool2', layers.MaxPool2DLayer),
            ('dropout1', layers.DropoutLayer),
            ('dense', layers.DenseLayer),
            ('dropout2', layers.DropoutLayer),
            ('output', layers.DenseLayer),
            ],
   # input layer
    input_shape=(None, 3, 64,64),
    # layer conv2d1
    conv2d1_num_filters=32,
    conv2d1_filter_size=(5, 5),
    conv2d1_nonlinearity=lasagne.nonlinearities.rectify,
    conv2d1_W=lasagne.init.GlorotUniform(),  
    # layer maxpool1
    maxpool1_pool_size=(3, 3),
    maxpool1_stride=1,
    maxpool1_pad=0,    
    # layer conv2d2
    conv2d2_num_filters=32,
    conv2d2_filter_size=(5, 5),
    conv2d2_nonlinearity=lasagne.nonlinearities.rectify,
    # layer maxpool2
    maxpool2_pool_size=(3, 3),
    maxpool2_stride=1,
    maxpool2_pad=0,
    # dropout1
    dropout1_p=0.5,    
    # dense
    dense_num_units=256,
    dense_nonlinearity=lasagne.nonlinearities.rectify,    
    # dropout2
    dropout2_p=0.5,    
    # output
    output_nonlinearity=lasagne.nonlinearities.softmax,
    output_num_units=2,
    # optimization method params
    update=nesterov_momentum,
    update_learning_rate=0.001,
    update_momentum=0.9,
    max_epochs=50,
    verbose=1,
    )
    print ("Training starts :")
    net1.fit(X_train, y_train)

    #preds = net1.predict(X_test[0])
     	
    out = X_train[0].reshape(-1, 3, 64, 64)
    pred=net1.predict_proba(out)
    print(pred)
    #cm = confusion_matrix(y_test, preds)
    #plt.matshow(cm)
    #plt.title('Confusion matrix')
    #plt.colorbar()
    #plt.ylabel('True label')
    #plt.xlabel('Predicted label')
    #plt.show()
    
    #print (net1.predict_proba(X_test))
     
    sys.setrecursionlimit(9999999)
    joblib.dump(net1, 'AndroidFileUpload/classifier_2disease/cnn_1.pkl',compress=9)
    
    y_true, y_pred = y_test, net1.predict(X_test) # Get our predictions
    print(classification_report(y_true, y_pred)) # Classification on each digit
    print('The accuracy is:', accuracy_score(y_true, y_pred))


   # visualize.plot_conv_weights(net1.layers_['conv2d1'])

    dense_layer = layers.get_output(net1.layers_['dense'], deterministic=True)
    output_layer = layers.get_output(net1.layers_['output'], deterministic=True)
    input_var = net1.layers_['input'].input_var
    f_output = theano.function([input_var], output_layer)
    f_dense = theano.function([input_var], dense_layer)

    instance = X_test[0][None, :, :]
    #%timeit -n 500 f_output(instance)
    train_features = f_dense(X_train)
    test_features=f_dense(X_test)
    train_labels=y_train
    test_labels=y_test

    '''
	Logistic Regression
	clf2 = LogisticRegression().fit(train_features, train_labels)
    joblib.dump(clf2, 'AndroidFileUpload/classifier_2disease/log_reg.pkl',compress=9)'''

    #SVM
    clf = svm.SVC(kernel="linear",C=10000.0,probability=True)
    clf.fit(train_features,train_labels)

    sys.setrecursionlimit(9999999)
    joblib.dump(clf, 'AndroidFileUpload/classifier_2disease/svm.pkl',compress=9)
    
    
    pred=clf.predict(test_features)
    print(classification_report(test_labels, pred))

    print('The accuracy is:', accuracy_score(test_labels, pred))
    pred = clf.predict_proba(test_features)
    print(pred)

    '''#KNN
Example #58
    print "X_training shape must match y_training shape"
print "Generate X_test and y_test"
n_input = 11
print "X_test..."

print "Multi Layer Perceptron..."
#Build layer for MLP
l_in = ls.layers.InputLayer(shape=(None,10),input_var=None)
l_hidden = ls.layers.DenseLayer(l_in,num_units=15,nonlinearity=ls.nonlinearities.sigmoid)
network = l_out = ls.layers.DenseLayer(l_hidden,num_units=1)
print "Neural network initialize"
#Init Neural net
net1 = NeuralNet(
    layers=network,
    # optimization method:
    update=nesterov_momentum,
    update_learning_rate=0.001,
    update_momentum=0.9,
    regression=True,  # flag to indicate we're dealing with regression problem
    max_epochs=400,  # we want to train this many epochs
    verbose=1,
)
#
print "Training time!!!!!....."
net1.fit(X_training,y_training)
net1.save_params_to("saveNeuralNetwork.tdn")
print "Score rate = "
print net1.score(n_sample2,n_test2)
print net1.predict(n_sample2)[0:2]
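# The elided part above defines X_training, y_training, n_sample2 and
# n_test2. A hypothetical preparation matching the shapes the network
# expects (10 input features, 1 regression target):
#
# X_training = np.random.rand(1000, 10).astype(np.float32)
# y_training = np.random.rand(1000, 1).astype(np.float32)
# n_sample2, n_test2 = X_training[:100], y_training[:100]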

Example #59
	def rodar(self):

			np.set_printoptions(threshold=np.inf)
			sourcepath = Classificar.sourcepath
			numerodeimagens = Classificar.numerodeimagens

			X_test = np.zeros((numerodeimagens, 19200),
							  dtype=np.int)  # Allocates space for each new image you want to classify, each line is an image

			for i in range(1, numerodeimagens + 1):  # read the images (file names are 1-indexed)
				X_test[i - 1] = np.asarray(Image.open(sourcepath + "galaxy" + str(i) + ".jpg")).reshape(
					-1)[0:19200]

			# Reshape the images to help the CNN execution
			X_test = X_test.reshape((-1, 3, 80, 80))

			# Define the CNN, must be the same CNN that is saved into your model that you generated running CNN.py
			net1 = NeuralNet(
				layers=[('input', layers.InputLayer),
						('conv2d1', layers.Conv2DLayer),
						('maxpool1', layers.MaxPool2DLayer),
						('conv2d2', layers.Conv2DLayer),
						('maxpool2', layers.MaxPool2DLayer),
						('conv2d3', layers.Conv2DLayer),
						('maxpool3', layers.MaxPool2DLayer),
						# ('conv2d4', layers.Conv2DLayer),
						# ('maxpool4', layers.MaxPool2DLayer),
						('dropout1', layers.DropoutLayer),
						# ('dropout2', layers.DropoutLayer),
						('dense', layers.DenseLayer),
						# ('dense2', layers.DenseLayer),
						('output', layers.DenseLayer),
						],

				input_shape=(None, 3, 80, 80),

				conv2d1_num_filters=16,
				conv2d1_filter_size=(3, 3),
				conv2d1_nonlinearity=lasagne.nonlinearities.rectify,
				conv2d1_W=lasagne.init.GlorotUniform(),

				maxpool1_pool_size=(2, 2),

				conv2d2_num_filters=16,
				conv2d2_filter_size=(3, 3),
				conv2d2_nonlinearity=lasagne.nonlinearities.rectify,

				maxpool2_pool_size=(2, 2),

				conv2d3_num_filters=16,
				conv2d3_filter_size=(3, 3),
				conv2d3_nonlinearity=lasagne.nonlinearities.rectify,

				maxpool3_pool_size=(2, 2),

				# conv2d4_num_filters = 16,
				# conv2d4_filter_size = (2,2),
				# conv2d4_nonlinearity = lasagne.nonlinearities.rectify,

				# maxpool4_pool_size = (2,2),

				dropout1_p=0.5,

				# dropout2_p = 0.5,

				dense_num_units=16,
				dense_nonlinearity=lasagne.nonlinearities.rectify,

				# dense2_num_units = 16,
				# dense2_nonlinearity = lasagne.nonlinearities.rectify,

				output_nonlinearity=lasagne.nonlinearities.softmax,
				output_num_units=2,

				update=nesterov_momentum,
				update_learning_rate=0.001,
				update_momentum=0.9,
				max_epochs=1000,
				verbose=1,
			)

			net1.load_params_from("/Users/Pedro/PycharmProjects/BIDHU/docs/train.txt")  # Read model

			preds = net1.predict(X_test)  # make predictions


			strpreds = str(preds)
			strpreds = strpreds.replace(" ", "\n")

			strpreds = strpreds.replace("1", "yes")
			strpreds = strpreds.replace("0", "no")
			xstrpreds = (strpreds.splitlines())
			for i in range(len(xstrpreds)):
				xstrpreds[i] = str(i + 1) + "-" + xstrpreds[i]
			strpreds = str(xstrpreds)
			strpreds = strpreds.replace(" ", "\n")
			strpreds = strpreds.replace("[", "")
			strpreds = strpreds.replace("]", "")
			strpreds = strpreds.replace("'", "")
			strpreds = strpreds.replace(",", "")
			strpreds = strpreds.replace("-", " - ")

			return strpreds
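# A hedged usage sketch for the class above (Classificar.sourcepath and
# Classificar.numerodeimagens are class attributes defined elsewhere, and the
# model file path is hard-coded in rodar):
#
# classificador = Classificar()
# print(classificador.rodar())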