Example #1
    def learn_data(self):
        first_half = []
        fh_wins = []
        second_half = []
        sh_wins = []
        key_t = []

        for key, stats in self.results.items():
            if key[0] < 2006:
                first_half.append(stats.stat_arr())
                fh_wins.append(stats.wins)
            else:
                second_half.append(stats.stat_arr())
                sh_wins.append(stats.wins)
                key_t.append(key)

        x_ = np.array(second_half)
        x = np.array(first_half)
        y_learn = np.array(fh_wins)

        nn = Classifier(layers=[Layer("Sigmoid", units=100),
                                Layer("Softmax")],
                        learning_rate=0.01,
                        n_iter=50)
        nn.fit(x, y_learn)

        prdt = nn.predict(x_)  # shape (n_samples, 1)

        for i in range(len(second_half)):
            if prdt[i][0] >= 10 or sh_wins[i] >= 11:
                print(str(key_t[i]) + " actually won " + str(sh_wins[i]) +
                      " and was predicted with " + str(prdt[i][0]))
Example #2
def make_neural_network():
    dataset = np.loadtxt("/Users/BARNES_3/Documents/niki/courses/Decision making/riot_predictor/data_for_neural.csv", delimiter=",")

    score_total = 0
    for i in range(0, 5):
        msk = np.random.rand(len(dataset)) < 0.8
        train = dataset[msk]
        test = dataset[~msk]
        x_train = train[:,0:6]
        y_train = train[:,6]
        x_test = test[:,0:6]
        y_test = test[:,6]
        # print type(x_test)
        # score = 0.797035347777
        # 0.801596351197
        nn = Classifier(
            layers=[
                # Layer("Tanh", units = 1000),
                # Layer("Sigmoid", units = 1000),
                # Layer("Linear")],
                Layer("ExpLin", units = 800),
                Layer("Softmax"),
                ],
            learning_rate=0.0002,
            n_iter=20)
        nn.fit(x_train, y_train)
        score = nn.score(x_test, y_test)
        score_total += score
    print(score_total / 5)
    # print score
    return nn
Example #3
def CNN(X, y):
    print("1-layer Tanh 100 NN")
    # max-norm normalize each sample (normalize returns a new array; it is not in-place)
    X = preprocessing.normalize(X, 'max')
    print("Done normalization")
    X = equalize_hist(X)
    #print("Done histogram equalization")
    #scale centre to the mean to unit vector
    #preprocessing.scale(X_train)
    #preprocessing.scale(X_test)
    #X = equalize_hist(X)
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(
        X, y, test_size=0.2)
    print("Creating neural net...")
    nn = Classifier(layers=[
        Layer("Tanh", units=98, weight_decay=0.0001),
        Layer("Softmax")
    ],
                    learning_rate=0.01,
                    n_iter=1000,
                    batch_size=5)
    print("Done creating neural net")
    print("Neural net fitting....")
    nn.fit(X_train, y_train)
    print("Done Neural net fitting!")
    print('\nTRAIN SCORE', nn.score(X_train, y_train))
    print('TEST SCORE', nn.score(X_test, y_test))
Example #4
def mlp(number_layers, number_neurons_1, number_neurons_2, number_neurons_3,
        number_neurons_4, dropout_rate):

    # X, Y and n_validations are assumed to be defined at module level
    layers = []
    number_neurons = []

    number_neurons.append(number_neurons_1)
    number_neurons.append(number_neurons_2)
    number_neurons.append(number_neurons_3)
    number_neurons.append(number_neurons_4)

    for i in np.arange(number_layers):
        layers.append(
            Layer("Sigmoid", units=number_neurons[i], dropout=dropout_rate))

    layers.append(Layer("Softmax", units=2))

    scores = []

    for i in np.arange(n_validations):

        X_train, X_test, Y_train, Y_test = sklearn.cross_validation.train_test_split(
            X, Y, test_size=0.3, random_state=1)

        predictor = Classifier(layers=layers, learning_rate=0.001, n_iter=25)

        predictor.fit(X_train, Y_train)

        scores.append(metrics.accuracy_score(Y_test,
                                             predictor.predict(X_test)))

    return -median(scores)
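
Because this function returns the negative median accuracy, it is shaped to be handed directly to a minimizer during hyperparameter search. A sketch of a direct call, assuming the globals X, Y and n_validations are in place (the layer sizes here are made up):

# Two hidden layers of 64 and 32 units with 20% dropout; the trailing sizes are ignored.
loss = mlp(2, 64, 32, 0, 0, 0.2)
print("negative median accuracy:", loss)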
Example #5
def CNN(X, y):
    #l2 normalize
    #preprocessing.normalize(X, 'max')
    #scale centre to the mean to unit vector
    #preprocessing.scale(X_train)
    #preprocessing.scale(X_test)
    #X = equalize_hist(X)
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(
        X, y, test_size=0.2, random_state=42)
    nn = Classifier(
        layers=[
            Convolution("Rectifier",
                        channels=100,
                        kernel_shape=(10, 10),
                        dropout=0.25,
                        normalize="batch",
                        weight_decay=0.0001,
                        pool_shape=(2, 2),
                        pool_type="max"),
            #Layer("Tanh", units=100),
            Layer("Softmax")
        ],
        learning_rate=0.05,
        n_iter=10)
    nn.fit(X_train, y_train)
    print('\nTRAIN SCORE', nn.score(X_train, y_train))
    print('TEST SCORE', nn.score(X_test, y_test))
Example #6
def train(X, ty):
    nn = Classifier(
        layers=[Layer("Sigmoid", units=5000), Layer("Sigmoid", units=5)], learning_rate=0.001, n_iter=100, verbose=1
    )
    nn.fit(X, ty)
    print "Train Done!"
    return nn
Example #7
def CNN(X_train, y_train, X_test, X_hidden):
    print("1 Con, 1 tanh")
    # max-norm normalize each sample (normalize returns a new array; it is not in-place)
    X_train = preprocessing.normalize(X_train, 'max')
    X_test = preprocessing.normalize(X_test, 'max')
    X_hidden = preprocessing.normalize(X_hidden, 'max')
    print("Done normalization")

    X_train = equalize_hist(X_train)
    X_test = equalize_hist(X_test)
    X_hidden = equalize_hist(X_hidden)

    nn = Classifier(layers=[
        Layer("Tanh", units=98, weight_decay=0.0001),
        Layer("Softmax")
    ],
                    learning_rate=0.01,
                    n_iter=1000,
                    batch_size=5)
    nn.fit(X_train, y_train)
    print('\nTRAIN SCORE', nn.score(X_train, y_train))
    pub_res = list(nn.predict(X_test))
    hid_res = list(nn.predict(X_hidden))

    return pub_res + hid_res
Example #8
def autoEncoderOptimization(data):
	rbm = ae.AutoEncoder(
			layers=[
				ae.Layer("Tanh", units=300),
				ae.Layer("Sigmoid", units=200),
				ae.Layer("Tanh", units=100)
			],
			learning_rate=0.002,
			n_iter=10
		)

	rbm.fit(data["train"])

	model = Classifier(
			layers=[
				Layer("Tanh", units=300),
				Layer("Sigmoid", units=200),
				Layer("Tanh", units=100),
				Layer("Rectifier", units=100),
				Layer("Rectifier", units=50),
				Layer("Softmax")
			],
		)

	rbm.transfer(model)

	model.fit(data["train"], data["label"])

	prediction = model.predict(data["train"])

	print(accuracy_score(data["label"], prediction))
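
For reference, ae.AutoEncoder.transfer copies the pretrained weights into the matching leading layers of the Classifier before fine-tuning. A sketch of calling the helper above, with a hypothetical data dict:

import numpy as np

data = {
    "train": np.random.rand(500, 400),           # features for pretraining and fine-tuning
    "label": np.random.randint(0, 3, size=500),  # class targets for the Classifier
}
autoEncoderOptimization(data)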
Example #9
 def test_VerboseClassifier(self):
     nn = MLPC(layers=[L("Softmax")], verbose=1, n_iter=1)
     a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,1), dtype=numpy.int32)
     nn.fit(a_in, a_out)
     assert_in("Epoch       Training Error       Validation Error       Time", self.buf.getvalue())
     assert_in("    1       ", self.buf.getvalue())
     assert_in("    N/A     ", self.buf.getvalue())
Example #10
def mlp(number_layers, number_neurons_1, number_neurons_2, number_neurons_3, number_neurons_4, dropout_rate):

	layers = []
	number_neurons = []

	number_neurons.append(number_neurons_1)
	number_neurons.append(number_neurons_2)
	number_neurons.append(number_neurons_3)
	number_neurons.append(number_neurons_4)

	for i in np.arange(number_layers):
		layers.append(Layer("Sigmoid", units=number_neurons[i], dropout = dropout_rate))

	layers.append(Layer("Softmax",  units=2))

	scores = []

	for i in np.arange(n_validations):

		X_train, X_test, Y_train, Y_test = sklearn.cross_validation.train_test_split(X,Y, test_size=0.3, random_state=1)
	
		predictor = Classifier(
			layers=layers,
			learning_rate=0.001,
			n_iter=25)

		predictor.fit(X_train, Y_train)

		scores.append(metrics.accuracy_score(Y_test, predictor.predict(X_test)))
	
	return -median(scores)
Example #11
def batch_train(train, val, model_path):
    trainX, trainY = train
    valX, valY = val
    nn = Classifier(
        layers=[
            Convolution('Rectifier',
                        channels=100,
                        kernel_shape=(5, WORD_DIM),
                        border_mode='valid'
                        #pool_shape=(3,1),
                        #pool_type='max'
                        ),
            Layer('Rectifier', units=900, dropout=0.5),
            Layer('Softmax')
        ],
        batch_size=50,
        learning_rate=0.02,
        normalize='dropout',
        verbose=True)
    nn.n_iter = 100
    print('Net created...')
    try:
        nn.fit(trainX, trainY)
    except KeyboardInterrupt:
        pickle.dump(nn, open(model_path, 'wb'))
    pickle.dump(nn, open(model_path, 'wb'))
    print('Done, final model saved')
    print('Testing')
    #Accuracy on the validation set
    print('Validation accuracy:', batch_test(model_path, val))
Example #12
def trainMLP(trainX, trainY, validationX, validationY, activation='Tanh', algorithm='adam',
			 hidden_layer_size=2048, alpha=0.001):
	print('Learning...')

	trainX, trainY = shuffle(trainX, trainY)
	validationX, validationY = shuffle(validationX, validationY)

	mlp = Classifier(
		layers=[
			Layer(activation, units=hidden_layer_size, dropout=0.1),
			Layer("Softmax", units=len(np.unique(trainY)), dropout=0.2)
		], learning_rule=algorithm,
		learning_rate=0.0005,
		learning_momentum=0.9,
		batch_size=256,
		n_stable=10,
		n_iter=200,
		regularize="L2",
		weight_decay=alpha,
		loss_type="mcc", #?
		valid_set=(validationX, validationY),
		verbose=True)

	print(mlp)

	mlp.fit(trainX, trainY)

	return mlp
Example #13
def CNN(X_train, y_train, X_test, X_hidden):
	print("Combined")
	# max-norm normalize each sample (normalize returns a new array; it is not in-place)
	X_train = preprocessing.normalize(X_train, 'max')
	X_test = preprocessing.normalize(X_test, 'max')
	X_hidden = preprocessing.normalize(X_hidden, 'max')
	print("Done normalization")

	X_train = equalize_hist(X_train)
	X_test = equalize_hist(X_test)
	X_hidden = equalize_hist(X_hidden) 


	nn = Classifier(
		layers=[
			Convolution("Rectifier", channels=98, kernel_shape=(3, 3), pool_shape=(2, 2), pool_type="max"),
			#Convolution("Rectifier", channels=100, kernel_shape=(3,3), dropout=0.25,
			#weight_decay=0.0001, pool_shape=(2,2), pool_type="max"),
			Layer("Softmax")],
		learning_rate=0.01, n_iter=25, random_state=42)
	nn.fit(X_train, y_train)
	print('\nTRAIN SCORE', nn.score(X_train, y_train))
	pub_res = list(nn.predict(X_test))
	hid_res = list(nn.predict(X_hidden))

	return pub_res+hid_res
Example #14
def train_neural_network(samples, nn=None, learning_rate=0.001, n_iter=25): #pylint:disable=invalid-name
    """Trains a neural network using the given sample data.

    Args:
        samples: Tuple containing (sample inputs, sample outputs).
        nn: Neural network that should be trained. If this is None, a new NN
            will be created.
        learning_rate: Neural network learning rate.
        n_iter: Number of training iterations to use.

    Returns:
        The trained neural network.
    """
    sample_inputs, sample_outputs = check_samples(samples)

    # Create a new classifier if necessary.
    if nn is None:
        n_features = len(sample_inputs[0])
        nn = Classifier(
            layers=[
                Layer("Maxout", units=n_features, pieces=2),
                Layer("Softmax")],
            learning_rate=learning_rate,
            n_iter=n_iter)

    # Train the classifier.
    nn.fit(sample_inputs, sample_outputs)
    return nn
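
A minimal usage sketch for the function above, with hypothetical toy data (check_samples is assumed to accept an (inputs, outputs) tuple of array-likes):

import numpy as np

inputs = np.random.rand(100, 4)
outputs = np.random.randint(0, 2, size=(100,))

nn = train_neural_network((inputs, outputs), learning_rate=0.005, n_iter=10)
predictions = nn.predict(inputs)  # shape (100, 1)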
Example #15
def autoEncoderOptimization(data):
    rbm = ae.AutoEncoder(layers=[
        ae.Layer("Tanh", units=300),
        ae.Layer("Sigmoid", units=200),
        ae.Layer("Tanh", units=100)
    ],
                         learning_rate=0.002,
                         n_iter=10)

    rbm.fit(data["train"])

    model = Classifier(layers=[
        Layer("Tanh", units=300),
        Layer("Sigmoid", units=200),
        Layer("Tanh", units=100),
        Layer("Rectifier", units=100),
        Layer("Rectifier", units=50),
        Layer("Softmax")
    ], )

    rbm.transfer(model)

    model.fit(data["train"], data["label"])

    prediction = model.predict(data["train"])

    print(accuracy_score(data["label"], prediction))
Example #16
def CNN(X_train, y_train, X_test, X_hidden):
    print("CNN")
    # max-norm normalize each sample (normalize returns a new array; it is not in-place)
    X_train = preprocessing.normalize(X_train, 'max')
    X_test = preprocessing.normalize(X_test, 'max')
    X_hidden = preprocessing.normalize(X_hidden, 'max')
    print("Done normalization")

    X_train = equalize_hist(X_train)
    X_test = equalize_hist(X_test)
    X_hidden = equalize_hist(X_hidden)

    nn = Classifier(
        layers=[
            Convolution("Rectifier", channels=98, kernel_shape=(3, 3)),
            #Convolution("Rectifier", channels=100, kernel_shape=(3,3), dropout=0.25,
            #weight_decay=0.0001, pool_shape = (2,2), pool_type="max"),
            Layer("Softmax")
        ],
        learning_rate=0.01,
        n_iter=25,
        random_state=42)
    nn.fit(X_train, y_train)
    print('\nTRAIN SCORE', nn.score(X_train, y_train))
    pub_res = list(nn.predict(X_test))
    hid_res = list(nn.predict(X_hidden))

    return pub_res + hid_res
Example #17
def MLP_Evaluation(sample,
                   label,
                   n_hidden=10,
                   activation_func='Tanh',
                   n_updates=20,
                   k_fold=5):
    X = sample
    y = label
    kf = KFold(n_splits=k_fold, shuffle=True)
    split_num = kf.get_n_splits(X)
    k = 1
    G1, G2, S, Total = 0, 0, 0, 0
    (AUC, p, r, f1) = (0, 0, 0, 0)
    for train_index, test_index in kf.split(X):
        # print("TRAIN:", train_index, "TEST:", test_index)
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        nn = Classifier(
            layers=[
                Layer(activation_func, units=n_hidden),
                Layer(activation_func, units=n_hidden),

                ####################Consider multiple-layer condition#############################

                # Layer(activation_func, units=n_hidden),
                # Layer(activation_func, units=n_hidden),
                # Layer(activation_func, units=n_hidden),
                # Layer(activation_func, units=n_hidden),
                # Layer(activation_func, units=n_hidden),
                # Layer(activation_func, units=n_hidden),
                Layer("Softmax")
            ],
            learning_rate=0.001,
            n_iter=n_updates)
        nn.fit(X_train, y_train)
        y_test_vector = np.zeros((X_test.shape[0], 3), dtype='int64')
        for count in range(0, X_test.shape[0]):
            if (y_test[count][0] == 0):
                y_test_vector[count][0] = 1
            elif (y_test[count][0] == 1):
                y_test_vector[count][1] = 1
            else:
                y_test_vector[count][2] = 1

        (AUC_k, p_k, r_k, f1_k) = evaluation.evaluate(nn, X_test,
                                                      y_test_vector, 0.8)
        print("%s / %s Iteration:AUC: %s, Prec: %s, Rec: %s, F1: %s" %
              (k, k_fold, AUC_k, p_k, r_k, f1_k))
        AUC = AUC + AUC_k
        p = p + p_k
        r = r + r_k
        f1 = f1 + f1_k
        print("Average: AUC: %s, Prec: %s, Rec: %s, F1: %s" %
              (AUC / k, p / k, r / k, f1 / k))
        k = k + 1
    AUC = AUC / k_fold
    p = p / k_fold
    r = r / k_fold
    f1 = f1 / k_fold
    return AUC, p, r, f1
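
The per-row one-hot loop in the middle of this example can be collapsed into an identity-matrix lookup; a sketch, assuming y_test is an integer array of shape (n, 1) with classes 0-2:

y_test_vector = np.eye(3, dtype='int64')[y_test[:, 0]]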
Example #18
def main():
    vals, actions = matrixFromCSV("C:\\Users\\Chrisd\\Documents\\College\\Spring 2016\\379K\\Kaggle\\Kaggle\\train.csv")
    X_train, X_test, y_train, y_test = train_test_split(vals, actions, test_size=0.33, random_state=22)
    totalTest, totalAns = matrixFromCSV("C:\\Users\\Chrisd\\Documents\\College\\Spring 2016\\379K\\Kaggle\\Kaggle\\test.csv")


    nn = Classifier(
    layers=[
        Layer("Softmax", units=10),
        Layer("Linear", units=10),
        Layer("Sigmoid")],
    learning_rate=0.001,
    n_iter=20)

    nn.fit(X_train,y_train)
    pickle.dump(nn, open('nn.pkl', 'wb'))

    '''rs = RandomizedSearchCV(nn, param_distributions={
    'learning_rate': stats.uniform(0.001, 0.05),
    'hidden0__units': stats.randint(4, 100),
    'hidden1__units': stats.randint(4, 100),
    'hidden1__type': ["Linear","Rectifier", "Sigmoid", "Tanh"]})
    rs.fit(X_train, y_train)

    pickle.dump(rs, open('rs.pkl', 'wb'))
    rs = pickle.load(open('rs.pkl', 'rb'))'''

    #print(X_test.shape)
    #X_test.reshape(9,1)'''
    nn = pickle.load(open('nn.pkl', 'rb'))
    answer = nn.predict(X_test)
    writeToCSV(answer)
    print(getPercent(answer,y_test))
Example #19
def batch_train(train, val, model_path):
    trainX, trainY = train
    valX, valY = val
    nn = Classifier(layers=[
            Convolution('Rectifier',
                        channels=100,
                        kernel_shape=(5, WORD_DIM),
                        border_mode='valid'
                        #pool_shape=(3,1),
                        #pool_type='max'
                        ),
            Layer('Rectifier', units=900, dropout=0.5),
            Layer('Softmax')],
        batch_size=50,
        learning_rate=0.02,
        normalize='dropout',
        verbose=True)
    nn.n_iter = 100
    print('Net created...')
    try:
        nn.fit(trainX, trainY)
    except KeyboardInterrupt:
        pickle.dump(nn, open(model_path, 'wb'))
    pickle.dump(nn, open(model_path, 'wb'))
    print('Done, final model saved')
    print('Testing')
    #Accuracy on the validation set
    print('Validation accuracy:', batch_test(model_path, val))
Example #20
    def classify(self, X, y):

        seed = random.randint(0, sys.maxsize)

        X_train, X_test, y_train, y_test = split_test(X, y)

        nn = Classifier(
            layers=self.layers(),
            learning_rate = self.LEARNING_RATE,
            valid_size    = self.VALIDATION_SIZE,
            n_stable      = 10,
            f_stable      = self.STABLE,
            random_state  = seed,
            verbose       = False,
            debug         = False
        )

        nn.fit(X_train, y_train)


        train_score = nn.score(X_train, y_train)
        test_score = nn.score(X_test, y_test)

        return train_score, test_score
Example #21
def train_sknn(X, y):
    '''
        NeuralNet with sknn
    '''
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.25,
                                                        random_state=5)
    X_train, X_test = impute_nan(X_train, X_test)
    X_train, X_test = normalize_features(X_train, X_test)
    nn = Classifier(layers=[Layer("Tanh", units=12),
                            Layer("Softmax")],
                    learning_rate=0.005,
                    n_iter=25)

    # gs = GridSearchCV(nn, param_grid={
    #     'learning_rate': [0.05, 0.01, 0.005, 0.001],
    #     'hidden0__units': [4, 8, 12,100],
    #     'hidden0__type': ["Rectifier", "Sigmoid", "Tanh"]})
    # gs.fit(X_train, y_train)
    # print(gs.best_estimator_)
    nn.fit(X_train, y_train)
    predicted = nn.predict(X_test).flatten()
    labels = y_test
    return predicted, labels
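
The commented-out grid search above relies on sknn's scikit-learn compatibility: each hidden layer's parameters are addressable as hidden0__units, hidden0__type, and so on. A sketch of that pattern, reusing nn, X_train and y_train from the function above:

from sklearn.grid_search import GridSearchCV  # sklearn.model_selection in newer releases

gs = GridSearchCV(nn, param_grid={
    'learning_rate': [0.05, 0.01, 0.005, 0.001],
    'hidden0__units': [4, 8, 12, 100],
    'hidden0__type': ["Rectifier", "Sigmoid", "Tanh"]})
gs.fit(X_train, y_train)
print(gs.best_estimator_)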
Example #22
def train_sknn(data, labels):
    # layer one:
    hidden_layer = Layer(type="Sigmoid", name="hidden", units=10)
    output_layer = Layer(type="Softmax", name="output")
    layers = [hidden_layer, output_layer]
    mlp = Classifier(layers=layers, random_state=1)
    mlp.fit(data, labels)
    return mlp
Example #23
 def check(self, a_in, a_out, a_mask, act='Softmax'):
     nn = MLPC(layers=[L(act)],
               learning_rule='adam',
               learning_rate=0.05,
               n_iter=250,
               n_stable=25)
     nn.fit(a_in, a_out, a_mask)
     return nn.predict_proba(a_in)
Example #24
def MLP_leave_one_cross_validation(sample, label):
    length = len(sample)
    right, first, second, third = 0, 0, 0, 0
    false_list = []
    for k in range(0, length):
        nn = Classifier(layers=[Layer("ExpLin", units=1000),
                                Layer("Softmax")],
                        learning_rate=0.001,
                        n_iter=27)
        train_sample = copy.deepcopy(sample)
        label_sample = copy.deepcopy(label)

        test_sample = np.array([sample[k]])
        test_label = label[k]
        train_sample = np.delete(train_sample, k, 0)
        label_sample = np.delete(label_sample, k, 0)

        nn.fit(train_sample, label_sample)

        test_result = nn.predict(test_sample)
        print("predict_label: ", test_result[0][0])
        print("true_label: ", test_label[0])

        if test_label[0] == 0:
            if test_result[0][0] == test_label[0]:
                print(True)
                first += 1
                right += 1
            else:
                print(False)
                false_list.append(k)
        elif test_label[0] == 1:
            if test_result[0][0] == test_label[0]:
                print(True)
                second += 1
                right += 1
            else:
                print(False)
                false_list.append(k)
        else:
            if test_result[0][0] == test_label[0]:
                print(True)
                third += 1
                right += 1
            else:
                print(False)
                false_list.append(k)
        print("....................................................................................................")
        print(k)
        print("....................................................................................................")
    # G1_rate = 1.0 * first / 59
    # S_rate = 1.0 * second / 58
    # G2_rate = 1.0 * third / 65
    print("class G1:", 1.0 * first / 59)
    print("class S:", 1.0 * second / 58)
    print("class G2:", 1.0 * third / 65)
    print("class total:", 1.0 * right / 182)
    print(false_list)
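
The manual deepcopy/np.delete bookkeeping above can also be expressed with scikit-learn's LeaveOneOut splitter; a sketch, with sample and label as numpy arrays as in the function above:

from sklearn.model_selection import LeaveOneOut  # older sklearn: sklearn.cross_validation

for train_idx, test_idx in LeaveOneOut().split(sample):
    fold_nn = Classifier(layers=[Layer("ExpLin", units=1000), Layer("Softmax")],
                         learning_rate=0.001,
                         n_iter=27)
    fold_nn.fit(sample[train_idx], label[train_idx])
    print(fold_nn.predict(sample[test_idx])[0][0], label[test_idx][0][0])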
Example #25
def fit_network():
	x,y = datasplit.data()
	x_normalized = normalize(x,norm='l2')
	nn = Classifier(layers=[Layer("Softmax" , units=1000),Layer("Softmax" , units=62)],learning_rate=0.02,n_iter=1)
	le= LabelEncoder()
	le.fit(y)
	y = le.transform(y)
	nn.fit(x_normalized , y)
	return nn
Example #26
def train_model(values, labels):
    model = Classifier(layers=[
        Convolution("Rectifier", channels=8, kernel_shape=(3, 3)),
        Layer("Softmax")
    ],
                       learning_rate=0.02,
                       n_iter=5)
    model.fit(values, labels)
    return model
Example #27
def _ann_n_iter(data, data_test, target, target_test, n_units):
    nn = Classifier(
        layers=[
            Layer("Sigmoid", units=n_units),
            Layer("Softmax")],
        n_iter=4000)
    nn.fit(data, target)
    test_score = nn.score(data_test, target_test)
    print(n_units, test_score)
Example #28
def wrapper_for_backprop_neural_network_code(train_x, train_y, test_x, test_y):
    score = None
    nn = Classifier(
            layers=[Layer('Sigmoid', units=5), 
            Layer('Softmax')], learning_rate=.001, n_iter=25)
    nn.fit(train_x, train_y)
    predicted = nn.predict(test_x)
    score = accuracy_score(predicted, test_y)
    return score
Example #29
def CNN(X_train, y_train, X_test):
	nn = Classifier(
    layers=[
        Convolution("Rectifier", channels=20, kernel_shape=(5,5), dropout=0.25),
        Layer("Tanh", units=300),
        Layer("Tanh", units=100),
        Layer("Softmax")], learning_rate=0.02, n_iter=10)
	nn.fit(X_train, y_train)
	print('\nTRAIN SCORE', nn.score(X_train, y_train))
	return list(nn.predict(X_test))
Example #30
def wrapper_for_backprop_neural_network_code(train_x, train_y, test_x, test_y):
    score = None
    nn = Classifier(layers=[Layer('Sigmoid', units=5),
                            Layer('Softmax')],
                    learning_rate=.001,
                    n_iter=25)
    nn.fit(train_x, train_y)
    predicted = nn.predict(test_x)
    score = accuracy_score(predicted, test_y)
    return score
Example #31
def _ann_n_iter(data, data_test, target, target_test, n_iter):
    nn = Classifier(
        layers=[
            Layer("Sigmoid", units=100),
            Layer("Softmax")],
        n_iter=n_iter)
    train_score = np.mean(cross_validation.cross_val_score(nn, data, target, cv=10))
    nn.fit(data, target)
    test_score = nn.score(data_test, target_test)
    print(n_iter, train_score, test_score)
Example #32
def train_model(values, labels):
    model = Classifier(
        layers=[
            Convolution("Rectifier", channels=8, kernel_shape=(3, 3)),
            Layer("Softmax")
        ],
        learning_rate=0.02,
        n_iter=5)
    model.fit(values, labels)
    return model
Example #33
def train(X, ty):
    nn = Classifier(
        layers=[Layer("Sigmoid", units=15),
                Layer("Softmax", units=2)],
        learning_rate=0.001,
        n_iter=10,
        verbose=1)
    nn.fit(X, ty)
    print "Train Done!"
    return nn
Example #34
def train():
    res = createArrays()
    X_train = res[0]
    Y_train = res[1]
    samples = res[2]
    nn = Classifier(layers=[Layer("Sigmoid", units=150),
                            Layer("Softmax")],
                    learning_rate=0.001,
                    n_iter=samples)
    nn.fit(X_train, Y_train)
    pickle.dump(nn, open('nn.pkl', 'wb'))
Example #35
class SoftmaxNeuralNetwork:

    def __init__(self):
        # learning rate
        self.nn = Classifier(layers=[Layer("Softmax", units=100), Layer("Softmax")], learning_rate=0.001, n_iter=25)
     
    def train(self, training_input, correct_output):
        self.nn.fit(training_input, correct_output)

    def predict(self, training_example):
        return self.nn.predict(training_example)
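
A brief usage sketch for the wrapper class above, with hypothetical toy data:

import numpy as np

net = SoftmaxNeuralNetwork()
X = np.random.rand(50, 10)
y = np.random.randint(0, 3, size=(50,))
net.train(X, y)
print(net.predict(X[:5]))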
Example #36
def _nn(tx, ty, rx, ry, n_iter):
    print "_nn"
    nn = Classifier(
            layers=[
                Layer("Tanh", units=100),
                Layer("Softmax")],
            n_iter=n_iter)
    nn.fit(tx, ty)
    resultst = nn.score(tx, ty)
    resultsr = nn.score(rx, ry)
    print "_nn done"
    return n_iter, resultst, resultsr
Example #37
def nn_model(x, y):
    nn = Classifier(
        layers=[
            Layer("Sigmoid", units=500),
            Layer("Sigmoid", units=500),
            Layer("Softmax")],
        learning_rate=0.008,
        weight_decay=0.0001,
        dropout_rate=0.1,
        n_iter=400)
    nn.fit(x.as_matrix(), y)
    return nn
Example #38
def train(X, y, num_classes, model=None):
    if model is None:
        model = Classifier(
            layers=[
                Layer("Sigmoid", units=args.num_hidden),
                Layer("Softmax", units=num_classes)], 
            learning_rule='sgd',
            learning_rate=args.lr,
            n_iter=args.n_iter,
            verbose=1)
    model.fit(X, y)
    pickle.dump(model, open(args.outfile, "w"))
Example #39
def fit_network():
    x, y = datasplit.data()
    x_normalized = normalize(x, norm='l2')
    nn = Classifier(
        layers=[Layer("Softmax", units=1000),
                Layer("Softmax", units=62)],
        learning_rate=0.02,
        n_iter=1)
    le = LabelEncoder()
    le.fit(y)
    y = le.transform(y)
    nn.fit(x_normalized, y)
    return nn
Example #40
def get_X_Y(filetrain, filetest):

    y_train, x_train = readCSV(filetrain)
    y_test, x_test = readCSV(filetest)

    # print f_score.f_score(X,Y)
    # print t_score.t_score(X,Y)
    nn = Classifier(layers=[Layer("Rectifier", units=100), Layer("Softmax")],
                    learning_rate=0.02,
                    n_iter=10)
    #pdb.set_trace()
    nn.fit(x_train, y_train)

    score = nn.score(x_test, y_test)
    return score
Example #41
def auto(X_, act, units_):
    ## Neural Network Classifier -- 1 Hidden Layer
    myae = Classifier(layers=[Layer(act, units=units_[0]),
                              Layer("Softmax")],
                      n_iter=100,
                      verbose=True,
                      regularize="L2",
                      batch_size=32,
                      learning_rule="adagrad")
    ## Fit the Classifier
    np.random.seed(1)
    myae.fit(np.asarray(X_), np.asarray(X_))

    return myae
Example #42
def auto(X_, act, units_):
    ## Neural Network Classifier -- 1 Hidden Layer
    myae = Classifier(layers = [Layer(act, units=units_[0]), 
                                Layer("Softmax")],
                                n_iter = 100,
                                verbose=True,
                                regularize="L2",
                                batch_size=32,
                                learning_rule="adagrad")
    ## Fit the Classifier
    np.random.seed(1)
    myae.fit(np.asarray(X_), np.asarray(X_))
    
    return myae
Example #43
def _ann_train_size(data, data_test, target, target_test, train_size):
    nn = Classifier(
        layers=[
            Layer("Sigmoid", units=100),
            Layer("Softmax")])
    if train_size < 1:
        X_train, _, y_train, _ = cross_validation.train_test_split(
            data, target, train_size=train_size, stratify=target)
    else:
        X_train, y_train = data, target
    nn.fit(X_train, y_train)
    train_score = nn.score(X_train, y_train)
    test_score = nn.score(data_test, target_test)
    print(train_size, train_score, test_score)
Example #44
def find_meta_parameters(X_, y_, classifier_type, **kwargs):
    
    if kwargs.get('act'):
        act = kwargs['act']

    print("\n Finding meta parameters for classifier: {0}".format(classifier_type))

    if classifier_type == "NN":
        ## Neural Network Classifier -- 2 Hidden Layer
        NN = Classifier(layers = [Layer(act, units=20), 
                                  Layer(act, units=20),
                                  Layer("Softmax")],
                                  regularize="L2",
                                  n_iter = 1000,
                                  verbose=True,
                                  batch_size=25,
                                  learning_rule="adagrad",
                                  random_state=0)
        ## Meta Parameters Grid Search with Cross Validation
        param_grid = {"learning_rate": [0.001, 0.01, 0.05, 0.075],
                      "weight_decay": [0.0001, 0.001, 0.005, 0.01],
                      "hidden0__units": [75, 100],
                      "hidden1__units": [75, 100]}

        NN = GridSearchCV(NN, param_grid, refit=True, verbose=True, scoring='roc_auc', n_jobs=1, cv=5)
        ## Fit the Classifier
        np.random.seed(1)
        NN.fit(np.asarray(X_), np.asarray(y_, dtype=np.int8))
        ## Best Fit Estimator
        Best_Model = NN.best_estimator_
    
    elif classifier_type == "RF":
        ## Random Forest
        rf = RandomForestClassifier(random_state=0, verbose=1, n_estimators=1000)
        ## Meta Parameters Grid Search with Cross Validation
        param_grid = {'max_features': ["auto", "log2", np.int(np.shape(X_)[1]/2)],
                      'n_estimators': [100,500,1000]}    
        rf = GridSearchCV(rf, param_grid, refit=True, verbose=True, scoring='roc_auc', n_jobs=1, cv=5)
        ## Fit the Classifier
        np.random.seed(1)
        rf.fit(np.asarray(X_), np.asarray(y_, dtype=np.int8))
        ## Best Fit Estimator
        Best_Model = rf.best_estimator_
        #Best_NN,

    else:
        raise ValueError("classifier_type undefined in find_meta_parameter")

    return Best_Model
Example #45
class ClassifierNeuralNet():
	def __init__(self):
		self.nn = Classifier(
			layers=[
				Layer("Sigmoid", units=100),
				Layer("Softmax")],
			learning_rate=0.001,
			n_iter=200)

	def train(self):
		data = parser.load_echo_data('data/training_data.csv')
		self.nn.fit(data.data, data.target)

	def predictData(self, data):
		return self.nn.predict(data)
Example #46
def train(X, y, w, num_classes, model=None, lr=0.01):
    if model is None:
        model = Classifier(
            layers=[
                Layer("Sigmoid", units=args.num_hidden),
                Layer("Softmax", units=num_classes)], 
            learning_rule='sgd',
            learning_rate=lr,
            n_iter=1,
            verbose=1)
    model.fit(X, y)#, w=w)
    pickle.dump(model, open(args.outfile, "w"))
    labels = model.predict(X).flatten()
    print "Split accuracy", float(np.sum(labels == y))/X.shape[0]
    return model
Example #47
def train():
    res = createArrays()
    X_train = res[0]
    Y_train = res[1]
    samples = res[2]
    nn = Classifier(
            layers=[
                Layer("Sigmoid", units=150),
                Layer("Softmax")
                ],
            learning_rate=0.001,
            n_iter=samples
            )
    nn.fit(X_train, Y_train)
    pickle.dump(nn, open('nn.pkl', 'wb'))
Example #48
def predictCategoryNN(training_set, test_set, target, test_target,
                      componentsList):
    scaler = StandardScaler()
    scaler.fit(training_set[componentsList])
    training_set[componentsList] = scaler.transform(
        training_set[componentsList])
    test_set[componentsList] = scaler.transform(test_set[componentsList])
    nn = Classifier(layers=[Layer("Sigmoid", units=100),
                            Layer("Softmax")],
                    learning_rate=0.001,
                    n_iter=25)
    nn.fit(training_set[componentsList].as_matrix(), target.as_matrix())
    return nn.predict(test_set[componentsList].as_matrix()), pd.DataFrame(
        test_target), nn.score(test_set[componentsList].as_matrix(),
                               test_target.as_matrix())
Example #49
def CNN(X_train, y_train, X_test):
    nn = Classifier(layers=[
        Convolution("Rectifier",
                    channels=20,
                    kernel_shape=(5, 5),
                    dropout=0.25),
        Layer("Tanh", units=300),
        Layer("Tanh", units=100),
        Layer("Softmax")
    ],
                    learning_rate=0.02,
                    n_iter=10)
    nn.fit(X_train, y_train)
    print('\nTRAIN SCORE', nn.score(X_train, y_train))
    return list(nn.predict(X_test))
Example #50
def load_train_data(path, modelNo=1):
    X = []
    with open(path + '/train_newFeat_sparse_mat.dat', 'rb') as infile:
        X = pickle.load(infile)
    random.seed(modelNo)
    np.random.seed(modelNo)
    r = random.sample(range(0, X.shape[1]), int(round(0.8 * X.shape[1])))
    X = X[:, r]
    y = pd.read_csv(path + '/labels.csv', index_col=False, header=None)
    y = np.array(y).astype('int')
    X_train, X_val, y_train, y_val = train_test_split(X,
                                                      y,
                                                      test_size=0.2,
                                                      random_state=modelNo,
                                                      stratify=y)
    nn = Classifier(
        layers=[
            Layer("Rectifier", units=200, dropout=0.5),
            Layer("Rectifier", units=200, dropout=0.5),
            Layer("Rectifier", units=200, dropout=0.5),
            Layer("Sigmoid")
        ],
        learning_rate=0.02,
        n_iter=40,
        #    valid_set=(X,y),
        n_stable=15,
        debug=True,
        verbose=True)
    print "Model No is", modelNo
    if (modelNo == 1):
        print "Model No is", modelNo
        nn.valid_set = (X_val, y_val)
    #rbm1 = SVC(C=100.0, gamma = 0.1, probability=True, verbose=1).fit(X[0:9999,:], y[0:9999])
    #rbm2 = RandomForestClassifier(n_estimators=300, criterion='entropy', max_features='auto', bootstrap=False, oob_score=False, n_jobs=1, verbose=1).fit(X[0:9999,:], y[0:9999])
    #rbm3 = GradientBoostingClassifier(n_estimators=50,max_depth=11,subsample=0.8,min_samples_leaf=5,verbose=1).fit(X[0:9999,:], y[0:9999])
    nn.fit(X_train, y_train)
    Y = []
    with open(path + '/test_newFeat_sparse_mat.dat', 'rb') as infile:
        Y = pickle.load(infile)
    Y = Y[:, r]
    preds2 = np.zeros((Y.shape[0], 38))
    for i in range(0, 10):
        s = i * 10000
        e = min(preds2.shape[0], s + 10000)
        preds2[s:e, :] = nn.predict_proba(Y[s:e, :])
    p2 = pd.DataFrame(preds2)
    p2.to_csv("p2_" + str(modelNo) + ".csv", index=None, header=None)
    return p2
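
The prediction loop above assumes the test matrix has at most 100,000 rows (ten fixed chunks of 10,000). A sketch of the same chunking without that assumption:

chunk = 10000
for s in range(0, Y.shape[0], chunk):
    e = min(Y.shape[0], s + chunk)
    preds2[s:e, :] = nn.predict_proba(Y[s:e, :])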
Example #51
def mlp(
    number_layers,
    number_neurons_1,
    number_neurons_2,
    number_neurons_3,
    number_neurons_4,
    dropout_rate_1,
    dropout_rate_2,
    dropout_rate_3,
    dropout_rate_4,
    weight_decay,
    activation_1,
    activation_2,
    activation_3,
    activation_4,
    learning_rate,
):

    # X_train, Y_train, X_test and Y_test are assumed to be defined at module level
    layers = []
    number_neurons = []
    activation = []
    dropout = []

    number_neurons.append(number_neurons_1)
    number_neurons.append(number_neurons_2)
    number_neurons.append(number_neurons_3)
    number_neurons.append(number_neurons_4)

    activation.append(activation_1)
    activation.append(activation_2)
    activation.append(activation_3)
    activation.append(activation_4)

    dropout.append(dropout_rate_1)
    dropout.append(dropout_rate_2)
    dropout.append(dropout_rate_3)
    dropout.append(dropout_rate_4)

    for i in np.arange(number_layers):
        layers.append(Layer(activation[i], units=number_neurons[i], dropout=dropout[i], weight_decay=weight_decay))

    layers.append(Layer("Softmax", units=2))

    predictor = Classifier(layers=layers, learning_rate=learning_rate, n_iter=25)

    predictor.fit(X_train, Y_train)

    return -metrics.accuracy_score(Y_test, predictor.predict(X_test))
Example #52
def classifyNeuralNetworkClassifier(XTrain, XTest, YTrain, YTest, params):
    activation = params['activation']
    actLastLayer = params['actLastLayer']
    rule = params['rule']
    noOfUnits = params['units']
    rate = params['rate']
    noOfIter = params['iter']
    nn = Classifier(layers=[Layer(activation, units=noOfUnits), Layer(actLastLayer)],
        learning_rule=rule,
        learning_rate=rate,
        n_iter=noOfIter)
    nn.fit(XTrain, YTrain)
    YPred = nn.predict(XTest)
    diff = YPred - YTest.reshape(YPred.shape)
    score = diff[diff == 0].size
    score = (100.0 * score) / (YPred.size)
    return score
Example #53
def mlpclassifier(input_data, output_labels, filename):
    from sknn.mlp import Classifier, Layer

    mlpC = Classifier(
        layers=[
            #Layer("Maxout", units=100, pieces=2),
            Layer("Softmax")],
            learning_rate=0.001,
            n_iter=25)
    X_train, X_test, Y_train, Y_test = train_test_split(input_data, output_labels, test_size=0.25, random_state=42)
    mlpC.fit(X_train, Y_train)
    
    predictionsMLP = mlpC.predict(X_test)
    calc_conf_matrix(Y_test, predictionsMLP, 'Multi Layer Perceptron confusion matrix', filename + '_cm')
    roc_plot(input_data, output_labels, mlpC, filename + '_roc')
    coeff_of_deterimination(mlpC, input_data, output_labels, 5)
Example #54
def test_mlp_classifier(
        data,
        layers=[Layer("Rectifier", units=10),
                Layer('Softmax')],
        learning_rate=0.02,
        n_iter=1,
        scale=1):

    #preprocess data if necessary
    X_raw, y = data

    #normalize data for better performance
    if scale == 1:
        X = preprocessing.scale(X_raw)
    else:
        X = X_raw

    #since our test set is not labeled I am using the training data provided for train, validation and test
    print("Create, train, test, validation_sets")

    #split the data into training/validation set and testing set
    X_train_valid, X_test, y_train_valid, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42)

    #split the training set into training set and validation set
    X_train, X_valid, y_train, y_valid = train_test_split(X_train_valid,
                                                          y_train_valid,
                                                          test_size=0.2,
                                                          random_state=23)

    #build the different layers of the model
    print("Building the model...")
    nn = Classifier(layers=layers, learning_rate=learning_rate, n_iter=n_iter)

    #train the model
    print("Training...")
    nn.fit(X_train, y_train)

    #predict on the validation set
    print("Testing...")
    y_valid_pred = nn.predict(X_valid)

    #return the validation score
    print("Score...")
    score = nn.score(X_test, y_test)

    return score, layers, learning_rate, n_iter
Example #55
def train(X, Y):
    print(X.shape)
    print(Y.shape)
    trainX = X[:int(X.shape[0] * 0.7), :, :]
    trainY = Y[:int(Y.shape[0] * 0.7), :]
    valX = X[int(X.shape[0] * 0.7):int(X.shape[0] * 0.8), :, :]
    valY = Y[int(Y.shape[0] * 0.7):int(Y.shape[0] * 0.8), :]
    testX = X[int(X.shape[0] * 0.8):, :, :]
    testY = Y[int(Y.shape[0] * 0.8):, :]

    print('Train, Val, Test')
    print(trainX.shape, ',', trainY.shape, '--', valX.shape, ',', valY.shape, '--', testX.shape, ',', testY.shape)

    nn = Classifier(
        layers=[
            Convolution('Rectifier', channels=1, kernel_shape=(5, WORD_DIM)),
            Layer('Rectifier', units=300),
            Layer('Rectifier', units=300),
            Layer('Softmax')
        ],
        #valid_set = (valX,valY),
        learning_rate=0.02,  #0.05, #0.001,
        #normalize='batch',
        verbose=True)
    print('Net created...')
    #Load net here --always CHECK HERE before starting -- DO THIS NOW, WE WANT TO CONTINUE FROM HERE ON
    nn = pickle.load(open(model_path, 'rb'))
    for i in range(100):
        try:
            nn.n_iter = 5
            nn.fit(trainX, trainY)
            pickle.dump(nn, open(model_path, 'wb'))
            nn = pickle.load(open(model_path, 'rb'))
        except KeyboardInterrupt:
            pickle.dump(nn, open(model_path, 'wb'))
            print('Saved model after keyboard interrupt')
        pickle.dump(nn, open(model_path, 'wb'))
        print('Temp model saved')

    #try:
    #	nn.fit(trainX,trainY)
    #except KeyboardInterrupt:
    #	pickle.dump(nn,open(model_path,'wb'))

    print('Done, final model saved')
Example #56
def classifyNeuralNetworkClassifier(XTrain, XTest, YTrain, YTest, params):
    activation = params['activation']
    actLastLayer = params['actLastLayer']
    rule = params['rule']
    noOfUnits = params['units']
    rate = params['rate']
    noOfIter = params['iter']
    nn = Classifier(
        layers=[Layer(activation, units=noOfUnits),
                Layer(actLastLayer)],
        learning_rule=rule,
        learning_rate=rate,
        n_iter=noOfIter)
    nn.fit(XTrain, YTrain)
    YPred = nn.predict(XTest)
    diff = YPred - YTest.reshape(YPred.shape)
    score = diff[diff == 0].size
    score = (100.0 * score) / (YPred.size)
    return score
Example #57
    def fit(self, X, y):
        """
        Fit the model according to the given training data

        Parameters
        ----------
        X: {array-like}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
            n_features is the number of features

        y: array-like, shape (n_samples,)
            Target vector relative to X.

        Returns
        -------
        self : object
            return self.
        """

        nn = Classifier(
            layers=[
                # Convolution("Rectifier", channels=10, pool_shape=(2,2), kernel_shape=(3, 3)),
                Layer('Rectifier', units=100, dropout=0.25),
                Layer('Sigmoid', units=80, dropout=0.25),
                Layer("Maxout", units=60, pieces=8),
                Layer('Softmax')
            ],
            learning_rate=0.01,
            learning_rule='momentum',
            learning_momentum=0.9,
            batch_size=25,
            valid_set=None,
            # valid_set=(X_test, y_test),
            valid_size=0.2,
            n_stable=10,
            n_iter=100,
            verbose=True)
        nn.fit(X, y)

        self.model = nn

        self.num_class = len(np.unique(y))

        return self
Example #58
def covnetTrain(train_bmi, train_labels, ite=10, kernel=3, learn_rate=0.02, channel=8):
    nn = Classifier(
        layers=[
            Convolution("Rectifier", channels=channel, kernel_shape=(kernel, kernel)),
            Layer("Softmax")],
        learning_rate=learn_rate,
        n_iter=ite)

    neuralnet = nn.fit(train_bmi, train_labels)
    return neuralnet
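
A hedged usage sketch for the helper above; sknn accepts multi-dimensional inputs for convolutional layers, so hypothetical 8x8 single-channel inputs can be passed as a 3-D array:

import numpy as np

# Hypothetical data: 100 grayscale 8x8 inputs, 2 classes.
train_bmi = np.random.rand(100, 8, 8)
train_labels = np.random.randint(0, 2, size=(100,))
model = covnetTrain(train_bmi, train_labels, ite=5, kernel=3, channel=8)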