コード例 #1
0
def make_neural_network(
        data_path="/Users/BARNES_3/Documents/niki/courses/Decision making/riot_predictor/data_for_neural.csv",
        n_runs=5):
    """Train a classifier on the riot-predictor CSV and report its mean score.

    Fixes: replaces Python-2-only ``xrange``/``print`` with Python-3
    equivalents, and generalizes the hard-coded CSV path and run count
    into parameters with the original values as defaults.

    Args:
        data_path: CSV file; columns 0-5 are features, column 6 is the label.
        n_runs: Number of random 80/20 train/test splits to average over.

    Returns:
        The classifier trained on the last split.
    """
    dataset = np.loadtxt(data_path, delimiter=",")

    score_total = 0.0
    for _ in range(n_runs):
        # Random ~80/20 train/test split.
        msk = np.random.rand(len(dataset)) < 0.8
        train = dataset[msk]
        test = dataset[~msk]
        x_train, y_train = train[:, 0:6], train[:, 6]
        x_test, y_test = test[:, 0:6], test[:, 6]
        nn = Classifier(
            layers=[
                Layer("ExpLin", units=800),
                Layer("Softmax"),
            ],
            learning_rate=0.0002,
            n_iter=20)
        nn.fit(x_train, y_train)
        score_total += nn.score(x_test, y_test)
    print(score_total / n_runs)
    return nn
コード例 #2
0
ファイル: annAnalysis.py プロジェクト: anuragreddygv323/P1
def autoEncoderOptimization(data):
    """Pre-train an auto-encoder, transfer its weights into a classifier,
    then fit and report training-set accuracy.

    Fixes the Python-2-only ``print`` statement.

    Args:
        data: Dict with keys "train" (features) and "label" (targets).
    """
    # Unsupervised pre-training of the first three layers.
    rbm = ae.AutoEncoder(layers=[
        ae.Layer("Tanh", units=300),
        ae.Layer("Sigmoid", units=200),
        ae.Layer("Tanh", units=100)
    ],
                         learning_rate=0.002,
                         n_iter=10)

    rbm.fit(data["train"])

    # Supervised model; the first three layers match the auto-encoder
    # so the learned weights can be transferred.
    model = Classifier(layers=[
        Layer("Tanh", units=300),
        Layer("Sigmoid", units=200),
        Layer("Tanh", units=100),
        Layer("Rectifier", units=100),
        Layer("Rectifier", units=50),
        Layer("Softmax")
    ], )

    rbm.transfer(model)

    model.fit(data["train"], data["label"])

    # NOTE(review): accuracy is measured on the training set itself.
    prediction = model.predict(data["train"])

    print(accuracy_score(data["label"], prediction))
コード例 #3
0
def mlp(number_layers, number_neurons_1, number_neurons_2, number_neurons_3,
        number_neurons_4, dropout_rate):
    """Build a sigmoid MLP with `number_layers` hidden layers and return the
    negated median accuracy over `n_validations` train/test splits.

    Relies on module-level `X`, `Y` and `n_validations`.
    """
    number_neurons = [number_neurons_1, number_neurons_2,
                      number_neurons_3, number_neurons_4]

    # Hidden layers (first `number_layers` sizes) plus a 2-class softmax output.
    layers = [Layer("Sigmoid", units=number_neurons[i], dropout=dropout_rate)
              for i in np.arange(number_layers)]
    layers.append(Layer("Softmax", units=2))

    scores = []
    for _ in np.arange(n_validations):
        X_train, X_test, Y_train, Y_test = sklearn.cross_validation.train_test_split(
            X, Y, test_size=0.3, random_state=1)

        predictor = Classifier(layers=layers, learning_rate=0.001, n_iter=25)
        predictor.fit(X_train, Y_train)

        scores.append(
            metrics.accuracy_score(Y_test, predictor.predict(X_test)))

    # Negated so that an external minimizer maximizes accuracy.
    return -median(scores)
コード例 #4
0
ファイル: sentiment.py プロジェクト: PCJohn/Sentiment-ConvNet
def batch_train(train, val, model_path):
    """Train the sentiment ConvNet and save it to *model_path*.

    Fixes: Python-2-only ``print`` statements, and the model being pickled
    twice when training is interrupted (the original dumped in the
    ``except`` handler and then again unconditionally).

    Args:
        train: Tuple (trainX, trainY).
        val: Tuple (valX, valY), forwarded to ``batch_test``.
        model_path: Destination file for the pickled network.
    """
    trainX, trainY = train
    valX, valY = val
    nn = Classifier(
        layers=[
            Convolution('Rectifier',
                        channels=100,
                        kernel_shape=(5, WORD_DIM),
                        border_mode='valid'
                        #pool_shape=(3,1),
                        #pool_type='max'
                        ),
            Layer('Rectifier', units=900, dropout=0.5),
            Layer('Softmax')
        ],
        batch_size=50,
        learning_rate=0.02,
        normalize='dropout',
        verbose=True)
    nn.n_iter = 100
    print('Net created...')
    try:
        nn.fit(trainX, trainY)
    except KeyboardInterrupt:
        pass  # partial model is still saved by the finally block
    finally:
        # Save exactly once, whether training finished or was interrupted.
        with open(model_path, 'wb') as f:
            pickle.dump(nn, f)
    print('Done, final model saved')
    print('Testing')
    #Accuracy on the validation set
    print('Validation accuracy:', batch_test(model_path, val))
コード例 #5
0
def train_neural_network(samples, nn=None, learning_rate=0.001, n_iter=25): #pylint:disable=invalid-name
    """Trains a neural network using the given sample data.

    Args:
        samples: Tuple containing (sample inputs, sample outputs).
        nn: Neural network that should be trained. If this is none, a new NN
            will be created.
        learning_rate: Neural network learning rate.
        n_iter: Number of training iterations to use.

    Returns:
        The trained neural network.
    """
    sample_inputs, sample_outputs = check_samples(samples)

    if nn is None:
        # Build a fresh classifier sized to the input feature count.
        feature_count = len(sample_inputs[0])
        hidden = Layer("Maxout", units=feature_count, pieces=2)
        output = Layer("Softmax")
        nn = Classifier(layers=[hidden, output],
                        learning_rate=learning_rate,
                        n_iter=n_iter)

    nn.fit(sample_inputs, sample_outputs)
    return nn
コード例 #6
0
def MLP_Evaluation(sample,
                   lable,
                   n_hidden=10,
                   activation_func='Tanh',
                   n_updates=20,
                   k_fold=5):
    """Evaluate a two-hidden-layer MLP with k-fold cross validation.

    Args:
        sample: Feature matrix, indexable by the KFold index arrays.
        lable: Label array; each row's first element is the class id
            (0, 1, or anything else for the third class).
        n_hidden: Units per hidden layer.
        activation_func: Activation type for both hidden layers.
        n_updates: Training iterations per fold.
        k_fold: Number of cross-validation folds.

    Returns:
        Tuple (AUC, precision, recall, F1), each averaged over the folds.
    """
    X = sample
    y = lable
    kf = KFold(n_splits=k_fold, shuffle=True)
    split_num = kf.get_n_splits(X)  # NOTE(review): computed but never used
    k = 1  # 1-based fold counter; also divides the running averages below
    G1, G2, S, Total = 0, 0, 0, 0  # NOTE(review): unused accumulators
    (AUC, p, r, f1) = (0, 0, 0, 0)
    for train_index, test_index in kf.split(X):
        # print("TRAIN:", train_index, "TEST:", test_index)
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        nn = Classifier(
            layers=[
                Layer(activation_func, units=n_hidden),
                Layer(activation_func, units=n_hidden),

                # Extra hidden layers could be stacked here for deeper variants:

                # Layer(activation_func, units=n_hidden),
                # Layer(activation_func, units=n_hidden),
                # Layer(activation_func, units=n_hidden),
                # Layer(activation_func, units=n_hidden),
                # Layer(activation_func, units=n_hidden),
                # Layer(activation_func, units=n_hidden),
                Layer("Softmax")
            ],
            learning_rate=0.001,
            n_iter=n_updates)
        nn.fit(X_train, y_train)
        # One-hot encode the fold's test labels into three classes.
        y_test_vector = np.zeros((X_test.shape[0], 3), dtype='int64')
        for count in range(0, X_test.shape[0]):
            if (y_test[count][0] == 0):
                y_test_vector[count][0] = 1
            elif (y_test[count][0] == 1):
                y_test_vector[count][1] = 1
            else:
                y_test_vector[count][2] = 1

        # evaluation.evaluate returns per-fold (AUC, precision, recall, F1);
        # 0.8 is presumably a decision threshold — TODO confirm.
        (AUC_k, p_k, r_k, f1_k) = evaluation.evaluate(nn, X_test,
                                                      y_test_vector, 0.8)
        print("%s / %s Iteration:AUC: %s, Prec: %s, Rec: %s, F1: %s" %
              (k, k_fold, AUC_k, p_k, r_k, f1_k))
        AUC = AUC + AUC_k
        p = p + p_k
        r = r + r_k
        f1 = f1 + f1_k
        # Running average over folds seen so far.
        print("Average: AUC: %s, Prec: %s, Rec: %s, F1: %s" %
              (AUC / k, p / k, r / k, f1 / k))
        k = k + 1
    AUC = AUC / k_fold
    p = p / k_fold
    r = r / k_fold
    f1 = f1 / k_fold
    return AUC, p, r, f1
コード例 #7
0
def train_sknn(X, y):
    """Train an sknn classifier on a 75/25 split of (X, y).

    Returns:
        Tuple (predicted labels for the test split, true test labels).
    """
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.25,
                                                        random_state=5)
    # Clean up the features before fitting.
    X_train, X_test = impute_nan(X_train, X_test)
    X_train, X_test = normalize_features(X_train, X_test)

    net = Classifier(layers=[Layer("Tanh", units=12),
                             Layer("Softmax")],
                     learning_rate=0.005,
                     n_iter=25)

    # gs = GridSearchCV(nn, param_grid={
    #     'learning_rate': [0.05, 0.01, 0.005, 0.001],
    #     'hidden0__units': [4, 8, 12,100],
    #     'hidden0__type': ["Rectifier", "Sigmoid", "Tanh"]})
    # gs.fit(X_train, y_train)
    # print(gs.best_estimator_)
    net.fit(X_train, y_train)
    predicted = net.predict(X_test).flatten()
    return predicted, y_test
コード例 #8
0
    def learn_data(self):
        """Fit a network on pre-2006 seasons' stats vs. wins, then print
        notable predictions for the 2006-and-later seasons (predicted >= 10
        wins or actual >= 11 wins)."""
        pre_stats, pre_wins = [], []
        post_stats, post_wins, post_keys = [], [], []

        # Partition seasons by year (key[0]).
        for season_key, stats in self.results.items():
            if season_key[0] < 2006:
                pre_stats.append(stats.stat_arr())
                pre_wins.append(stats.wins)
            else:
                post_stats.append(stats.stat_arr())
                post_wins.append(stats.wins)
                post_keys.append(season_key)

        x_fit = np.array([pre_stats])
        y_fit = np.array([pre_wins])
        x_eval = np.array([post_stats])

        nn = Classifier(layers=[Layer("Sigmoid", units=100),
                                Layer("Softmax")],
                        learning_rate=0.01,
                        n_iter=50)
        nn.fit(x_fit, y_fit)

        predicted = nn.predict(x_eval)

        for idx in range(len(post_stats)):
            if predicted[0][idx] >= 10 or post_wins[idx] >= 11:
                print((str(post_keys[idx]) + " actually won " + str(post_wins[idx]) +
                       " and " + "was predicted with " + str(predicted[0][idx])))
コード例 #9
0
def main():
    """Train a network on the Kaggle training CSV, pickle it, reload it,
    and score predictions on the held-out split."""
    # Load features/labels; paths are machine-specific Windows locations.
    vals, actions = matrixFromCSV("C:\\Users\\Chrisd\\Documents\\College\\Spring 2016\\379K\\Kaggle\\Kaggle\\train.csv")
    X_train, X_test, y_train, y_test = train_test_split(vals, actions, test_size=0.33, random_state=22)
    # NOTE(review): totalTest/totalAns are loaded but never used below.
    totalTest, totalAns = matrixFromCSV("C:\\Users\\Chrisd\\Documents\\College\\Spring 2016\\379K\\Kaggle\\Kaggle\\test.csv")


    # NOTE(review): unusual layer order — Softmax/Linear as hidden layers
    # with a Sigmoid output; verify this is intentional.
    nn = Classifier(
    layers=[
        Layer("Softmax", units=10),
        Layer("Linear", units=10),
        Layer("Sigmoid")],
    learning_rate=0.001,
    n_iter=20)

    nn.fit(X_train,y_train)
    # Persist the trained model, then (below) reload it before predicting.
    pickle.dump(nn, open('nn.pkl', 'wb'))

    '''rs = RandomizedSearchCV(nn, param_distributions={
    'learning_rate': stats.uniform(0.001, 0.05),
    'hidden0__units': stats.randint(4, 100),
    'hidden1__units': stats.randint(4, 100),
    'hidden1__type': ["Linear","Rectifier", "Sigmoid", "Tanh"]})
    rs.fit(X_train, y_train)

    pickle.dump(rs, open('rs.pkl', 'wb'))
    rs = pickle.load(open('rs.pkl', 'rb'))'''

    #print(X_test.shape)
    #X_test.reshape(9,1)'''
    nn = pickle.load(open('nn.pkl', 'rb'))
    answer = nn.predict(X_test)
    writeToCSV(answer)
    print(getPercent(answer,y_test))
コード例 #10
0
ファイル: annAnalysis.py プロジェクト: aisobran/Adv-ML-NFL
def autoEncoderOptimization(data):
    """Pre-train an auto-encoder, transfer its weights into a classifier,
    then fit and report training-set accuracy.

    Fixes the Python-2-only ``print`` statement and normalizes the tab
    indentation to spaces.

    Args:
        data: Dict with keys "train" (features) and "label" (targets).
    """
    # Unsupervised pre-training of the first three layers.
    rbm = ae.AutoEncoder(
            layers=[
                ae.Layer("Tanh", units=300),
                ae.Layer("Sigmoid", units=200),
                ae.Layer("Tanh", units=100)
            ],
            learning_rate=0.002,
            n_iter=10
        )

    rbm.fit(data["train"])

    # Supervised model; the first three layers mirror the auto-encoder so
    # the learned weights can be transferred.
    model = Classifier(
            layers=[
                Layer("Tanh", units=300),
                Layer("Sigmoid", units=200),
                Layer("Tanh", units=100),
                Layer("Rectifier", units=100),
                Layer("Rectifier", units=50),
                Layer("Softmax")
            ],
        )

    rbm.transfer(model)

    model.fit(data["train"], data["label"])

    # NOTE(review): accuracy is measured on the training set itself.
    prediction = model.predict(data["train"])

    print(accuracy_score(data["label"], prediction))
コード例 #11
0
def trainMLP(trainX, trainY, validationX, validationY, activation='Tanh', algorithm='adam',
			 hidden_layer_size=2048, alpha=0.001):
    """Shuffle the data and train a single-hidden-layer MLP classifier.

    Returns the fitted classifier; validation data is used for early
    stopping via ``valid_set``.
    """
    print('Learning...')

    trainX, trainY = shuffle(trainX, trainY)
    validationX, validationY = shuffle(validationX, validationY)

    hidden = Layer(activation, units=hidden_layer_size, dropout=0.1)
    # Output width matches the number of distinct training labels.
    output = Layer("Softmax", units=len(np.unique(trainY)), dropout=0.2)

    mlp = Classifier(
        layers=[hidden, output],
        learning_rule=algorithm,
        learning_rate=0.0005,
        learning_momentum=0.9,
        batch_size=256,
        n_stable=10,
        n_iter=200,
        regularize="L2",
        weight_decay=alpha,
        loss_type="mcc", #?
        valid_set=(validationX, validationY),
        verbose=True)

    print(mlp)

    mlp.fit(trainX, trainY)

    return mlp
コード例 #12
0
ファイル: sentiment.py プロジェクト: PCJohn/Sentiment-ConvNet
def batch_train(train,val,model_path):
    """Train the sentiment ConvNet and save it to *model_path*.

    Fixes: mixed tab/space indentation (a TabError under Python 3),
    Python-2-only ``print`` statements, and the model being pickled twice
    when training is interrupted.

    Args:
        train: Tuple (trainX, trainY).
        val: Tuple (valX, valY), forwarded to ``batch_test``.
        model_path: Destination file for the pickled network.
    """
    trainX,trainY = train
    valX,valY = val
    nn = Classifier(layers = [
            Convolution('Rectifier',
                        channels=100,
                        kernel_shape=(5,WORD_DIM),
                        border_mode='valid'
                        #pool_shape=(3,1),
                        #pool_type='max'
                        ),
            Layer('Rectifier',units=900,dropout=0.5),
            Layer('Softmax')],
            batch_size = 50,
            learning_rate = 0.02,
            normalize='dropout',
            verbose = True)
    nn.n_iter = 100
    print('Net created...')
    try:
        nn.fit(trainX,trainY)
    except KeyboardInterrupt:
        pass  # partial model is still saved by the finally block
    finally:
        # Save exactly once, whether training finished or was interrupted.
        with open(model_path,'wb') as f:
            pickle.dump(nn, f)
    print('Done, final model saved')
    print('Testing')
    #Accuracy on the validation set
    print('Validation accuracy:',batch_test(model_path,val))
コード例 #13
0
ファイル: pcadnn.py プロジェクト: skbly7/smai-project
def train(X, ty):
    """Fit a two-layer sigmoid network on (X, ty) and return it.

    Fixes the Python-2-only ``print`` statement.
    """
    nn = Classifier(
        layers=[Layer("Sigmoid", units=5000), Layer("Sigmoid", units=5)], learning_rate=0.001, n_iter=100, verbose=1
    )
    nn.fit(X, ty)
    print("Train Done!")
    return nn
コード例 #14
0
 def test_VerboseClassifier(self):
     """Verbose mode should print an epoch table header and a first-epoch
     row; with no validation set the validation column shows N/A."""
     nn = MLPC(layers=[L("Softmax")], verbose=1, n_iter=1)
     # 8 samples, 16 features, integer targets all zero.
     a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,1), dtype=numpy.int32)
     nn.fit(a_in, a_out)
     # self.buf captures stdout; assumes the fixture redirected it — TODO confirm.
     assert_in("Epoch       Training Error       Validation Error       Time", self.buf.getvalue())
     assert_in("    1       ", self.buf.getvalue())
     assert_in("    N/A     ", self.buf.getvalue())
コード例 #15
0
def mlp(number_layers, number_neurons_1, number_neurons_2, number_neurons_3, number_neurons_4, dropout_rate):
	"""Build a sigmoid MLP with `number_layers` hidden layers and return the
	negated median accuracy over `n_validations` train/test splits.

	Relies on module-level `X`, `Y` and `n_validations`.
	"""
	number_neurons = [number_neurons_1, number_neurons_2,
	                  number_neurons_3, number_neurons_4]

	# Hidden layers plus a 2-class softmax output.
	layers = [Layer("Sigmoid", units=number_neurons[i], dropout=dropout_rate)
	          for i in np.arange(number_layers)]
	layers.append(Layer("Softmax", units=2))

	scores = []
	for _ in np.arange(n_validations):
		X_train, X_test, Y_train, Y_test = sklearn.cross_validation.train_test_split(X,Y, test_size=0.3, random_state=1)

		predictor = Classifier(layers=layers, learning_rate=0.001, n_iter=25)
		predictor.fit(X_train, Y_train)
		scores.append(metrics.accuracy_score(Y_test, predictor.predict(X_test)))

	# Negated so that an external minimizer maximizes accuracy.
	return -median(scores)
コード例 #16
0
 def check(self, a_in, a_out, a_mask, act='Softmax'):
     """Fit a one-layer network on masked samples and return class
     probabilities for the training inputs.

     Args:
         a_in: Input feature array.
         a_out: Target array.
         a_mask: Per-sample weight/mask array passed to fit.
         act: Activation type of the single output layer.
     """
     nn = MLPC(layers=[L(act)],
               learning_rule='adam',
               learning_rate=0.05,
               n_iter=250,
               n_stable=25)
     nn.fit(a_in, a_out, a_mask)
     return nn.predict_proba(a_in)
コード例 #17
0
def train_sknn(data, labels):
    """Fit a 10-unit sigmoid MLP classifier on (data, labels) and return it."""
    mlp = Classifier(
        layers=[
            Layer(type="Sigmoid", name="hidden", units=10),   # hidden layer
            Layer(type="Softmax", name="output"),             # output layer
        ],
        random_state=1)
    mlp.fit(data, labels)
    return mlp
コード例 #18
0
def MLP_leave_one_cross_validation(sample, lable):
    """Leave-one-out cross validation of an ExpLin/Softmax network.

    Fixes: Python-2-only ``print`` statements; removes the redundant
    ``copy.deepcopy`` calls (``np.delete`` already returns a new array and
    never mutates its input); collapses the three duplicated per-class
    branches into one correctness check plus a per-class counter update.

    Args:
        sample: Feature array, one row per example.
        lable: Label array; each row's first element is the class id
            (0 -> G1, 1 -> S, other -> G2).
    """
    length = len(sample)
    right, first, second, third = 0, 0, 0, 0
    false_list = []
    for k in range(0, length):
        nn = Classifier(layers=[Layer("ExpLin", units=1000),
                                Layer("Softmax")],
                        learning_rate=0.001,
                        n_iter=27)

        # Train on everything except row k; test on row k alone.
        train_sample = np.delete(sample, k, 0)
        lable_sample = np.delete(lable, k, 0)
        test_sample = np.array([sample[k]])
        test_lable = lable[k]

        nn.fit(train_sample, lable_sample)

        test_result = nn.predict(test_sample)
        print("predict_label: ", test_result[0][0])
        print("true_label: ", test_lable[0])

        if test_result[0][0] == test_lable[0]:
            print(True)
            right += 1
            # Per-class hit counters.
            if test_lable[0] == 0:
                first += 1
            elif test_lable[0] == 1:
                second += 1
            else:
                third += 1
        else:
            print(False)
            false_list.append(k)
        print("....................................................................................................")
        print(k)
        print("....................................................................................................")
    # NOTE(review): 59/58/65/182 are hard-coded class sizes for this dataset.
    print("class G1:", 1.0 * first / 59)
    print("class S:", 1.0 * second / 58)
    print("class G2:", 1.0 * third / 65)
    print("class total:", 1.0 * right / 182)
    print(false_list)
コード例 #19
0
ファイル: predictor.py プロジェクト: shravan97/kaggle
def fit_network():
	"""Load the dataset, L2-normalize the features, label-encode the targets,
	and fit a two-layer softmax network for one iteration."""
	x,y = datasplit.data()
	x_normalized = normalize(x,norm='l2')
	# Targets must be integer-encoded before fitting.
	encoder = LabelEncoder()
	encoder.fit(y)
	y = encoder.transform(y)
	net = Classifier(
		layers=[Layer("Softmax" , units=1000),Layer("Softmax" , units=62)],
		learning_rate=0.02,
		n_iter=1)
	net.fit(x_normalized , y)
	return net
コード例 #20
0
def wrapper_for_backprop_neural_network_code(train_x, train_y, test_x, test_y):
    """Fit a small sigmoid/softmax network and return test accuracy."""
    net = Classifier(
        layers=[
            Layer('Sigmoid', units=5),
            Layer('Softmax'),
        ],
        learning_rate=.001,
        n_iter=25)
    net.fit(train_x, train_y)
    predicted = net.predict(test_x)
    # accuracy_score is symmetric, so the argument order is harmless here.
    return accuracy_score(predicted, test_y)
コード例 #21
0
def train_model(values, labels):
    """Fit a small convolutional classifier on (values, labels) and return it."""
    conv = Convolution("Rectifier", channels=8, kernel_shape=(3, 3))
    out = Layer("Softmax")
    model = Classifier(layers=[conv, out],
                       learning_rate=0.02,
                       n_iter=5)
    model.fit(values, labels)
    return model
コード例 #22
0
def _ann_n_iter(data, data_test, target, target_test, n_units):
    """Fit a sigmoid network with *n_units* hidden units and print its
    test score.

    Fixes the Python-2-only ``print`` statement.
    """
    nn = Classifier(
        layers=[
            Layer("Sigmoid", units=n_units),
            Layer("Softmax")],
        n_iter=4000)
    nn.fit(data, target)
    test_score = nn.score(data_test, target_test)
    print(n_units, test_score)
コード例 #23
0
 def test_GetParamValues(self):
     """Constructor keyword arguments should round-trip through get_params()."""
     nn = MLPC(layers=[L("Linear")],
               learning_rate=0.05,
               n_iter=456,
               n_stable=123,
               valid_size=0.2,
               dropout_rate=0.25)
     params = nn.get_params()
     # check_values is a helper on the test fixture that asserts each of the
     # values above appears in the params dict.
     self.check_values(params)
コード例 #24
0
def _ann_n_iter(data, data_test, target, target_test, n_iter):
    """Cross-validate and fit a 100-unit sigmoid network for *n_iter*
    iterations, printing CV and test scores.

    Fixes the Python-2-only ``print`` statement.
    """
    nn = Classifier(
        layers=[
            Layer("Sigmoid", units=100),
            Layer("Softmax")],
        n_iter=n_iter)
    # 10-fold CV estimate on the training data, then a final fit.
    train_score = np.mean(cross_validation.cross_val_score(nn, data, target, cv=10))
    nn.fit(data, target)
    test_score = nn.score(data_test, target_test)
    print(n_iter, train_score, test_score)
コード例 #25
0
ファイル: CNN.py プロジェクト: lionheartX/Kaggle_uoft
def CNN(X_train, y_train, X_test):
	"""Train a small ConvNet, print its training score, and return the
	predictions for X_test as a list."""
	layers = [
		Convolution("Rectifier", channels=20, kernel_shape=(5, 5), dropout=0.25),
		Layer("Tanh", units=300),
		Layer("Tanh", units=100),
		Layer("Softmax"),
	]
	net = Classifier(layers=layers, learning_rate=0.02, n_iter=10)
	net.fit(X_train, y_train)
	print('\nTRAIN SCORE', net.score(X_train, y_train))
	return list(net.predict(X_test))
コード例 #26
0
def wrapper_for_backprop_neural_network_code(train_x, train_y, test_x, test_y):
    """Fit a small sigmoid/softmax network and return its test accuracy."""
    hidden = Layer('Sigmoid', units=5)
    output = Layer('Softmax')
    net = Classifier(layers=[hidden, output],
                     learning_rate=.001,
                     n_iter=25)
    net.fit(train_x, train_y)
    # accuracy_score is symmetric, so the argument order is harmless here.
    return accuracy_score(net.predict(test_x), test_y)
コード例 #27
0
def train_model(values,labels):
    """Fit a small convolutional classifier on (values, labels) and return it."""
    layers = [
        Convolution("Rectifier", channels=8, kernel_shape=(3, 3)),
        Layer("Softmax"),
    ]
    model = Classifier(layers=layers, learning_rate=0.02, n_iter=5)
    model.fit(values, labels)
    return model
コード例 #28
0
def train(X, ty):
    """Fit a 15-unit sigmoid network with a 2-class softmax output and
    return it.

    Fixes the Python-2-only ``print`` statement.
    """
    nn = Classifier(
        layers=[Layer("Sigmoid", units=15),
                Layer("Softmax", units=2)],
        learning_rate=0.001,
        n_iter=10,
        verbose=1)
    nn.fit(X, ty)
    print("Train Done!")
    return nn
コード例 #29
0
class SoftmaxNeuralNetwork:
    """Thin wrapper around an sknn Classifier with a fixed softmax topology."""

    def __init__(self):
        # 100-unit softmax hidden layer, softmax output, fixed learning rate.
        self.nn = Classifier(layers=[Layer("Softmax", units=100), Layer("Softmax")], learning_rate=0.001, n_iter=25)

    def train(self, training_input, correct_output):
        """Fit the underlying network on the given samples."""
        self.nn.fit(training_input, correct_output)

    def predict(self, training_example):
        """Return the network's prediction for one (or more) examples."""
        return self.nn.predict(training_example)
コード例 #30
0
def train():
    """Build the training arrays, fit a sigmoid network (one iteration per
    sample), and pickle the result to nn.pkl."""
    X_train, Y_train, samples = createArrays()[0], createArrays()[1], createArrays()[2]
    net = Classifier(layers=[Layer("Sigmoid", units=150),
                             Layer("Softmax")],
                     learning_rate=0.001,
                     n_iter=samples)
    net.fit(X_train, Y_train)
    pickle.dump(net, open('nn.pkl', 'wb'))
コード例 #31
0
def covnetTrain(train_bmi , train_labels , ite =10 , kernel =3 ,learn_rate =0.02, channel = 8):
    """Fit a convolutional classifier on the BMI training data.

    Args:
        train_bmi: Training features.
        train_labels: Training targets.
        ite: Number of training iterations.
        kernel: Side length of the square convolution kernel.
        learn_rate: SGD learning rate.
        channel: Number of convolution channels.

    Returns:
        The fitted classifier (fit returns the estimator itself).
    """
    conv_layer = Convolution("Rectifier", channels=channel, kernel_shape=(kernel, kernel))
    net = Classifier(
        layers=[conv_layer, Layer("Softmax")],
        learning_rate=learn_rate,
        n_iter=ite,
        )
    return net.fit(train_bmi , train_labels)
def neural_classifier(X, y, classes=2):
    """Fit a rectifier/softmax network; wrap it in one-vs-rest when the
    problem has more than two classes."""
    from sknn.mlp import Classifier, Layer
    net = Classifier(layers=[Layer("Rectifier", units=100),
                             Layer("Softmax")],
                     learning_rate=0.02,
                     n_iter=10)
    if classes != 2:
        return OneVsRestClassifier(net).fit(X, y)
    return net.fit(X, y)
コード例 #33
0
def _nn(tx, ty, rx, ry, n_iter):
    """Fit a Tanh/Softmax network for *n_iter* iterations and return
    (n_iter, training score, test score).

    Fixes the Python-2-only ``print`` statements.
    """
    print("_nn")
    nn = Classifier(
            layers=[
                Layer("Tanh", units=100),
                Layer("Softmax")],
            n_iter=n_iter)
    nn.fit(tx, ty)
    resultst = nn.score(tx, ty)
    resultsr = nn.score(rx, ry)
    print("_nn done")
    return n_iter, resultst, resultsr
コード例 #34
0
ファイル: patch_class.py プロジェクト: cwein3/im-seg
def train(X, y, num_classes, model=None):
    """Fit (or continue fitting) a sigmoid patch classifier and pickle it.

    Fixes: the pickle was written to a text-mode file (``"w"``); pickle
    data is binary and must use ``"wb"``. Also closes the file via a
    context manager instead of leaking the handle.

    Args:
        X: Training features.
        y: Training targets.
        num_classes: Width of the softmax output layer.
        model: Existing classifier to continue training; a new one is
            built from the global ``args`` when None.
    """
    if model is None:
        model = Classifier(
            layers=[
                Layer("Sigmoid", units=args.num_hidden),
                Layer("Softmax", units=num_classes)], 
            learning_rule='sgd',
            learning_rate=args.lr,
            n_iter=args.n_iter,
            verbose=1)
    model.fit(X, y)
    with open(args.outfile, "wb") as f:
        pickle.dump(model, f)
コード例 #35
0
def nn_model(x, y):
    """Fit a two-hidden-layer sigmoid network on a pandas DataFrame.

    Fixes: ``DataFrame.as_matrix()`` was deprecated and removed in
    pandas 1.0; ``.values`` is the drop-in equivalent available in every
    pandas version.

    Args:
        x: pandas DataFrame of features.
        y: Target array.
    """
    nn = Classifier(
    layers=[
        Layer("Sigmoid", units=500),
        Layer("Sigmoid", units=500),
        Layer("Softmax")],
    learning_rate=0.008,
    weight_decay = 0.0001,
    dropout_rate=0.1,
    n_iter=400)
    nn.fit(x.values, y)
    return nn
コード例 #36
0
ファイル: predictor.py プロジェクト: shravan97/kaggle
def fit_network():
    """Load the dataset, L2-normalize the features, label-encode the targets,
    and fit a two-layer softmax network for one iteration."""
    x, y = datasplit.data()
    x_normalized = normalize(x, norm='l2')
    # Targets must be integer-encoded before fitting.
    encoder = LabelEncoder()
    encoder.fit(y)
    y = encoder.transform(y)
    net = Classifier(
        layers=[Layer("Softmax", units=1000),
                Layer("Softmax", units=62)],
        learning_rate=0.02,
        n_iter=1)
    net.fit(x_normalized, y)
    return net
def get_X_Y(filetrain, filetest):
    """Train a rectifier/softmax network on *filetrain* and return its
    accuracy on *filetest*.

    Fixes: the computed test score was assigned but never returned, so the
    function silently returned None; it now returns the score (backward
    compatible — previous callers ignored the return value).

    Args:
        filetrain: CSV file with training labels and features (readCSV format).
        filetest: CSV file with test labels and features.

    Returns:
        The classifier's score on the test data.
    """
    y_train, x_train = readCSV(filetrain)
    y_test, x_test = readCSV(filetest)

   # print f_score.f_score(X,Y)

    #print t_score.t_score(X,Y)
    nn = Classifier(layers=[Layer("Rectifier", units=100), Layer("Softmax")],
                    learning_rate=0.02,
                    n_iter=10)
    #pdb.set_trace()
    nn.fit(x_train, y_train)

    score = nn.score(x_test, y_test)
    return score
コード例 #38
0
def auto(X_, act, units_):
    """Fit a classifier auto-encoder style: the inputs are used as their own
    targets. Returns the fitted model."""
    hidden = Layer(act, units=units_[0])
    model = Classifier(layers=[hidden, Layer("Softmax")],
                       n_iter=100,
                       verbose=True,
                       regularize="L2",
                       batch_size=32,
                       learning_rule="adagrad")
    # Seed numpy for reproducible weight initialization, then fit X -> X.
    np.random.seed(1)
    model.fit(np.asarray(X_), np.asarray(X_))
    return model
コード例 #39
0
ファイル: Helper.py プロジェクト: IQSS/gentb-site
def auto(X_, act, units_):
    """Fit a classifier auto-encoder style: the inputs are used as their own
    targets. Returns the fitted model."""
    layer_stack = [Layer(act, units=units_[0]), Layer("Softmax")]
    model = Classifier(layers=layer_stack,
                       n_iter=100,
                       verbose=True,
                       regularize="L2",
                       batch_size=32,
                       learning_rule="adagrad")
    # Seed numpy for reproducible weight initialization, then fit X -> X.
    np.random.seed(1)
    model.fit(np.asarray(X_), np.asarray(X_))
    return model
コード例 #40
0
def _ann_train_size(data, data_test, target, target_test, train_size):
    """Fit a sigmoid network on a stratified subset of the training data
    and print train/test scores.

    Fixes the Python-2-only ``print`` statement.

    Args:
        train_size: Fraction of the training data to use; values >= 1 use
            the full training set.
    """
    nn = Classifier(
        layers=[
            Layer("Sigmoid", units=100),
            Layer("Softmax")])
    if train_size < 1:
        # Stratified subsample keeps the class balance of `target`.
        X_train, _, y_train, _ = cross_validation.train_test_split(
            data, target, train_size=train_size, stratify=target)
    else:
        X_train, y_train = data, target
    nn.fit(X_train, y_train)
    train_score = nn.score(X_train, y_train)
    test_score = nn.score(data_test, target_test)
    print(train_size, train_score, test_score)
コード例 #41
0
ファイル: Helper.py プロジェクト: IQSS/gentb-site
def find_meta_parameters(X_, y_, classifier_type, **kwargs):
    """Grid-search meta-parameters for a neural network or random forest.

    Fixes: ``act`` was only bound when the ``'act'`` kwarg was present, so
    ``classifier_type == "NN"`` without it raised a NameError — now it
    raises a clear ValueError instead. Also replaces the deprecated
    ``np.int`` alias (removed in NumPy 1.24) with builtin ``int``.

    Args:
        X_: Feature matrix.
        y_: Binary labels (cast to int8 before fitting).
        classifier_type: "NN" or "RF".
        **kwargs: For "NN", requires 'act' — the hidden-layer activation.

    Returns:
        The best estimator found by the grid search.

    Raises:
        ValueError: On an unknown classifier_type, or "NN" without 'act'.
    """
    act = kwargs.get('act')

    print("\n Finding meta parameters for classifier: {0}".format(classifier_type))

    if classifier_type == "NN":
        if act is None:
            raise ValueError("classifier_type 'NN' requires an 'act' keyword argument")
        ## Neural Network Classifier -- 2 Hidden Layer
        NN = Classifier(layers = [Layer(act, units=20), 
                                  Layer(act, units=20),
                                  Layer("Softmax")],
                                  regularize="L2",
                                  n_iter = 1000,
                                  verbose=True,
                                  batch_size=25,
                                  learning_rule="adagrad",
                                  random_state=0)
        ## Meta Parameters Grid Search with Cross Validation
        param_grid = {"learning_rate": [0.001, 0.01, 0.05, 0.075],
                      "weight_decay": [0.0001, 0.001, 0.005, 0.01],
                      "hidden0__units": [75, 100],
                      "hidden1__units": [75, 100]}

        NN = GridSearchCV(NN, param_grid, refit=True, verbose=True, scoring='roc_auc', n_jobs=1, cv=5)
        ## Fit the Classifier
        np.random.seed(1)
        NN.fit(np.asarray(X_), np.asarray(y_, dtype=np.int8))
        ## Best Fit Estimator
        Best_Model = NN.best_estimator_

    elif classifier_type == "RF":
        ## Random Forest
        rf = RandomForestClassifier(random_state=0, verbose=1, n_estimators=1000)
        ## Meta Parameters Grid Search with Cross Validation
        param_grid = {'max_features': ["auto", "log2", int(np.shape(X_)[1]/2)],
                      'n_estimators': [100,500,1000]}    
        rf = GridSearchCV(rf, param_grid, refit=True, verbose=True, scoring='roc_auc', n_jobs=1, cv=5)
        ## Fit the Classifier
        np.random.seed(1)
        rf.fit(np.asarray(X_), np.asarray(y_, dtype=np.int8))
        ## Best Fit Estimator
        Best_Model = rf.best_estimator_

    else:
        raise ValueError("classifier_type undefined in find_meta_parameter")

    return Best_Model
コード例 #42
0
class ClassifierNeuralNet():
    """Sigmoid/Softmax classifier trained on the echo dataset CSV."""

    def __init__(self):
        # Fixed topology: one 100-unit sigmoid hidden layer.
        self.nn = Classifier(
            layers=[
                Layer("Sigmoid", units =100),
                Layer("Softmax")],
            learning_rate = 0.001,
            n_iter = 200)

    def train(self):
        """Load the training CSV and fit the network on it."""
        data = parser.load_echo_data('data/training_data.csv')
        self.nn.fit(data.data, data.target)

    def predictData(self, data):
        """Return the network's predictions for *data*."""
        return self.nn.predict(data)
コード例 #43
0
ファイル: patch_class_splits.py プロジェクト: cwein3/im-seg
def train(X, y, w, num_classes, model=None, lr=0.01):
    """Fit (or continue fitting) a sigmoid patch classifier for one
    iteration, pickle it, and print split accuracy.

    Fixes: pickle written to a text-mode file (``"w"``) — pickle data is
    binary and must use ``"wb"``; the file handle is now closed via a
    context manager; the Python-2-only ``print`` statement is converted.

    Args:
        X: Training features.
        y: Training targets.
        w: Per-sample weights (currently unused; see commented fit call).
        num_classes: Width of the softmax output layer.
        model: Existing classifier to continue training, or None to build one.
        lr: Learning rate for a newly built classifier.

    Returns:
        The fitted classifier.
    """
    if model is None:
        model = Classifier(
            layers=[
                Layer("Sigmoid", units=args.num_hidden),
                Layer("Softmax", units=num_classes)], 
            learning_rule='sgd',
            learning_rate=lr,
            n_iter=1,
            verbose=1)
    model.fit(X, y)#, w=w)
    with open(args.outfile, "wb") as f:
        pickle.dump(model, f)
    labels = model.predict(X).flatten()
    print("Split accuracy", float(np.sum(labels == y))/X.shape[0])
    return model
コード例 #44
0
ファイル: trainer.py プロジェクト: rohany/Language-Recognizer
def train():
    """Build the training arrays, fit a sigmoid network (one iteration per
    sample), and pickle the result to nn.pkl."""
    X_train, Y_train, samples = createArrays()[0], createArrays()[1], createArrays()[2]
    net = Classifier(
            layers=[Layer("Sigmoid", units=150), Layer("Softmax")],
            learning_rate=0.001,
            n_iter=samples
            )
    net.fit(X_train, Y_train)
    pickle.dump(net, open('nn.pkl', 'wb'))
コード例 #45
0
ファイル: c_smac_mlp_11.py プロジェクト: jpfiguero/Project
def mlp(
    number_layers,
    number_neurons_1,
    number_neurons_2,
    number_neurons_3,
    number_neurons_4,
    dropout_rate_1,
    dropout_rate_2,
    dropout_rate_3,
    dropout_rate_4,
    weight_decay,
    activation_1,
    activation_2,
    activation_3,
    activation_4,
    learning_rate,
):
    """SMAC objective: build an MLP from up to four per-layer
    (neurons, activation, dropout) settings and return the negated test
    accuracy. Relies on module-level X_train/Y_train/X_test/Y_test.
    """
    number_neurons = [number_neurons_1, number_neurons_2,
                      number_neurons_3, number_neurons_4]
    activation = [activation_1, activation_2, activation_3, activation_4]
    dropout = [dropout_rate_1, dropout_rate_2, dropout_rate_3, dropout_rate_4]

    # First `number_layers` hidden layers, then a 2-class softmax output.
    layers = [Layer(activation[i], units=number_neurons[i],
                    dropout=dropout[i], weight_decay=weight_decay)
              for i in np.arange(number_layers)]
    layers.append(Layer("Softmax", units=2))

    predictor = Classifier(layers=layers, learning_rate=learning_rate, n_iter=25)
    predictor.fit(X_train, Y_train)

    # Negated so the SMAC minimizer maximizes accuracy.
    return -metrics.accuracy_score(Y_test, predictor.predict(X_test))
コード例 #46
0
ファイル: main.py プロジェクト: emithongle/QCF-Thesis-tmp
def buildClassifier(clf='random_forest'):
    """Return an unfitted classifier instance selected by name.

    Unrecognized names fall back to a random forest.
    """
    if (clf == 'neuron_network'):
        # Sigmoid hidden layer, 2-class softmax output, plain SGD.
        return Classifier(
            layers=[Layer("Sigmoid", units=100),
                    Layer("Softmax", units=2)],
            learning_rule='sgd',
            learning_rate=0.01,
            n_iter=10)
    elif (clf == 'SVC'):
        return svm.SVC()
    elif (clf == 'Linear Discriminant Analysis'):
        return LinearDiscriminantAnalysis()
    elif (clf == 'Quadratic Discriminant Analysis'):
        return QuadraticDiscriminantAnalysis()
    elif (clf == 'AdaBoost'):
        return AdaBoostClassifier(n_estimators=100)
    elif (clf == 'extra_trees_classifier'):
        return ExtraTreesClassifier(n_estimators=10)
    elif (clf == 'gradient_boosting_classifier'):
        return GradientBoostingClassifier(n_estimators=10)
    # Default: random forest.
    return RandomForestClassifier(n_estimators=10)
コード例 #47
0
def create_network(niter, lr, verboseflag):
    """Return an unfitted 5-unit sigmoid/softmax classifier.

    Args:
        niter: Number of training iterations.
        lr: Learning rate.
        verboseflag: Verbosity flag passed straight to the classifier.
    """
    layers = [Layer("Sigmoid", units=5), Layer("Softmax")]
    return Classifier(layers=layers,
                      learning_rate=lr,
                      n_iter=niter,
                      verbose=verboseflag)
コード例 #48
0
def mlp_test():
    """Run the shared classify() harness on a 10-unit Sigmoid/Softmax MLP."""
    net = Classifier(
        layers=[
            Layer(type="Sigmoid", name="hidden", units=10),
            Layer(type="Softmax", name="output"),
        ],
        random_state=1,
    )
    classify(classifier=net)
コード例 #49
0
ファイル: net_predict.py プロジェクト: IraPS/Suspense
def predict():
    """Train the suspense classifier on all labeled samples and score new texts.

    Side effects: reads 'All_features.npz' and 'Akunin_features.npz' from the
    working directory and pickles the fitted pipeline to 'NeuralNet_model.pkl'.

    Returns
    -------
    (simple_predicts, scale_predicts) : hard 0/1 labels and class-1
        probabilities scaled to a 0-10 range, one entry per Akunin text.

    Fix: removed the `gradation`/`compare`/`ress1` computation — it was fully
    evaluated on every call but never used or returned (dead code).
    """
    pipeline = Pipeline([
        ('min/max scaler', MinMaxScaler(feature_range=(0.0, 1.0))),
        ('neural network', Classifier(layers=[Layer("ExpLin", units=5),
                                              Layer("Softmax")], n_iter=25))])

    X = np.load('All_features.npz')['arr_0']
    D = np.load('Akunin_features.npz')['arr_0']

    # 141 positive (suspense) samples followed by 123 negative ones.
    y = np.array([1] * 141 + [0] * 123)

    # test_size=0.0 keeps every sample for training; the split only shuffles.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.0, random_state=0)

    pipeline.fit(X_train, y_train)
    pickle.dump(pipeline, open('NeuralNet_model.pkl', 'wb'))

    prediction = pipeline.predict(D)
    probs = pipeline.predict_proba(D)

    simple_predicts = [row[0] for row in prediction]
    scale_predicts = [row[1] * 10 for row in probs]

    return simple_predicts, scale_predicts
def classifyNeuralNetworkClassifier(XTrain, XTest, YTrain, YTest, params):
    """Train a two-layer sknn MLP configured by `params` and return test accuracy (%).

    params keys: 'activation' (hidden layer type), 'actLastLayer' (output
    layer type), 'rule' (learning rule), 'units', 'rate', 'iter'.

    Fix: `rate` and `iter` were read from params but then ignored — the
    learning rate and iteration count were hard-coded to 0.02 and 10.
    They are now actually applied.
    """
    activation = params['activation']
    actLastLayer = params['actLastLayer']
    rule = params['rule']
    noOfUnits = params['units']
    rate = params['rate']
    noOfIter = params['iter']
    nn = Classifier(layers=[Layer(activation, units=noOfUnits), Layer(actLastLayer)],
        learning_rule=rule,
        learning_rate=rate,
        n_iter=noOfIter)
    nn.fit(XTrain, YTrain)
    YPred = nn.predict(XTest)
    # Count exact matches between prediction and ground truth.
    diff = YPred - YTest.reshape(YPred.shape)
    score = diff[diff == 0].size
    score = (100.0 * score) / (YPred.size)
    return score
コード例 #51
0
def CNN(X, y):
    """Train a one-convolution-layer network on an 80/20 split and print scores."""
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(
        X, y, test_size=0.2, random_state=42)
    conv = Convolution("Rectifier", channels=10, kernel_shape=(5,5),
                       dropout=0.25, normalize="batch", weight_decay=0.0001)
    net = Classifier(layers=[conv, Layer("Softmax")],
                     learning_rate=0.05, n_iter=10)
    net.fit(X_train, y_train)
    print('\nTRAIN SCORE', net.score(X_train, y_train))
    print('TEST SCORE', net.score(X_test, y_test))
コード例 #52
0
ファイル: ml.py プロジェクト: abnarain/malware_detection
def mlpclassifier(input_data, output_labels,filename) :
    """Train a softmax-only sknn classifier and emit evaluation artifacts.

    Writes a confusion-matrix plot (<filename>_cm) and a ROC plot
    (<filename>_roc), then prints the coefficient of determination.
    """
    from sknn.mlp import Classifier, Layer

    model = Classifier(
        layers=[Layer("Softmax")],
        learning_rate=0.001,
        n_iter=25,
    )
    X_train, X_test, Y_train, Y_test = train_test_split(
        input_data, output_labels, test_size=0.25, random_state=42)
    model.fit(X_train, Y_train)

    predictionsMLP = model.predict(X_test)
    calc_conf_matrix(Y_test, predictionsMLP,
                     'Multi Layer Perceptron confusion matrix', filename+'_cm')
    roc_plot(input_data, output_labels, model, filename+'_roc')
    coeff_of_deterimination(model, input_data, output_labels, 5)
コード例 #53
0
def train_dropout_nn(X, y, model_type='classifier', cv_fold=5):
    """Grid-search a dropout/L2-regularized sknn network and return the best fit.

    Parameters
    ----------
    X : training features
    y : training targets
    model_type : 'classifier' builds a Sigmoid->Softmax Classifier;
        any other value builds a Sigmoid->Linear Regressor
    cv_fold : number of cross-validation folds for the grid search

    Returns
    -------
    The best estimator found, refit on all of (X, y).
    """
    # Hyperparameter combinations explored by the grid search.
    param_grid = {
        'weight_decay': [0.05, 0.01, 0.005, 0.001],
        'dropout_rate': [0.25, 0.50],
        'learning_momentum': np.arange(0.1, 1.0, 0.3),
        'learning_rate': [0.05, 0.01, 0.005, 0.001],
        'hidden0__units': [8, 16, 32, 64],
        'hidden0__dropout': [0.25, 0.50],
    }

    # Pick the network architecture matching the requested model type.
    if model_type == 'classifier':
        model = Classifier(layers=[Layer('Sigmoid'), Layer('Softmax')],
                           regularize='L2', verbose=True)
    else:
        model = Regressor(layers=[Layer('Sigmoid'), Layer('Linear')],
                          regularize='L2', verbose=True)

    grid_search = GridSearchCV(estimator=model,
                               param_grid=param_grid,
                               scoring='neg_mean_squared_error',
                               cv=cv_fold,
                               refit=True)
    logging.info('Fitting neural networks regularized with dropout ...')
    grid_search.fit(X, y)

    # Report the winning combination and the RMSE of every candidate.
    logging.info('best hyperparameter combination %s' % grid_search.best_params_)
    results = grid_search.cv_results_
    for combo, mean_score in zip(results['params'], results['mean_test_score']):
        print(combo, '%.2f' % np.sqrt(-mean_score))

    return grid_search.best_estimator_
コード例 #54
0
 def build_mlp(self):
     """Assemble and return a min/max-scaling + nesterov-rule MLP pipeline."""
     layers = self.__build_layers()
     print("Building neural network...")
     scaler = MinMaxScaler(feature_range=(0.0, 1.0))
     network = Classifier(layers=layers, learning_rule='nesterov')
     pipeline = Pipeline([('min/max scaler', scaler),
                          ('neural network', network)])
     print("Network built successfully")
     return pipeline
コード例 #55
0
def CNN(X_train, y_train, X_test, X_hidden):
    """Train a single-convolution-layer net and predict the public + hidden sets.

    Returns the concatenated predictions for X_test followed by X_hidden.

    Fix: sklearn's preprocessing.normalize() returns a new array (it copies
    by default, it is NOT in-place), so the return values must be
    reassigned — previously the normalization results were discarded and
    the model trained on raw data.
    """
    print("CNN")
    X_train = preprocessing.normalize(X_train, 'max')
    X_test = preprocessing.normalize(X_test, 'max')
    X_hidden = preprocessing.normalize(X_hidden, 'max')
    print("Done normalization")

    X_train = equalize_hist(X_train)
    X_test = equalize_hist(X_test)
    X_hidden = equalize_hist(X_hidden)

    nn = Classifier(
        layers=[
            Convolution("Rectifier", channels=98, kernel_shape=(3, 3)),
            Layer("Softmax")
        ],
        learning_rate=0.01,
        n_iter=25,
        random_state=42)
    nn.fit(X_train, y_train)
    print('\nTRAIN SCORE', nn.score(X_train, y_train))
    pub_res = list(nn.predict(X_test))
    hid_res = list(nn.predict(X_hidden))

    return pub_res + hid_res
コード例 #56
0
def train(X, Y):
    """Resume training a saved CNN on a 70/10/20 split of (X, Y) with checkpoints.

    Python 2 code. Trains in 5-iteration bursts for 100 rounds, pickling the
    model to model_path after every burst so progress survives interrupts.
    NOTE(review): the freshly built Classifier below is immediately replaced
    by pickle.load, so training always resumes from the saved model — the
    construction only documents the intended architecture.
    """
    print X.shape
    print Y.shape
    # Split by row order: first 70% train, next 10% validation, last 20% test.
    trainX = X[:int(X.shape[0] * 0.7), :, :]
    trainY = Y[:int(Y.shape[0] * 0.7), :]
    valX = X[int(X.shape[0] * 0.7):int(X.shape[0] * 0.8), :, :]
    valY = Y[int(Y.shape[0] * 0.7):int(Y.shape[0] * 0.8), :]
    testX = X[int(X.shape[0] * 0.8):, :, :]
    testY = Y[int(Y.shape[0] * 0.8):, :]

    print 'Train, Val, Test'
    print trainX.shape, ',', trainY.shape, '--', valX.shape, ',', valY.shape, '--', testX.shape, ',', testY.shape

    # Architecture: one 1-channel convolution spanning the full word
    # dimension, two 300-unit rectifier layers, softmax output.
    nn = Classifier(
        layers=[
            Convolution('Rectifier', channels=1, kernel_shape=(5, WORD_DIM)),
            Layer('Rectifier', units=300),
            Layer('Rectifier', units=300),
            Layer('Softmax')
        ],
        #valid_set = (valX,valY),
        learning_rate=0.02,  #0.05, #0.001,
        #normalize='batch',
        verbose=True)
    print 'Net created...'
    #Load net here --always CHECK HERE before starting -- DO THIS NOW, WE WANT TO CONTINUE FROM HERE ON
    nn = pickle.load(open(model_path, 'rb'))
    for i in range(100):
        try:
            # Train a 5-iteration burst, checkpoint, then reload the
            # checkpoint before the next burst.
            nn.n_iter = 5
            nn.fit(trainX, trainY)
            pickle.dump(nn, open(model_path, 'wb'))
            nn = pickle.load(open(model_path, 'rb'))
        except KeyboardInterrupt:
            # Ctrl-C mid-burst: save whatever state the model is in.
            pickle.dump(nn, open(model_path, 'wb'))
            print 'Saved model after keyboard interrupt'
        pickle.dump(nn, open(model_path, 'wb'))
        print 'Temp model saved'

    #try:
    #	nn.fit(trainX,trainY)
    #except KeyboardInterrupt:
    #	pickle.dump(nn,open(model_path,'wb'))

    print 'Done, final model saved'
コード例 #57
0
def CNN(X_train, y_train, X_test, X_hidden):
    """Train a single 98-unit Tanh-layer net (no convolution despite the name)
    and predict the public + hidden sets.

    Returns the concatenated predictions for X_test followed by X_hidden.

    Fix: sklearn's preprocessing.normalize() returns a new array (it copies
    by default, it is NOT in-place), so the return values must be
    reassigned — previously the normalization results were discarded and
    the model trained on raw data.
    """
    print("1 Con, 1 tanh")
    X_train = preprocessing.normalize(X_train, 'max')
    X_test = preprocessing.normalize(X_test, 'max')
    X_hidden = preprocessing.normalize(X_hidden, 'max')
    print("Done normalization")

    X_train = equalize_hist(X_train)
    X_test = equalize_hist(X_test)
    X_hidden = equalize_hist(X_hidden)

    nn = Classifier(layers=[
        Layer("Tanh", units=98, weight_decay=0.0001),
        Layer("Softmax")
    ],
                    learning_rate=0.01,
                    n_iter=1000,
                    batch_size=5)
    nn.fit(X_train, y_train)
    print('\nTRAIN SCORE', nn.score(X_train, y_train))
    pub_res = list(nn.predict(X_test))
    hid_res = list(nn.predict(X_hidden))

    return pub_res + hid_res