def classify(self, X, y):

        seed = random.randint(0, sys.maxint)

        X_train, X_test, y_train, y_test = split_test(X, y)

        nn = Classifier(
            layers=self.layers(),
            learning_rate = self.LEARNING_RATE,
            valid_size    = self.VALIDATION_SIZE,
            n_stable      = 10,
            f_stable      = self.STABLE,
            random_state  = seed,
            verbose       = False,
            debug         = False
        )

        nn.fit(X_train, y_train)

        train_score = nn.score(X_train, y_train)
        test_score = nn.score(X_test, y_test)

        return train_score, test_score
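The `split_test` helper used above is not shown in this example; a minimal sketch of what it is assumed to do, built on scikit-learn's train_test_split (the 80/20 split is an assumption):

from sklearn.cross_validation import train_test_split  # sklearn.model_selection in newer releases


def split_test(X, y):
    # Hypothetical helper: hold out 20% of the samples for testing.
    return train_test_split(X, y, test_size=0.2)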
def CNN(X, y):
    #l2 normalize
    #preprocessing.normalize(X, 'max')
    #scale centre to the mean to unit vector
    #preprocessing.scale(X_train)
    #preprocessing.scale(X_test)
    #X = equalize_hist(X)
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(
        X, y, test_size=0.2, random_state=42)
    nn = Classifier(
        layers=[
            Convolution("Rectifier",
                        channels=100,
                        kernel_shape=(10, 10),
                        dropout=0.25,
                        normalize="batch",
                        weight_decay=0.0001,
                        pool_shape=(2, 2),
                        pool_type="max"),
            #Layer("Tanh", units=100),
            Layer("Softmax")
        ],
        learning_rate=0.05,
        n_iter=10)
    nn.fit(X_train, y_train)
    print('\nTRAIN SCORE', nn.score(X_train, y_train))
    print('TEST SCORE', nn.score(X_test, y_test))
def CNN(X, y):
    print("1-layer Tanh 100 NN")
    #l2 normalize
    X = preprocessing.normalize(X, 'max')  # normalize returns a copy; assign it back
    print("Done normalization")
    X = equalize_hist(X)
    #print("Done histogram equalization")
    #scale centre to the mean to unit vector
    #preprocessing.scale(X_train)
    #preprocessing.scale(X_test)
    #X = equalize_hist(X)
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(
        X, y, test_size=0.2)
    print("Creating neural net...")
    nn = Classifier(layers=[
        Layer("Tanh", units=98, weight_decay=0.0001),
        Layer("Softmax")
    ],
                    learning_rate=0.01,
                    n_iter=1000,
                    batch_size=5)
    print("Done creating neural net")
    print("Neural net fitting....")
    nn.fit(X_train, y_train)
    print("Done Neural net fitting!")
    print('\nTRAIN SCORE', nn.score(X_train, y_train))
    print('TEST SCORE', nn.score(X_test, y_test))
Example #4
def _ann_train_size(data, data_test, target, target_test, train_size):
    nn = Classifier(
        layers=[
            Layer("Sigmoid", units=100),
            Layer("Softmax")])
    if train_size < 1:
        X_train, _, y_train, _ = cross_validation.train_test_split(
            data, target, train_size=train_size, stratify=target)
    else:
        X_train, y_train = data, target
    nn.fit(X_train, y_train)
    train_score = nn.score(X_train, y_train)
    test_score = nn.score(data_test, target_test)
    print train_size, train_score, test_score
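A hypothetical driver for the learning-curve sweep above; the synthetic arrays and the list of fractions are illustrative assumptions:

import numpy as np
from sklearn.cross_validation import train_test_split

X_all = np.random.rand(500, 20)
y_all = np.random.randint(0, 3, 500)
data, data_test, target, target_test = train_test_split(X_all, y_all, test_size=0.2)

for train_size in [0.1, 0.25, 0.5, 0.75, 1.0]:
    _ann_train_size(data, data_test, target, target_test, train_size)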
def CNN(X_train, y_train, X_test, X_hidden):
	print("Combined")
	#l2 normalize preprocessing.normalize(X, 'l2')
	X_train = preprocessing.normalize(X_train, 'max')  # normalize returns a copy; assign it back
	X_test = preprocessing.normalize(X_test, 'max')
	X_hidden = preprocessing.normalize(X_hidden, 'max')
	print("Done normalization")

	X_train = equalize_hist(X_train)
	X_test = equalize_hist(X_test)
	X_hidden = equalize_hist(X_hidden) 


	nn = Classifier(
    layers=[
        Convolution("Rectifier", channels=98, kernel_shape=(3,3), pool_shape = (2,2), pool_type="max"),
        #Convolution("Rectifier", channels=100, kernel_shape=(3,3), dropout=0.25, 
        	#weight_decay=0.0001, pool_shape = (2,2), pool_type="max"),
        Layer("Softmax")], learning_rate=0.01, n_iter=25, random_state= 42)
	nn.fit(X_train, y_train)
	print('\nTRAIN SCORE', nn.score(X_train, y_train))
	pub_res = list(nn.predict(X_test))
	hid_res = list(nn.predict(X_hidden))

	return pub_res+hid_res
Example #6
def make_neural_network():
    dataset = np.loadtxt("/Users/BARNES_3/Documents/niki/courses/Decision making/riot_predictor/data_for_neural.csv", delimiter=",")

    score_total = 0
    for i in xrange(0, 5):
        msk = np.random.rand(len(dataset)) < 0.8
        train = dataset[msk]
        test = dataset[~msk]
        x_train = train[:,0:6]
        y_train = train[:,6]
        x_test = test[:,0:6]
        y_test = test[:,6]
        # print type(x_test)
        # score = 0.797035347777
        # 0.801596351197
        nn = Classifier(
            layers=[
                # Layer("Tanh", units = 1000),
                # Layer("Sigmoid", units = 1000),
                # Layer("Linear")],
                Layer("ExpLin", units = 800),
                Layer("Softmax"),
                ],
            learning_rate=0.0002,
            n_iter=20)
        nn.fit(x_train, y_train)
        score = nn.score(x_test, y_test)
        score_total += score
    print score_total/5
    # print score
    return nn
Example #7
def CNN(X, y):
	#l2 normalize 
	#preprocessing.normalize(X, 'max')
	#scale centre to the mean to unit vector
	#preprocessing.scale(X_train)
	#preprocessing.scale(X_test)
	#X = equalize_hist(X)
	X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2, random_state = 42)
	nn = Classifier(
    layers=[
        Convolution("Rectifier", channels=10, kernel_shape=(5,5), dropout=0.25, normalize="batch", weight_decay=0.0001),
        #Layer("Tanh", units=10),
        Layer("Softmax")], learning_rate=0.05, n_iter=10)
	nn.fit(X_train, y_train)
	print('\nTRAIN SCORE', nn.score(X_train, y_train))
	print('TEST SCORE', nn.score(X_test, y_test))
Example #8
def CNN(X_train, y_train, X_test, X_hidden):
    print("1 Con, 1 tanh")
    #l2 normalize preprocessing.normalize(X, 'l2')
    X_train = preprocessing.normalize(X_train, 'max')  # normalize returns a copy; assign it back
    X_test = preprocessing.normalize(X_test, 'max')
    X_hidden = preprocessing.normalize(X_hidden, 'max')
    print("Done normalization")

    X_train = equalize_hist(X_train)
    X_test = equalize_hist(X_test)
    X_hidden = equalize_hist(X_hidden)

    nn = Classifier(layers=[
        Layer("Tanh", units=98, weight_decay=0.0001),
        Layer("Softmax")
    ],
                    learning_rate=0.01,
                    n_iter=1000,
                    batch_size=5)
    nn.fit(X_train, y_train)
    print('\nTRAIN SCORE', nn.score(X_train, y_train))
    pub_res = list(nn.predict(X_test))
    hid_res = list(nn.predict(X_hidden))

    return pub_res + hid_res
def CNN(X_train, y_train, X_test, X_hidden):
    print("CNN")
    #l2 normalize preprocessing.normalize(X, 'l2')
    X_train = preprocessing.normalize(X_train, 'max')  # normalize returns a copy; assign it back
    X_test = preprocessing.normalize(X_test, 'max')
    X_hidden = preprocessing.normalize(X_hidden, 'max')
    print("Done normalization")

    X_train = equalize_hist(X_train)
    X_test = equalize_hist(X_test)
    X_hidden = equalize_hist(X_hidden)

    nn = Classifier(
        layers=[
            Convolution("Rectifier", channels=98, kernel_shape=(3, 3)),
            #Convolution("Rectifier", channels=100, kernel_shape=(3,3), dropout=0.25,
            #weight_decay=0.0001, pool_shape = (2,2), pool_type="max"),
            Layer("Softmax")
        ],
        learning_rate=0.01,
        n_iter=25,
        random_state=42)
    nn.fit(X_train, y_train)
    print('\nTRAIN SCORE', nn.score(X_train, y_train))
    pub_res = list(nn.predict(X_test))
    hid_res = list(nn.predict(X_hidden))

    return pub_res + hid_res
def _ann_n_iter(data, data_test, target, target_test, n_units):
    nn = Classifier(
        layers=[
            Layer("Sigmoid", units=n_units),
            Layer("Softmax")],
        n_iter=4000)
    nn.fit(data, target)
    test_score = nn.score(data_test, target_test)
    print n_units, test_score
Example #11
def _ann_n_iter(data, data_test, target, target_test, n_iter):
    nn = Classifier(
        layers=[
            Layer("Sigmoid", units=100),
            Layer("Softmax")],
        n_iter=n_iter)
    train_score = np.mean(cross_validation.cross_val_score(nn, data, target, cv=10))
    nn.fit(data, target)
    test_score = nn.score(data_test, target_test)
    print n_iter, train_score, test_score
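A hypothetical sweep over iteration counts using the helper above, with synthetic data standing in for the real feature matrices:

import numpy as np

X_demo = np.random.rand(300, 10)
y_demo = np.random.randint(0, 2, 300)

for n_iter in [10, 50, 100, 500]:
    _ann_n_iter(X_demo[:240], X_demo[240:], y_demo[:240], y_demo[240:], n_iter)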
Example #12
def CNN(X_train, y_train, X_test):
	nn = Classifier(
    layers=[
        Convolution("Rectifier", channels=20, kernel_shape=(5,5), dropout=0.25),
        Layer("Tanh", units=300),
        Layer("Tanh", units=100),
        Layer("Softmax")], learning_rate=0.02, n_iter=10)
	nn.fit(X_train, y_train)
	print('\nTRAIN SCORE', nn.score(X_train, y_train))
	return list(nn.predict(X_test))
class TestClassifierFunctionality(unittest.TestCase):

    def setUp(self):
        self.nn = MLPC(layers=[L("Linear")], n_iter=1)

    def test_FitAutoInitialize(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.random.randint(0, 5, (8,))
        self.nn.fit(a_in, a_out)
        assert_true(self.nn.is_initialized)

    def test_ExplicitValidSet(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.random.randint(0, 5, (8,))
        self.nn.valid_set = (a_in, a_out)
        self.nn.fit(a_in, a_out)
        assert_true(self.nn.is_initialized)

    def test_PartialFit(self):
        a_in, a_out = numpy.zeros((8,4)), numpy.random.randint(0, 5, (8,))
        self.nn.partial_fit(a_in, a_out, classes=[0,1,2,3])
        self.nn.partial_fit(a_in*2.0, a_out+1, classes=[0,1,2,3])

    def test_PredictUninitializedNoUnitCount(self):
        a_in = numpy.zeros((8,16))
        assert_raises(AssertionError, self.nn.predict, a_in)

    def test_PredictUninitializedNoLabels(self):
        self.nn.layers[-1].units = 4
        a_in = numpy.zeros((8,16))
        assert_raises(AssertionError, self.nn.predict, a_in)

    def test_PredictClasses(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.random.randint(0, 5, (8,))
        self.nn.fit(a_in, a_out)
        a_test = self.nn.predict(a_in)
        assert_equal(type(a_out), type(a_test))
        assert_equal(a_out.shape[0], a_test.shape[0])

    def test_PredictMultiClass(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.random.randint(0, 3, (8,2))
        self.nn.fit(a_in, a_out)
        a_test = self.nn.predict(a_in)
        assert_equal(type(a_out), type(a_test))
        assert_equal(a_out.shape, a_test.shape)

    def test_EstimateProbalities(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.random.randint(0, 5, (8,))
        self.nn.fit(a_in, a_out)
        a_test = self.nn.predict_proba(a_in)
        assert_equal(type(a_out), type(a_test))

    def test_CalculateScore(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.random.randint(0, 5, (8,))
        self.nn.fit(a_in, a_out)
        f = self.nn.score(a_in, a_out)
        assert_equal(type(f), numpy.float64)
Example #14
class TestClassifierFunctionality(unittest.TestCase):
    def setUp(self):
        self.nn = MLPC(layers=[L("Linear")], n_iter=1)

    def test_FitAutoInitialize(self):
        a_in, a_out = numpy.zeros((8, 16)), numpy.random.randint(0, 5, (8, ))
        self.nn.fit(a_in, a_out)
        assert_true(self.nn.is_initialized)

    def test_ExplicitValidSet(self):
        a_in, a_out = numpy.zeros((8, 16)), numpy.random.randint(0, 5, (8, ))
        self.nn.valid_set = (a_in, a_out)
        self.nn.fit(a_in, a_out)
        assert_true(self.nn.is_initialized)

    def test_PartialFit(self):
        a_in, a_out = numpy.zeros((8, 4)), numpy.random.randint(0, 5, (8, ))
        self.nn.partial_fit(a_in, a_out, classes=[0, 1, 2, 3])
        self.nn.partial_fit(a_in * 2.0, a_out + 1, classes=[0, 1, 2, 3])

    def test_PredictUninitializedNoUnitCount(self):
        a_in = numpy.zeros((8, 16))
        assert_raises(AssertionError, self.nn.predict, a_in)

    def test_PredictUninitializedNoLabels(self):
        self.nn.layers[-1].units = 4
        a_in = numpy.zeros((8, 16))
        assert_raises(AssertionError, self.nn.predict, a_in)

    def test_PredictClasses(self):
        a_in, a_out = numpy.zeros((8, 16)), numpy.random.randint(0, 5, (8, ))
        self.nn.fit(a_in, a_out)
        a_test = self.nn.predict(a_in)
        assert_equal(type(a_out), type(a_test))
        assert_equal(a_out.shape[0], a_test.shape[0])

    def test_PredictMultiClass(self):
        a_in, a_out = numpy.zeros((8, 16)), numpy.random.randint(0, 3, (8, 2))
        self.nn.fit(a_in, a_out)
        a_test = self.nn.predict(a_in)
        assert_equal(type(a_out), type(a_test))
        assert_equal(a_out.shape, a_test.shape)

    def test_EstimateProbalities(self):
        a_in, a_out = numpy.zeros((8, 16)), numpy.random.randint(0, 5, (8, ))
        self.nn.fit(a_in, a_out)
        a_test = self.nn.predict_proba(a_in)
        assert_equal(type(a_out), type(a_test))

    def test_CalculateScore(self):
        a_in, a_out = numpy.zeros((8, 16)), numpy.random.randint(0, 5, (8, ))
        self.nn.fit(a_in, a_out)
        f = self.nn.score(a_in, a_out)
        assert_equal(type(f), numpy.float64)
Example #15
def _nn(tx, ty, rx, ry, n_iter):
    print "_nn"
    nn = Classifier(
            layers=[
                Layer("Tanh", units=100),
                Layer("Softmax")],
            n_iter=n_iter)
    nn.fit(tx, ty)
    resultst = nn.score(tx, ty)
    resultsr = nn.score(rx, ry)
    print "_nn done"
    return n_iter, resultst, resultsr
def get_X_Y(filetrain, filetest):

    y_train, x_train = readCSV(filetrain)
    y_test, x_test = readCSV(filetest)

    # print f_score.f_score(X,Y)

    # print t_score.t_score(X,Y)
    nn = Classifier(layers=[Layer("Rectifier", units=100), Layer("Softmax")],
                    learning_rate=0.02, n_iter=10)
    # pdb.set_trace()
    nn.fit(x_train, y_train)

    score = nn.score(x_test, y_test)
    return score  # the score was previously computed but never returned
Example #17
def CNN(X, y):
	print("1-layer Tanh 100 NN")
	#l2 normalize 
	X = preprocessing.normalize(X, 'max')  # normalize returns a copy; assign it back
	print("Done normalization")
	X = equalize_hist(X)
	#print("Done histogram equalization")
	#scale centre to the mean to unit vector
	#preprocessing.scale(X_train)
	#preprocessing.scale(X_test)
	#X = equalize_hist(X)
	X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2)
	print("Creating neural net...")
	nn = Classifier(
    layers=[
    	Layer("Tanh", units = 98, weight_decay=0.0001),
        Layer("Softmax")], learning_rate=0.01, n_iter=1000, batch_size= 5)
	print("Done creating neural net")
	print("Neural net fitting....")
	nn.fit(X_train, y_train)
	print("Done Neural net fitting!")
	print('\nTRAIN SCORE', nn.score(X_train, y_train))
	print('TEST SCORE', nn.score(X_test, y_test))
Example #18
def predictCategoryNN(training_set, test_set, target, test_targert,
                      componentsList):
    scaler = StandardScaler()
    scaler.fit(training_set[componentsList])
    training_set[componentsList] = scaler.transform(
        training_set[componentsList])
    test_set[componentsList] = scaler.transform(test_set[componentsList])
    nn = Classifier(layers=[Layer("Sigmoid", units=100),
                            Layer("Softmax")],
                    learning_rate=0.001,
                    n_iter=25)
    nn.fit(training_set[componentsList].as_matrix(), target.as_matrix())
    return nn.predict(test_set[componentsList].as_matrix()), pd.DataFrame(
        test_targert), nn.score(test_set[componentsList].as_matrix(),
                                test_targert.as_matrix())
Example #19
def number(number):
    #def number():
    digits = load_digits()

    Xdata = digits.data

    Xdata = scale(Xdata)

    X_train, X_test, y_train, y_test, images_train, images_test = train_test_split(
        Xdata, digits.target, digits.images, test_size=0.25, random_state=42)

    nn = Classifier(layers=[Layer("Sigmoid", units=100),
                            Layer("Softmax")],
                    learning_rate=0.001,
                    n_iter=25)
    #http://aka.ms/vcpython27
    #http://blog.sciencenet.cn/blog-669638-1080739.html
    #C:\Python27\Lib\site-packages\lasagne\layers\pool.py
    nn.fit(X_train, y_train)
    print nn.score(X_test, y_test)

    X_test[0] = scale(number)
    predicted = nn.predict(X_test)
    print("predicted is ", predicted[0])
def CNN(X_train, y_train, X_test):
    nn = Classifier(layers=[
        Convolution("Rectifier",
                    channels=20,
                    kernel_shape=(5, 5),
                    dropout=0.25),
        Layer("Tanh", units=300),
        Layer("Tanh", units=100),
        Layer("Softmax")
    ],
                    learning_rate=0.02,
                    n_iter=10)
    nn.fit(X_train, y_train)
    print('\nTRAIN SCORE', nn.score(X_train, y_train))
    return list(nn.predict(X_test))
Example #21
def test_mlp_classifier(
        data,
        layers=[Layer("Rectifier", units=10),
                Layer('Softmax')],
        learning_rate=0.02,
        n_iter=1,
        scale=1):

    #preprossing data if necessary
    X_raw, y = data

    #normalize data for better performance
    if scale == 1:
        X = preprocessing.scale(X_raw)
    else:
        X = X_raw

    #since our test set is not labeled I am using the training data provided for train, validation and test
    print("Create, train, test, validation_sets")

    #split the data into training/validation set and testing set
    X_train_valid, X_test, y_train_valid, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42)

    #split the training set into training set and validation set
    X_train, X_valid, y_train, y_valid = train_test_split(X_train_valid,
                                                          y_train_valid,
                                                          test_size=0.2,
                                                          random_state=23)

    #build the different layers of the model
    print("Building the model...")
    nn = Classifier(layers=layers, learning_rate=learning_rate, n_iter=n_iter)

    #train the model
    print("Training...")
    nn.fit(X_train, y_train)

    #test the model on the validation set
    print("Testing...")
    y_valid_pred = nn.predict(X_valid)

    #return the validation score
    print("Score...")
    score = nn.score(X_test, y_test)

    return score, layers, learning_rate, n_iter
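A minimal sketch of invoking the helper above; using scikit-learn's digits dataset for the unnamed (X, y) tuple is an assumption:

from sklearn.datasets import load_digits

digits = load_digits()
score, layers, lr, n_iter = test_mlp_classifier((digits.data, digits.target))
print("Held-out score:", score)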
class NeuralNetwork(object):
    """Class NeuralNetwork - Represent a neural network"""

    def __init__(self, number_layers, numbers_neurons, learning_rate, **kwargs):
        """Create neural network from the parameters"""
        self.input_layer = Layer("Sigmoid", units=46)
        self.output_layer = Layer("Softmax")

        self.hidden_layers = []
        for i in range(number_layers):
            layer = Layer("Sigmoid", units=numbers_neurons[i])
            self.hidden_layers.append(layer)

        self.learning_rate = learning_rate

        #training data
        self.X_train = kwargs.get('X_train', None)
        self.Y_train = kwargs.get('Y_train', None)

        #test data
        self.X_test = kwargs.get('X_test', None)
        self.Y_test = kwargs.get('Y_test', None)

        self.net = Classifier(layers=([self.input_layer]+self.hidden_layers+[self.output_layer]), learning_rate=self.learning_rate, n_iter=1000)

        self.is_ready = False

    def train(self):
        """Train of the neural network"""
        self.net.fit(self.X_train, self.Y_train)
        self.is_ready = True

    def get_auc(self):
        """Returns the area under the roc curve"""
        if not self.is_ready:
            self.train()

        output_test = self.net.predict(self.X_test)
        return metrics.roc_auc_score(self.Y_test, output_test)


    def classify(self):
        """Returns the mean accuracy on the given test data and labels."""
        score = 0
        score = self.net.score(self.X_test, self.Y_test)
        return score
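A short usage sketch for the class above; the synthetic arrays are sized to match the 46-unit input layer and are purely illustrative:

import numpy as np

X = np.random.rand(200, 46)
y = np.random.randint(0, 2, 200)
net = NeuralNetwork(number_layers=2, numbers_neurons=[30, 15], learning_rate=0.01,
                    X_train=X[:150], Y_train=y[:150],
                    X_test=X[150:], Y_test=y[150:])
net.train()
print(net.classify())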
Example #23
class TestClassifierFunctionality(unittest.TestCase):
    def setUp(self):
        self.nn = MLPC(layers=[L("Linear")], n_iter=1)

    def test_FitAutoInitialize(self):
        a_in, a_out = numpy.zeros((8, 16)), numpy.zeros((8, ),
                                                        dtype=numpy.int32)
        self.nn.fit(a_in, a_out)
        assert_true(self.nn.is_initialized)

    def test_PartialFit(self):
        a_in, a_out = numpy.zeros((8, 4)), numpy.zeros((8, ),
                                                       dtype=numpy.int32)
        self.nn.partial_fit(a_in, a_out, classes=[0, 1, 2, 3])
        self.nn.partial_fit(a_in * 2.0, a_out + 1, classes=[0, 1, 2, 3])

    def test_PredictUninitialized(self):
        a_in = numpy.zeros((8, 16))
        assert_raises(ValueError, self.nn.predict, a_in)

    def test_PredictClasses(self):
        a_in, a_out = numpy.zeros((8, 16)), numpy.zeros((8, ),
                                                        dtype=numpy.int32)
        self.nn.fit(a_in, a_out)
        a_test = self.nn.predict(a_in)
        assert_equal(type(a_out), type(a_test))
        assert_equal(a_out.shape, a_test.shape)

    def test_EstimateProbalities(self):
        a_in, a_out = numpy.zeros((8, 16)), numpy.zeros((8, ),
                                                        dtype=numpy.int32)
        self.nn.fit(a_in, a_out)
        a_test = self.nn.predict_proba(a_in)
        assert_equal(type(a_out), type(a_test))

    def test_CalculateScore(self):
        a_in, a_out = numpy.zeros((8, 16)), numpy.zeros((8, ),
                                                        dtype=numpy.int32)
        self.nn.fit(a_in, a_out)
        f = self.nn.score(a_in, a_out)
        assert_equal(type(f), numpy.float64)
def evaluate(datajson):
    # Init classifier
    clf = Classifier(
        layers=[Layer('Rectifier', units=100), Layer('Sigmoid')],
        learning_rate=0.02,
        n_iter=10,
        valid_size=0.1,
        verbose=False)

    # Load data from JSON file
    with open(datajson, 'r') as fin:
        loadedjson = json.load(fin)
    training = loadedjson[TRAINING_DATA_LABEL]
    testing  = loadedjson[TESTING_DATA_LABEL]
    x_train = []
    y_train = []
    x_test  = []
    y_test  = []
    for i in training:
        x_train.append(i['x'])
        y_train.append(i['y'])
    for i in testing:
        x_test.append(i['x'])
        y_test.append(i['y'])
    x_train = np.array(x_train)
    y_train = np.array(y_train)
    x_test = np.array(x_test)
    y_test = np.array(y_test)
    # print(len(x_train), len(y_train), len(x_test), len(y_test))

    # Classifying
    clf.fit(x_train, y_train)

    # Score
    score = clf.score(x_test, y_test)
    print(score)
    # print(clf.predict(np.array([[0.1,0.1,0.1,0.1,0.1,0.1]])))

    # Save model
    joblib.dump(clf, 'nn_clf.pkl')
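Once evaluate() has run, the persisted model can be restored in a later session; a minimal sketch, assuming the same six-feature input hinted at by the commented-out predict call:

import numpy as np
from sklearn.externals import joblib  # plain `import joblib` on newer scikit-learn

clf = joblib.load('nn_clf.pkl')
print(clf.predict(np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1]])))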
Example #25
def CNN(X_train, y_train, X_test, X_hidden):
	print("1 Con, 1 tanh")
	#l2 normalize preprocessing.normalize(X, 'l2')
	X_train = preprocessing.normalize(X_train, 'max')  # normalize returns a copy; assign it back
	X_test = preprocessing.normalize(X_test, 'max')
	X_hidden = preprocessing.normalize(X_hidden, 'max')
	print("Done normalization")

	X_train = equalize_hist(X_train)
	X_test = equalize_hist(X_test)
	X_hidden = equalize_hist(X_hidden) 


	nn = Classifier(
    layers=[
    	Layer("Tanh", units = 98, weight_decay=0.0001),
        Layer("Softmax")], learning_rate=0.01, n_iter=1000, batch_size= 5)
	nn.fit(X_train, y_train)
	print('\nTRAIN SCORE', nn.score(X_train, y_train))
	pub_res = list(nn.predict(X_test))
	hid_res = list(nn.predict(X_hidden))

	return pub_res+hid_res
class TestClassifierFunctionality(unittest.TestCase):

    def setUp(self):
        self.nn = MLPC(layers=[L("Linear")], n_iter=1)

    def test_FitAutoInitialize(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,), dtype=numpy.int32)
        self.nn.fit(a_in, a_out)
        assert_true(self.nn.is_initialized)

    def test_PartialFit(self):
        a_in, a_out = numpy.zeros((8,4)), numpy.zeros((8,), dtype=numpy.int32)
        self.nn.partial_fit(a_in, a_out, classes=[0,1,2,3])
        self.nn.partial_fit(a_in*2.0, a_out+1, classes=[0,1,2,3])

    def test_PredictUninitialized(self):
        a_in = numpy.zeros((8,16))
        assert_raises(ValueError, self.nn.predict, a_in)

    def test_PredictClasses(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,), dtype=numpy.int32)
        self.nn.fit(a_in, a_out)
        a_test = self.nn.predict(a_in)
        assert_equal(type(a_out), type(a_test))
        assert_equal(a_out.shape, a_test.shape)

    def test_EstimateProbalities(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,), dtype=numpy.int32)
        self.nn.fit(a_in, a_out)
        a_test = self.nn.predict_proba(a_in)
        assert_equal(type(a_out), type(a_test))

    def test_CalculateScore(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,), dtype=numpy.int32)
        self.nn.fit(a_in, a_out)
        f = self.nn.score(a_in, a_out)
        assert_equal(type(f), numpy.float64)
Example #27
import numpy as np
from sknn.mlp import Classifier, Layer
from sklearn import metrics
from sklearn.cross_validation import cross_val_score, train_test_split

file = open("../clean-data/data.csv", "r")
data = file.readlines()
vector_x = list()
vector_y = list()
for ii in data[1:]:
  temp_vector_x = ii.split(",")
  #Because the first two features are timestamp and dow jones rating. 
  x, y = map(float, temp_vector_x[2:-2]), int(temp_vector_x[-1])
  vector_x.append(x)
  vector_y.append(y)


vector_x = np.array(vector_x)
vector_y = np.array(vector_y)

x_train, x_test, y_train, y_test = train_test_split(vector_x, vector_y, test_size = 0.3, random_state = 0)

net = Classifier(
		layers=[
			Layer("Maxout", units = 10, pieces = 2), 
			Layer("Softmax")],
		learning_rate = 0.1, 
		n_iter = 25
		)
net.fit(x_train, y_train)
score = net.score(x_test, y_test)
print score
Example #28
##### Train the network #####
modelS.fit(data_x_entre, data_y_entre)


##### Predictions #####
# Simple
predic = modelS.predict(data_x_prueb)

# Cross-validated
predic_cross = cross_val_predict(modelC, data_x, data_y, cv=10)


##### Score #####
# Simple
scor = modelS.score(data_x_prueb, data_y_prueb)

# Cross-validated
scor_cross = cross_val_score(modelC, data_x, data_y, cv=10).mean()


##### Squared error #####
# Simple
cuadr = np.mean((predic - data_y_prueb) ** 2)

# Cross-validated
cuadr_cross = np.mean((predic_cross - data_y) ** 2)


##### Summary of the model fit #####
nombres = ['C. Bueno', 'C. Malo']
Example #29
import numpy as np
from itertools import permutations

from sknn.mlp import Classifier, Layer
from sklearn.cross_validation import train_test_split


def load_data(file_path):
    return np.genfromtxt(file_path, delimiter=',', missing_values='?',
                         dtype=None)

def split_set(data):
    return (data[:,:-1], data[:,-1])


data = load_data('dermatology/dermatology.data')
layers = [Layer("Rectifier", units=100, dropout=0.25),
          Layer("Linear", units=100, dropout=0.25),]
          #Layer("Sigmoid", units=100, dropout=0.25)]

for layers_perm in permutations(layers):
    X_train, X_test, y_train, y_test = train_test_split(data[:, :-1],
                                                        data[:, -1],
                                                        test_size=0.33,
                                                        random_state=42)
    nn = Classifier(
        layers=list(layers_perm) + [Layer("Softmax")],
        learning_rate=0.001,
        n_iter=100)

    nn.fit(X_train, y_train)
    print "Configuration: " + str(layers_perm)
    print nn.score(X_test, y_test)
Example #30
		Layer("Rectifier", units=400),
		#Layer("Rectifier", units=200),
        Layer("Softmax")],
    learning_rate=0.015,
	dropout_rate=0.35,
	valid_size=0.15,
	learning_momentum=0.4,
	batch_size=20,
	#learning_rule='adam',
    n_iter=100,
	verbose=True)


nn.fit(trI,trL);	

res=nn.score(teI,teL);
print res


yres = nn.predict(teI)

print("\tReport:")
print(classification_report(teL,yres))
print '\nConfusion matrix:\n',confusion_matrix(teL, yres)


#print yres,teL


# plot_utils.plot_learning_curve(svc_linear1, wisconsin_training_inputs, wisconsin_training_classes, cv=10, ylim=[0.7, 1.05], figure_title="Learning Curve vs # Training Size (SVM - Linear) ", n_jobs=1, train_sizes=np.linspace(.1, 1.0, 10))
# plot_utils.plot_learning_curve_with_test(ada_classifier2, wisconsin_training_inputs, wisconsin_training_classes, wisconsin_testing_inputs, wisconsin_testing_classes, cv=10, ylim=[0.0, 20], figure_title="Learning Curve of Adaboost vs # Samples wisconsin's Data (Errors)", n_jobs=1, train_sizes=np.linspace(.1, 1.0, 10))


## Multilayer neural network

from sknn.mlp import Classifier, Layer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler

nn = Classifier(
    layers=[
        Layer("Softmax")],
    learning_rate=0.01,
    n_iter=25)
nn.fit(wisconsin_training_inputs, wisconsin_training_classes)

y_pred = nn.predict(wisconsin_testing_inputs)

score = nn.score(wisconsin_testing_inputs, wisconsin_testing_classes)
print("nn score: ", score)
print("y_pred: ", y_pred)

## plot the confusion matrix
# plot_utils.plot_confusion_matrix(wisconsin_testing_classes, y_pred, figure_title="Confusion Matrix - Breast Cancer Data Testing (Neural Network)")

plotter = PlotUtils()
## plotting the learning curve with training and test errors
# plotter.plot_learning_curve_train_test_errors(nn, wisconsin_training_inputs, wisconsin_training_classes, wisconsin_testing_inputs, wisconsin_testing_classes, cv_int=10, ylim=[0.0, 0.10])
# plotter.plot_learning_curve_train_test_errors(nn, parkinson_training_inputs, parkinson_training_classes, parkinson_testing_inputs, parkinson_testing_classes, cv_int=10, ylim=[0.0, 0.30])
from sknn.mlp import Classifier, Layer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler

# start = timeit.timeit()
nn = Classifier(
    layers=[
        Layer("Softmax")],
    learning_rate=0.02,
    n_iter=25)

start = timeit.default_timer()  # timeit.timeit() times an empty statement, not wall-clock time
nn.fit(parkinson_training_inputs, parkinson_training_classes)

y_pred = nn.predict(parkinson_testing_inputs)

score = nn.score(parkinson_testing_inputs, parkinson_testing_classes)
print("nn score: ", score)
print("y_pred: ", y_pred)
end = timeit.default_timer()
time_diff = end - start

print("neural network, time_diff: ", time_diff)

# plot the confusion matrix
#plot_utils.plot_confusion_matrix(parkinson_testing_classes, y_pred, figure_title="Confusion Matrix - Parkinson's Data Testing (Neural Network)")

plotter = PlotUtils()
# ## plotting the learning curve with training and test errors
# plotter.plot_learning_curve_train_test_errors(nn, parkinson_training_inputs, parkinson_training_classes, parkinson_testing_inputs, parkinson_testing_classes, cv_int=10, ylim=[0.0, 0.30])
Example #33
from sknn.mlp import Classifier, Layer

nn = Classifier(layers=[Layer("Rectifier", units=100),
                        Layer("Softmax")],
                learning_rate=0.02,
                n_iter=10)
nn.fit(X_train, y_train)

y_valid = nn.predict(X_valid)

score = nn.score(X_test, y_test)
Example #34
File: main.py  Project: afcarl/chaseplan
    teamData[team] = currentData

nn = Classifier(
    layers=[Layer("Rectifier", units=100, pieces=6),
            Layer("Softmax")],
    learning_rate=0.05,
    learning_rule='adadelta',
    n_iter=1000)

trainData = None

for team in teamData:
    if trainData is None:
        trainData = teamData[team]
    elif team != test_team:
        frames = [trainData, teamData[team]]
        trainData = pd.concat(frames)  # actually combine the accumulated frames

testData = teamData[test_team].as_matrix(['Runs', 'Inns', 'Ave', 'SR'])

y = trainData['Rating'].as_matrix()
trainData = trainData.as_matrix(['Runs', 'Inns', 'Ave', 'SR'])

nn.fit(trainData, y)

x = nn.predict(testData)
score = nn.score(testData, teamData[test_team]['Rating'].as_matrix())
for player in range(0, x.__len__()):
    print str(
        x[player]) + ": " + teamData[test_team]['Player'][player] + ": " + str(
            teamData[test_team]['LastPlayed'][player])
Example #35
        max_epochs=10,
        verbose=1)
    classifiers.append(('nolearn.lasagne', clf))


RUNS = 10

for name, orig in classifiers:
    times = []
    accuracies = []
    for i in range(RUNS):
        start = time.time()

        clf = clone(orig)
        clf.random_state = int(time.time())
        clf.fit(X_train, y_train)

        accuracies.append(clf.score(X_test, y_test))
        times.append(time.time() - start)

    a_t = np.array(times)
    a_s = np.array(accuracies)

    y_pred = clf.predict(X_test)

    print("\n"+name)
    print("\tAccuracy: %5.2f%% ±%4.2f" % (100.0 * a_s.mean(), 100.0 * a_s.std()))
    print("\tTimes:    %5.2fs ±%4.2f" % (a_t.mean(), a_t.std()))
    print("\tReport:")
    print(classification_report(y_test, y_pred))
class TestClassifierFunctionality(unittest.TestCase):
    def setUp(self):
        self.nn = MLPC(layers=[L("Softmax")], n_iter=1)

    def test_IsClassifier(self):
        assert_true(self.nn.is_classifier)

    def test_FitAutoInitialize(self):
        a_in, a_out = numpy.zeros((8, 16)), numpy.random.randint(0, 5, (8, ))
        self.nn.fit(a_in, a_out)
        assert_true(self.nn.is_initialized)

    def test_ExplicitValidSet(self):
        a_in, a_out = numpy.zeros((8, 16)), numpy.random.randint(0, 5, (8, ))
        self.nn.valid_set = (a_in, a_out)
        self.nn.fit(a_in, a_out)
        assert_true(self.nn.is_initialized)

    def test_PartialFit(self):
        a_in, a_out = numpy.zeros((8, 4)), numpy.random.randint(0, 5, (8, ))
        self.nn.partial_fit(a_in, a_out, classes=[0, 1, 2, 3])
        self.nn.partial_fit(a_in * 2.0, a_out + 1, classes=[0, 1, 2, 3])

    def test_PredictUninitializedNoUnitCount(self):
        a_in = numpy.zeros((8, 16))
        assert_raises(AssertionError, self.nn.predict, a_in)

    def test_PredictUninitializedNoLabels(self):
        self.nn.layers[-1].units = 4
        a_in = numpy.zeros((8, 16))
        assert_raises(AssertionError, self.nn.predict, a_in)

    def test_PredictBinaryProbability(self):
        a_in = numpy.random.uniform(-1.0, 1.0, size=(8, 16))
        a_out = numpy.array((a_in.sum(axis=1) >= 0.0), dtype=numpy.int32)
        a_out[0], a_out[-1] = 0, 1
        self.nn.fit(a_in, a_out)

        a_proba = self.nn.predict_proba(a_in)
        a_test = self.nn.predict(a_in)
        c_out = numpy.unique(a_out)

        assert_equal(2, c_out.shape[0])
        assert_equal((8, 2), a_proba.shape)

    def test_PredictClasses(self):
        a_in, a_out = numpy.zeros((8, 16)), numpy.random.randint(0, 5, (8, ))
        self.nn.fit(a_in, a_out)
        self.nn.batch_size = 4
        a_test = self.nn.predict(a_in)
        assert_equal(type(a_out), type(a_test))
        assert_equal(a_out.shape[0], a_test.shape[0])

        c_out = numpy.unique(a_out)
        assert_equal(len(self.nn.classes_), 1)
        assert_true((self.nn.classes_[0] == c_out).all())

    def test_PredictLargerBatchSize(self):
        a_in, a_out = numpy.zeros((8, 16)), numpy.random.randint(0, 5, (8, 1))
        self.nn.batch_size = 32

        self.nn.fit(a_in, a_out)
        a_test = self.nn.predict(a_in)
        assert_equal(type(a_out), type(a_test))
        assert_equal(a_out.shape[0], a_test.shape[0])

    def test_PredictMultiClass(self):
        a_in, a_out = numpy.zeros(
            (32, 16)), numpy.random.randint(0, 3, (32, 2))
        self.nn.fit(a_in, a_out)
        a_test = self.nn.predict(a_in)
        assert_equal(type(a_out), type(a_test))
        assert_equal(a_out.shape, a_test.shape)

        assert_equal(len(self.nn.classes_), 2)
        assert_equal(self.nn.classes_[0].shape[0], 3)
        assert_equal(self.nn.classes_[1].shape[0], 3)

    def test_EstimateProbalities(self):
        a_in, a_out = numpy.zeros((8, 16)), numpy.random.randint(0, 5, (8, ))
        self.nn.fit(a_in, a_out)
        a_test = self.nn.predict_proba(a_in)
        assert_equal(type(a_out), type(a_test))
        assert_equal(a_in.shape[0], a_test.shape[0])

    def test_CalculateScore(self):
        a_in, a_out = numpy.zeros((8, 16)), numpy.random.randint(0, 5, (8, ))
        self.nn.fit(a_in, a_out)
        f = self.nn.score(a_in, a_out)
        assert_equal(type(f), numpy.float64)
nn = Classifier(
    layers=[
        Convolution('Rectifier', channels=12, kernel_shape=(3, 3), border_mode='full'),
        Convolution('Rectifier', channels=8, kernel_shape=(3, 3), border_mode='valid'),
        Layer('Rectifier', units=64),
        Layer('Softmax')],
    learning_rate=0.002,
    valid_size=0.2,
    n_stable=10,
    verbose=True)

nn.fit(X_train, y_train)


# Determine how well it does on training data and unseen test data.
print('\nTRAIN SCORE', nn.score(X_train, y_train))
print('TEST SCORE', nn.score(X_test, y_test))

y_pred = nn.predict(X_test)


# Show some training images and some test images too.
import pylab

for index, (image, label) in enumerate(zip(digits.images[:6], digits.target[:6])):
    pylab.subplot(2, 6, index + 1)
    pylab.axis('off')
    pylab.imshow(image, cmap=pylab.cm.gray_r, interpolation='nearest')
    pylab.title('Training: %i' % label)

for index, (image, prediction) in enumerate(zip(X_test[:6], y_pred[:6])):
    # Body assumed to mirror the training loop above (second subplot row).
    pylab.subplot(2, 6, index + 7)
    pylab.axis('off')
    pylab.imshow(image.reshape((8, 8)), cmap=pylab.cm.gray_r, interpolation='nearest')
    pylab.title('Predicts: %i' % prediction)
pylab.show()
Example #38
    if os.path.exists('data_models/neural_network.pkl') and not RecreateModel:
        nn = joblib.load('data_models/neural_network.pkl')
    else:
        nn = Classifier(
            layers=[
                Layer("Sigmoid", units=10),
                Layer("Softmax")],
            learning_rate=0.9,
            n_iter=25)
        nn.fit(x_train, y_train)
        joblib.dump(nn, 'data_models/neural_network.pkl')

    result = nn.predict(x_test)

    print 'Neural Network Results:'
    print 'Overall Accuracy: {0:3f}%'.format(nn.score(x_test, y_test) * 100)

    x_test_bad = data.X_test[data.Y_test == 1]
    y_test_bad = data.Y_test[x_test_bad.index.values.tolist()]
    print 'Bad Accuracy: {0:3f}%'.format(nn.score(x_test_bad.as_matrix(), y_test_bad.as_matrix()) * 100)

    x_test_good = data.X_test[data.Y_test == 0]
    y_test_good = data.Y_test[x_test_good.index.values.tolist()]
    print 'Good Accuracy: {0:3f}%'.format(nn.score(x_test_good.as_matrix(), y_test_good.as_matrix()) * 100)

    print 'Confusion matrix:'
    print confusion_matrix(result, y_test, labels=[0, 1])


run_time = time.time() - start_time
print 'Total time elapsed is {0} milliseconds'.format(run_time)
def nn(config):
    print config
    nLayers = []
    for x in config["layers"]:
        if x.size == 1:
            nLayers.append(Layer(x.ltype))
        elif x.ltype == "Maxout":
            nLayers.append(Layer(x.ltype, units=x.size, pieces=x.pieces))
        else:
            nLayers.append(Layer(x.ltype, units=x.size))

    data = None

    if config["dataset"] == u'wine':
        data = np.loadtxt(fname="winequality-white.csv", delimiter=',', skiprows=1)

        X, Y = data[:, :-1], data[:, -1]

        x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=.1, random_state=50)

        x_train = scale(x_train)
        x_test = scale(x_test) 

        x = Classifier(
            layers = nLayers,
            learning_rate = float(config["learning_rate"]),
            n_iter=int(config["num_iter"]))

        x.fit(x_train, y_train)

        score = x.score(x_test, y_test)
        current_nn = x
        print score
        return score, ""
    if config["dataset"] == u"heart":
        data = np.loadtxt(fname="heart_disease.csv", delimiter=',',
                skiprows=1)
        X, Y = data[:, :-1], data[:, -1]
        x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=50)
        x_train = scale(x_train)
        x_test = scale(x_test)

        y = Classifier(
            layers = nLayers,
            learning_rate = float(config["learning_rate"]),
            n_iter=int(config["num_iter"])
        )

        y.fit(x_train, y_train)
        score = y.score(x_test, y_test)
        current_nn = y
        print score
        return score, ""
    if config["dataset"] == "handwrite":
        print "Downloading data..."
        data = datasets.fetch_mldata("MNIST original")

        x_train, x_test, y_train, y_test = train_test_split(data.data/255.0, data.target.astype("int0"), test_size = .2)
        x_train = scale(x_train)
        x_test = scale(x_test)

        x = Classifier(
            layers = nLayers,
            learning_rate = float(config["learning_rate"]),
            n_iter=int(config["num_iter"]))

        x.fit(x_train, y_train)
        score = x.score(x_test, y_test)
        return score, x
Example #40
def get_nn_pck(X_train,
               X_test,
               y_train,
               y_test,
               c1=16,
               k1=9,
               p1=2,
               c2=14,
               k2=7,
               p2=2,
               c3=10,
               k3=3,
               p3=2):
    currentTime = str(
        time.strftime('%Y%m%d %H%M%S', time.localtime(time.time())))
    dirPath = currentPath + currentTime + 'canshu' + '%d-%d-%d-%d-%d-%d-%d-%d-%d' % (
        c1, k1, p1, c2, k2, p2, c3, k3, p3) + "/"
    excelPath = dirPath + "resLog.xlsx"
    os.mkdir(dirPath)
    # Create an Excel file named with the current time
    workbook = xlsxwriter.Workbook(excelPath)
    # Create a worksheet object
    worksheet = workbook.add_worksheet()
    worksheet.write("A1", "epochs")
    worksheet.write("B1", "Train-Score")
    worksheet.write("C1", "Test-Score")
    result = []
    for i in range(1, 10):
        nn = Classifier(
            layers=[
                Convolution('Rectifier',
                            channels=c1,
                            kernel_shape=(k1, k1),
                            border_mode='full',
                            pool_shape=(p1, p1)),
                # border_mode = 'full', no stride
                Convolution('Rectifier',
                            channels=c2,
                            kernel_shape=(k2, k2),
                            border_mode='full',
                            pool_shape=(p2, p2)),
                # border_mode = 'full', no stride
                Convolution('Rectifier',
                            channels=c3,
                            kernel_shape=(k3, k3),
                            border_mode='full',
                            pool_shape=(p3, p3)),
                Layer(
                    'Rectifier',
                    units=32,
                ),
                Layer(
                    'Rectifier',
                    units=32,
                ),
                Layer('Softmax', units=2)
            ],
            learning_rule="sgd",
            learning_rate=0.015,
            learning_momentum=0.9,
            weight_decay=0.001,
            n_iter=i,
            n_stable=10,
            f_stable=0.001,
            valid_size=0.1,
            verbose=True)
        nn.fit(X_train, y_train)

        pickle.dump(nn, open(dirPath + "nn" + str(i) + ".pkl", 'wb'))

        worksheet.write("A" + str(i + 1), i)
        worksheet.write("B" + str(i + 1), nn.score(X_train, y_train))
        worksheet.write("C" + str(i + 1), nn.score(X_test, y_test))
        result.append(nn.score(X_test, y_test))
    workbook.close()
    return max(result)
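A hypothetical invocation of the epoch sweep above; the image tensors and the module-level currentPath are assumed to be prepared elsewhere in the script:

best_test_score = get_nn_pck(X_train, X_test, y_train, y_test,
                             c1=16, k1=9, p1=2, c2=14, k2=7, p2=2,
                             c3=10, k3=3, p3=2)
print(best_test_score)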
Example #41
nn = Classifier(
    layers=[
        Convolution('Rectifier', channels=12, kernel_shape=(3, 3), border_mode='full'),
        Convolution('Rectifier', channels=10, kernel_shape=(3, 3), border_mode='valid'),
        Convolution('Rectifier', channels=4, kernel_shape=(3, 3), border_mode='valid'),
        Layer('Rectifier', units=64),
        Layer('Softmax')],
    learning_rate=0.002,
    valid_size=0.2,
    n_stable=10,
    verbose=True)

nn.fit(X_train, y_train)


# Determine how well it does on training data and unseen test data.
print('\nTRAIN SCORE', nn.score(X_train, y_train))
print('TEST SCORE', nn.score(X_test, y_test))

y_pred = nn.predict(X_test)


# Show some training images and some test images too.
import matplotlib.pyplot as pylab

for index, (image, label) in enumerate(zip(digits.images[:6], digits.target[:6])):
    pylab.subplot(2, 6, index + 1)
    pylab.axis('off')
    pylab.imshow(image, cmap=pylab.cm.gray_r, interpolation='nearest')
    pylab.title('Training: %i' % label)

for index, (image, prediction) in enumerate(zip(X_test[:6], y_pred[:6])):
    # Body assumed to mirror the training loop above (second subplot row).
    pylab.subplot(2, 6, index + 7)
    pylab.axis('off')
    pylab.imshow(image.reshape((8, 8)), cmap=pylab.cm.gray_r, interpolation='nearest')
    pylab.title('Predicts: %i' % prediction)
pylab.show()
class TestClassifierFunctionality(unittest.TestCase):

    def setUp(self):
        self.nn = MLPC(layers=[L("Softmax")], n_iter=1)

    def test_IsClassifier(self):
        assert_true(self.nn.is_classifier)

    def test_FitAutoInitialize(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.random.randint(0, 5, (8,))
        self.nn.fit(a_in, a_out)
        assert_true(self.nn.is_initialized)

    def test_ExplicitValidSet(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.random.randint(0, 5, (8,))
        self.nn.valid_set = (a_in, a_out)
        self.nn.fit(a_in, a_out)
        assert_true(self.nn.is_initialized)

    def test_PartialFit(self):
        a_in, a_out = numpy.zeros((8,4)), numpy.random.randint(0, 5, (8,))
        self.nn.partial_fit(a_in, a_out, classes=[0,1,2,3])
        self.nn.partial_fit(a_in*2.0, a_out+1, classes=[0,1,2,3])

    def test_PredictUninitializedNoUnitCount(self):
        a_in = numpy.zeros((8,16))
        assert_raises(AssertionError, self.nn.predict, a_in)

    def test_PredictUninitializedNoLabels(self):
        self.nn.layers[-1].units = 4
        a_in = numpy.zeros((8,16))
        assert_raises(AssertionError, self.nn.predict, a_in)

    def test_PredictBinaryProbability(self):
        a_in = numpy.random.uniform(-1.0, 1.0, size=(8,16))
        a_out = numpy.array((a_in.sum(axis=1) >= 0.0), dtype=numpy.int32)
        a_out[0], a_out[-1] = 0, 1
        self.nn.fit(a_in, a_out)

        a_proba = self.nn.predict_proba(a_in)
        a_test = self.nn.predict(a_in)
        c_out = numpy.unique(a_out)

        assert_equal(2, c_out.shape[0])
        assert_equal((8, 2), a_proba.shape)

        assert_true((a_proba >= 0.0).all())
        assert_true((a_proba <= 1.0).all())
        assert_true((abs(a_proba.sum(axis=1) - 1.0) < 1E-9).all())

    def test_PredictClasses(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.random.randint(0, 5, (8,))
        self.nn.fit(a_in, a_out)
        self.nn.batch_size = 4
        a_test = self.nn.predict(a_in)
        assert_equal(type(a_out), type(a_test))
        assert_equal(a_out.shape[0], a_test.shape[0])

        c_out = numpy.unique(a_out)
        assert_equal(len(self.nn.classes_), 1)
        assert_true((self.nn.classes_[0] == c_out).all())

    def test_PredictLargerBatchSize(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.random.randint(0, 5, (8,1))
        self.nn.batch_size = 32

        self.nn.fit(a_in, a_out)
        a_test = self.nn.predict(a_in)
        assert_equal(type(a_out), type(a_test))
        assert_equal(a_out.shape[0], a_test.shape[0])

    def test_PredictMultiClass(self):
        a_in, a_out = numpy.zeros((32,16)), numpy.random.randint(0, 3, (32,2))
        self.nn.fit(a_in, a_out)
        a_test = self.nn.predict(a_in)
        assert_equal(type(a_out), type(a_test))
        assert_equal(a_out.shape, a_test.shape)

        assert_equal(len(self.nn.classes_), 2)
        assert_equal(self.nn.classes_[0].shape[0], 3)
        assert_equal(self.nn.classes_[1].shape[0], 3)

    def test_EstimateProbalities(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.random.randint(0, 5, (8,))
        self.nn.fit(a_in, a_out)
        a_proba = self.nn.predict_proba(a_in)
        assert_equal(type(a_out), type(a_proba))
        assert_equal(a_in.shape[0], a_proba.shape[0])

        assert_true((a_proba >= 0.0).all())
        assert_true((a_proba <= 1.0).all())
        assert_true((abs(a_proba.sum(axis=1) - 1.0) < 1E-9).all())

    def test_MultipleProbalitiesAsList(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.random.randint(0, 5, (8,4))
        self.nn.fit(a_in, a_out)
        a_proba = self.nn.predict_proba(a_in)
        assert_equal(list, type(a_proba))
        assert_equal(4, len(a_proba))

        for p in a_proba:
            assert_equal(a_in.shape[0], p.shape[0])
            assert_less_equal(p.shape[1], 5)
            assert_true((p >= 0.0).all())
            assert_true((p <= 1.0).all())
            assert_true((abs(p.sum(axis=1) - 1.0) < 1E-9).all())

    def test_CalculateScore(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.random.randint(0, 5, (8,))
        self.nn.fit(a_in, a_out)
        f = self.nn.score(a_in, a_out)
        assert_equal(type(f), numpy.float64)
Example #43
X_test = X[150000:]
y_test = y[150000:]

nn = Classifier(
    layers=[
        Layer("Sigmoid", units=100),
        Layer("Softmax")],
    learning_rate=0.00018,  #valid_set = ((X_valid, y_valid))
    n_iter=3000,
    valid_set = (X_valid, y_valid))
print "Neural network specifications:"
print nn

nn.fit(X_train, y_train)

y_valid_pred = nn.predict(X_valid)  #OHHHH so the predict functions are always for validation!! (?) ... *facepalm*

score1 = nn.score(X_train, y_train)

score2 = nn.score(X_valid, y_valid)

score3 = nn.score(X_test, y_test)

print "Training accuracy = ", score1

print "Validation accuracy = ", score2

print "Testing accuracy = ", score3

print "Time = ", time.time() - startTime, "seconds"
Example #44
print

#------------------------NN---------------------------
print "Neural Network Classifer"

nn = Classifier(
    layers=[Layer("Sigmoid", units=100),
            Layer("Softmax")],
    learning_rate=0.00018,  #valid_set = ((X_valid, y_valid))
    n_iter=1000)
print "Neural network specifications:"
print nn

nn.fit(trainingSet, trainingSetLabels)

score1 = nn.score(trainingSet, trainingSetLabels)

score3 = nn.score(testingSet, testingSetLabels)

print "Training accuracy = ", score1

print "Testing accuracy = ", score3

probNN = nn.predict_proba(testingSet)
fprNN, tprNN, threshNN = metrics.roc_curve(
    testingSetLabels,
    probNN[:, 0])  #true positive rate, false positive rate (ROC curve)

print "Time = ", time.time() - startTime, "seconds"

startTime = time.time()
layers = [Layer(type='Tanh', units=7, dropout=0.25),
          Layer(type='Softmax')]
nn = Classifier(layers,
                valid_set=None,
                valid_size=0,
                batch_size=10,
                n_stable=20,
                n_iter=100,
                learning_rule='sgd',
                learning_rate=0.05,
                verbose=True)
nn.fit(X_train, y_train, w_train)
#nn.fit(X_train, y_train)
print "Training score"
print nn.score(X_train, y_train)
y_test_pred = nn.predict(X_test)
#Training Accuracy
y_pred = nn.predict(X_train)
print "Classification report train"
print classification_report(y_train, y_pred)
print "Score"
print nn.score(X_train, y_train)
print "Classification Report test"
print classification_report(y_test, y_test_pred)

print "Test Score"
print nn.score(X_test, y_test)

print "Test confusion matrix"
print confusion_matrix(y_test, y_test_pred)
scores_test = [[0. for i in range(num_of_trials)]
               for j in range(len(activation_list))]

for i, activation in enumerate(activation_list):
    print('========= Activation func: {} ========='.format(activation))
    ay.set_activation(activation)  # Set activation function
    for t in range(num_of_trials):
        print('---- Trial: {} ----'.format(t + 1))
        # define model, set params
        clf = Classifier(layers=ay.get_layers(),
                         learning_rate=learning_rate,
                         n_iter=iteration)
        # train
        clf.fit(X_train, y_train)
        # evaluate
        scores_train[i][t] = clf.score(X_train, y_train)
        scores_test[i][t] = clf.score(X_test, y_test)
        print('  - Training set score: {}'.format(scores_train[i][t]))
        print('  - Test set score: {}'.format(scores_test[i][t]))

scores_train = np.array(scores_train)
scores_test = np.array(scores_test)
print('')
print('# Summary')
print('train:', scores_train)
print('test:', scores_test)
average_train = np.sum(scores_train, axis=1) / float(num_of_trials)
average_test = np.sum(scores_test, axis=1) / float(num_of_trials)
print('average_train:', average_train)
print('average_test:', average_test)
scores_test = [[] for i in range(num_of_trials)]

for n in range(num_of_trials):
    print('============== Trial: {} ==============='.format(n+1))
    for i, unit in enumerate(units):
        clf = Classifier(
            layers=[
                Layer('Rectifier', units=unit),
                Layer("Softmax")],
            learning_rate=learning_rate,
            n_iter=iteration)

        clf.fit(X_train, y_train)

        print ('====================================')
        scores_train[n].append(clf.score(X_train, y_train))
        scores_test[n].append(clf.score(X_test, y_test))
        print ('num of units >> {}'.format(unit))
        print ('  - Training set score: {}'.format(scores_train[n][i]))
        print ('  - Test set score: {}'.format(scores_test[n][i]))

scores_train = np.array(scores_train)
scores_test = np.array(scores_test)
print('')
print('# Summary')
print('train:', scores_train)
print('test:', scores_test)
average_train = np.sum(scores_train, axis=0) / float(num_of_trials)
average_test = np.sum(scores_test, axis=0) / float(num_of_trials)
print ('average_train:', average_train)
print ('average_test:', average_test)
        max_epochs=10,
        verbose=1)
    classifiers.append(('nolearn.lasagne', clf))


RUNS = 10

for name, orig in classifiers:
    times = []
    accuracies = []
    for i in range(RUNS):
        start = time.time()

        clf = clone(orig)
        clf.random_state = int(time.time())
        clf.fit(X_train, y_train)

        accuracies.append(clf.score(X_test, y_test))
        times.append(time.time() - start)

    a_t = np.array(times)
    a_s = np.array(accuracies)

    y_pred = clf.predict(X_test)

    print("\n"+name)
    print("\tAccuracy: %5.2f%% ±%4.2f" % (100.0 * a_s.mean(), 100.0 * a_s.std()))
    print("\tTimes:    %5.2fs ±%4.2f" % (a_t.mean(), a_t.std()))
    print("\tReport:")
    print(classification_report(y_test, y_pred))
Example #49
'''
Created on 2015/08/12

@author: Daytona
'''

import numpy as np
from scipy import stats
#np.random.seed(12345678)

from sknn.mlp import Classifier, Layer

nn = Classifier(
    layers=[
        Layer("Rectifier", units=100),
        Layer("Linear")],
    learning_rate=0.02,  # keyword must be lowercase for sknn's Classifier
    n_iter=10)
nn.fit(X_train, y_train)

y_valid = nn.predict(X_valid)

score = nn.score(X_test, y_test)
Example #50
pred_1_train = np.vstack(
    (knn_pred_train, mnb_pred_train, rfc_pred_train, svm_pred_train))
pred_1_test = np.vstack(
    (knn_pred_test, mnb_pred_test, rfc_pred_test, svm_pred_test))

pred_1_train = pred_1_train.T
pred_1_test = pred_1_test.T

# Feeding into MLP
nn = Classifier(layers=[Layer("Sigmoid", units=100),
                        Layer("Softmax")],
                learning_rate=0.001,
                n_iter=25)

newsgroups_train_original.target = newsgroups_train_original.target.reshape(
    11314, 1)
nn.fit(pred_1_train, newsgroups_train_original.target)

y_example = nn.predict(pred_1_test)

y_test.shape
u = nn.score(pred_1_test, y_test)  # score expects features and labels, not two label arrays

# Accuracy of MLP
from sklearn.metrics import accuracy_score
print(accuracy_score(y_example, y_test))

target_names = [0, 1, 2, 3, 4, 5]
newsgroups_test.target = newsgroups_test.target.reshape(7532, 1)
print classification_report(newsgroups_test.target, mnb_pred_test, target_names)
Example #51
                          regularize='L1',
                          verbose=True,
                          valid_size=0.1,
                          n_iter=200)
        clf3.fit(Xnew, ytrain)

        # clf4 = svm.SVC(kernel='rbf',gamma=0.1)
        # clf4.fit(Xnew,ytrain)
        # clf5 = svm.SVC(kernel='rbf',gamma=0.05)
        # clf5.fit(Xnew,ytrain)
        # clf6 = svm.SVC(kernel='rbf',gamma=0.001)
        # clf6.fit(Xnew,ytrain)
        # clf7 = svm.SVC(kernel='rbf',gamma=0.005)
        # clf7.fit(Xnew,ytrain)
        #evaluate accuracy, save to array
        acc1 += clf1.score(Xtestnew, ytest)
        acc2 += clf2.score(Xtestnew, ytest)
        acc3 += clf3.score(Xtestnew, ytest)
        # acc4 += clf4.score(Xtestnew,ytest)
        # acc5 += clf5.score(Xtestnew,ytest)
        # acc6 += clf6.score(Xtestnew,ytest)
        # acc7 += clf7.score(Xtestnew,ytest)
    #svmaccs.append((acc4/10,acc5/10,acc6/10,acc7/10))
    #print(svmaccs)
    accs = (acc1 / 5, acc2 / 5, acc3 / 5)
    nnacs.append(accs)
    #train gaussian kernel model to X, Y
    #evaluate accuracy, save to array

# print (sum(var))
# plt.plot(sumvar)
Example #52
#------------------------NN---------------------------
print "Neural Network Classifer"

nn = Classifier(
    layers=[
        Layer("Sigmoid", units=100),
        Layer("Softmax")],
    learning_rate=0.00018,  #valid_set = ((X_valid, y_valid))
    n_iter=1000)
print "Neural network specifications:"
print nn

nn.fit(trainingSet, trainingSetLabels)

score1 = nn.score(trainingSet, trainingSetLabels)

score3 = nn.score(testingSet, testingSetLabels)

print "Training accuracy = ", score1

print "Testing accuracy = ", score3

probNN = nn.predict_proba(testingSet)
fprNN, tprNN, threshNN = metrics.roc_curve(testingSetLabels, probNN[:, 0]) #true positive rate, false positive rate (ROC curve)

print "Time = ", time.time() - startTime, "seconds"

startTime = time.time()

print
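
# fprNN / tprNN are computed above but never plotted in this snippet; a minimal
# sketch of the usual follow-up:
import matplotlib.pyplot as plt

print "AUC = ", metrics.auc(fprNN, tprNN)
plt.plot(fprNN, tprNN, label='NN ROC')
plt.plot([0, 1], [0, 1], linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc='lower right')
plt.show()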
Example #53
X_test = X[150000:]
y_test = y[150000:]

nn = Classifier(
    layers=[Layer("Sigmoid", units=100),
            Layer("Softmax")],
    learning_rate=0.000002,  #valid_set = ((X_valid, y_valid))
    n_iter=10000)
print "Neural network specifications:"
print nn

nn.fit(X_train, y_train)

y_valid = nn.predict(X_valid)  # predictions for the held-out validation set

score1 = nn.score(X_train, y_train)

# Caution: y_valid holds the model's own predictions, so this "validation
# accuracy" is always 1.0; the true validation labels are needed for a
# meaningful score.
score2 = nn.score(X_valid, y_valid)

score3 = nn.score(X_test, y_test)

print "Training accuracy = ", score1

print "Validation accuracy = ", score2

print "Testing accuracy = ", score3

print "Time = ", time.time() - startTime, "seconds"
Example #54
    elif f == 1:
        single_inside = matrix

    elif f == 2:
        double_outside = matrix

    elif f == 3:
        double_inside = matrix

frames = [single_outside, single_inside, double_outside, double_inside]
results = pd.concat(frames)
x = np.array(results.drop(['label'], 1))
x = preprocessing.scale(x)
y = np.array(results['label'])
# machine learning
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
    x, y, test_size=0.2)
# clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
clf = Classifier(
    layers=[Layer("Maxout", units=100, pieces=2),
            Layer("Softmax")],
    learning_rate=0.001,
    n_iter=25)
clf.fit(x_train, y_train)
# accuracy = clf.score(x_test, y_test)
print clf.predict(x_test)
print y_test
print clf.score(x_test, y_test)
with open('gesture_recognizeSVM_NN.pickle', 'wb') as f:
    pickle.dump(clf, f)
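
# Reloading the pickled classifier later is symmetric; a minimal sketch:
import pickle
with open('gesture_recognizeSVM_NN.pickle', 'rb') as f:
    clf_loaded = pickle.load(f)
print clf_loaded.score(x_test, y_test)  # should match the score printed above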
Example #55
    RecreateModel = False

    # load the cached model unless we have been asked to rebuild it
    if os.path.exists('data_models/neural_network.pkl') and not RecreateModel:
        nn = joblib.load('data_models/neural_network.pkl')
    else:
        nn = Classifier(layers=[Layer("Sigmoid", units=10),
                                Layer("Softmax")],
                        learning_rate=0.9,
                        n_iter=25)
        nn.fit(x_train, y_train)
        joblib.dump(nn, 'data_models/neural_network.pkl')

    result = nn.predict(x_test)

    print 'Neural Network Results:'
    print 'Overall Accuracy: {0:.3f}%'.format(nn.score(x_test, y_test) * 100)

    x_test_bad = data.X_test[data.Y_test == 1]
    y_test_bad = data.Y_test[x_test_bad.index.values.tolist()]
    print 'Bad Accuracy: {0:.3f}%'.format(
        nn.score(x_test_bad.as_matrix(), y_test_bad.as_matrix()) * 100)

    x_test_good = data.X_test[data.Y_test == 0]
    y_test_good = data.Y_test[x_test_good.index.values.tolist()]
    print 'Good Accuracy: {0:.3f}%'.format(
        nn.score(x_test_good.as_matrix(), y_test_good.as_matrix()) * 100)

    print 'Confusion matrix:'
    # confusion_matrix expects (y_true, y_pred)
    print confusion_matrix(y_test, result, labels=[0, 1])

run_time = time.time() - start_time
Example #56
	scaler = StandardScaler()
	x_train = scaler.fit_transform(x_train)
	x_valid = scaler.transform(x_valid)
	for alpha in learning_rates:
		for batch_size in batch_sizes:
			for epoch in num_epochs:
				nn = Classifier(
					layers=[
						Layer("Tanh", units=100),
						Layer("Softmax")],
					learning_rate=alpha,
					batch_size=batch_size,
					regularize="L2",
					n_iter=epoch,
					verbose=False)
				nn.fit(x_train, y_train)
				valid_score = nn.score(x_valid, y_valid)
				nn_accuracy.append(valid_score)
				nn_batch_sizes.append(batch_size)
				nn_alphas.append(alpha)
				nn_epochs.append(epoch)
				nn_dropout.append(0)
				nn_type.append(0)
				nn_fold.append(fold)
				print str(epoch) + " epochs.",\
					batch_size, "Batch Size.",\
					alpha, "Learning Rate.",\
					"Fold", fold,\
					"Valid Accuracy:\t", round(valid_score, 4)

nn_results = np.column_stack((nn_type, nn_epochs, nn_batch_sizes, nn_alphas, nn_dropout, nn_accuracy, nn_fold))
nn_df = pd.DataFrame(nn_results[:, 1:], columns=['epoch', 'batch_size', 'alpha', 'dropout', 'accuracy', 'fold'])
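
# A natural follow-up: average the grid results over folds and pick the best
# configuration (a sketch; the column names follow nn_df as built above):
best = (nn_df.groupby(['epoch', 'batch_size', 'alpha'])['accuracy']
             .mean()
             .idxmax())
print "Best (epoch, batch_size, alpha):", best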
Example #57
    teamData[team] = currentData

nn = Classifier(
    layers=[
        Layer("Rectifier", units=100),  # note: the `pieces` option applies only to Maxout layers
        Layer("Softmax")],
    learning_rate=0.05,
    learning_rule='adadelta',
    n_iter=1000)

trainData = None

# pool every team except the held-out test team into one training frame
for team in teamData:
    if team == test_team:
        continue
    if trainData is None:
        trainData = teamData[team]
    else:
        trainData = pd.concat([trainData, teamData[team]])

testData = teamData[test_team].as_matrix(['Runs', 'Inns', 'Ave', 'SR'])

y = trainData['Rating'].as_matrix()
trainData = trainData.as_matrix(['Runs', 'Inns', 'Ave', 'SR'])

nn.fit(trainData, y)

x = nn.predict(testData)
score = nn.score(testData, teamData[test_team]['Rating'].as_matrix())
for player in range(len(x)):
    print str(x[player]) + ": " + teamData[test_team]['Player'][player] + ": " + str(teamData[test_team]['LastPlayed'][player])
Example #58
# taking the class variable in another column
y = mydata['y']
del mydata['y']
mynewdata = preprocessing.normalize(mydata)

# creating a model and splitting data set into training and testing
DefaultTrain, DefaultValidation, y_train, y_test = train_test_split(mynewdata, y, test_size=0.2, random_state=42)

nn = Classifier(layers=[
        Layer("Rectifier", units=100),
        Layer("Softmax")],
    learning_rate=0.003,
    n_iter=25)
nn.fit(DefaultTrain, y_train)
y_valid = nn.predict(DefaultValidation)
print('Accuracy: ', nn.score(DefaultValidation, y_test))
print(confusion_matrix(y_test, y_valid))
# NB: a ROC curve built from hard 0/1 predictions has a single operating point;
# predict_proba scores would trace the full curve
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_valid, pos_label=1)
roc_auc = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='darkorange',
         lw=2, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
Example #59
def load_data(file_path):
    # dtype=float maps the '?' placeholders to NaN and yields a 2-D array
    # (dtype=None would return a structured array that cannot be column-sliced)
    return np.genfromtxt(file_path,
                         delimiter=',',
                         missing_values='?',
                         dtype=float)


def split_set(data):
    return (data[:, :-1], data[:, -1])


data = load_data('dermatology/dermatology.data')
layers = [
    Layer("Rectifier", units=100, dropout=0.25),
    Layer("Linear", units=100, dropout=0.25),
]
#Layer("Sigmoid", units=100, dropout=0.25)]

for layers_perm in permutations(layers):
    X_train, X_test, y_train, y_test = train_test_split(data[:, :-1],
                                                        data[:, -1],
                                                        test_size=0.33,
                                                        random_state=42)
    nn = Classifier(layers=list(layers_perm) + [Layer("Softmax")],
                    learning_rate=0.001,
                    n_iter=100)

    nn.fit(X_train, y_train)
    print "Configuration: " + str(layers_perm)
    print nn.score(X_test, y_test)
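
# dermatology.data marks missing values (the Age attribute) with '?'; with the
# float dtype above they load as NaN, so impute before fitting; a sketch using
# the era-appropriate sklearn Imputer:
from sklearn.preprocessing import Imputer

X, y = split_set(data)
X = Imputer(strategy='mean').fit_transform(X)
# ...then split X, y in the loop above instead of the raw data columns.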
Example #60
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_valid = scaler.transform(x_valid)
    for alpha in learning_rates:
        for batch_size in batch_sizes:
            for epoch in num_epochs:
                nn = Classifier(
                    layers=[Layer("Tanh", units=100),
                            Layer("Softmax")],
                    learning_rate=alpha,
                    batch_size=batch_size,
                    regularize="L2",
                    n_iter=epoch,
                    verbose=False)
                nn.fit(x_train, y_train)
                valid_score = nn.score(x_valid, y_valid)
                nn_accuracy.append(valid_score)
                nn_batch_sizes.append(batch_size)
                nn_alphas.append(alpha)
                nn_epochs.append(epoch)
                nn_dropout.append(0)
                nn_type.append(0)
                nn_fold.append(fold)
                print str(epoch) + " epochs.",\
                 batch_size, "Batch Size.",\
                 alpha, "Learning Rate.",\
                 "Fold", fold,\
                 "Valid Accuracy:\t", round(valid_score, 4)

nn_results = np.column_stack((nn_type, nn_epochs, nn_batch_sizes, nn_alphas,
                              nn_dropout, nn_accuracy, nn_fold))