Example #1
def NeuralNet(train, test, features):
    eta = 0.025
    niter = 2000

    regressor = Regressor(
        layers=[Layer("Rectifier", units=100), Layer("Tanh", units=100), Layer("Sigmoid", units=100), Layer("Linear")],
        learning_rate=eta,
        learning_rule="momentum",
        learning_momentum=0.9,
        batch_size=100,
        valid_size=0.01,
        n_stable=100,
        n_iter=niter,
        verbose=True,
    )

    print(regressor.__class__.__name__)
    start = time.time()
    regressor.fit(np.array(train[list(features)]), train[goal])
    print "  -> Training time:", time.time() - start

    if not os.path.exists("result/"):
        os.makedirs("result/")
    # TODO: clean up this prediction/flattening step
    predictions = regressor.predict(np.array(test[features]))
    try:  # flatten nested predictions into a flat list when possible
        predictions = list(itertools.chain.from_iterable(predictions))
    except TypeError:
        pass  # predictions were already one-dimensional
    csvfile = "result/dat-nnet-eta%s-niter%s.csv" % (str(eta), str(niter))
    with open(csvfile, "w") as output:
        writer = csv.writer(output, lineterminator="\n")
        writer.writerow([myid, goal])
        for i in range(0, len(predictions)):
            writer.writerow([i + 1, predictions[i]])
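A minimal driver sketch for the function above, assuming `goal` and `myid` are module-level globals naming the target and id columns, that `numpy as np`, `time`, `os`, `csv`, and `itertools` are imported, and that `Regressor` and `Layer` come from `sknn.mlp`; the file names and column names below are placeholders:

import pandas as pd

goal, myid = "target", "id"  # assumed module-level globals used by NeuralNet
train = pd.read_csv("train.csv")  # placeholder path; must contain the feature columns and `goal`
test = pd.read_csv("test.csv")
features = [c for c in train.columns if c not in (goal, myid)]
NeuralNet(train, test, features)  # writes result/dat-nnet-eta0.025-niter2000.csv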
Example #2
class ClassificationTools():
	def __init__(self, inputVector=[], outputVector=[], filepath=''):
		if filepath == '':
			self.inputVector = numpy.asarray(inputVector)
			self.outputVector = numpy.asarray(outputVector)
			self.model = None
		else:
			self.model = pickle.load(open(filepath, 'rb'))

	def setVectors(self, inputVector, outputVector):
		self.inputVector = numpy.asarray(inputVector)
		self.outputVector = numpy.asarray(outputVector)


	def trainMultilayerPerceptron(self, hlunits=10000, learningRate=0.01, iters=1000):
		# trains a simple MLP with a single hidden layer
		self.model = Regressor(
			layers=[
				Layer("Rectifier", units=hlunits),
				Layer("Linear")],
			learning_rate=learningRate,
			n_iter=iters)
		self.model.fit(self.inputVector, self.outputVector)

	def predict(self, toPredict):
		prediction = self.model.predict(numpy.asarray(toPredict))
		return prediction # this will be a 1D numpy array of floats

	def trainDeepNetwork(self):
		# trains a deep network based on a multi-layer autoencoder,
		# which is then fine-tuned as an MLP
		pass

	def serializeModel(self, filepath):
		pickle.dump(self.model, open(filepath, 'wb'))
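A short, hypothetical round trip with this class, assuming `numpy`, `pickle`, and `sknn.mlp`'s `Regressor` and `Layer` are imported in the enclosing module; the path is a placeholder:

X = [[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
y = [[1.0], [1.0], [0.0]]
tools = ClassificationTools(inputVector=X, outputVector=y)
tools.trainMultilayerPerceptron(hlunits=16, learningRate=0.01, iters=100)
tools.serializeModel("mlp.pkl")  # placeholder path
reloaded = ClassificationTools(filepath="mlp.pkl")
print(reloaded.predict([[0.0, 0.0]]))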
Example #3
 def test_VerboseRegressor(self):
     nn = MLPR(layers=[L("Linear")], verbose=1, n_iter=1)
     a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
     nn.fit(a_in, a_out)
     assert_in("Epoch       Training Error       Validation Error       Time", self.buf.getvalue())
     assert_in("    1       ", self.buf.getvalue())
     assert_in("    N/A     ", self.buf.getvalue())
Example #4
def neural_net(features, target, test_size_percent=0.2, cv_split=3, n_iter=100, learning_rate=0.01):
    '''features -> pandas DataFrame with attributes as columns
       target -> pandas DataFrame with the target column for prediction
       test_size_percent -> fraction of the data points to be used for testing'''
    scale = preprocessing.MinMaxScaler()
    X_array = scale.fit_transform(features)
    y_array = scale.fit_transform(target)  # refitting the same scaler is safe here: X_array is already transformed
    mlp = Regressor(layers=[Layer("Rectifier", units=5),   # hidden layer 1
                            Layer("Rectifier", units=3),   # hidden layer 2
                            Layer("Linear")],              # output layer
                    n_iter=n_iter, learning_rate=learning_rate)
    X_train, X_test, y_train, y_test = train_test_split(X_array, y_array.T.squeeze(), test_size=test_size_percent, random_state=4)
    mlp.fit(X_train,y_train)
    test_prediction = mlp.predict(X_test)
    tscv = TimeSeriesSplit(cv_split)
    
    training_score = cross_val_score(mlp,X_train,y_train,cv=tscv.n_splits) 
    testing_score = cross_val_score(mlp,X_test,y_test,cv=tscv.n_splits)
    print"Cross-val Training score:", training_score.mean()
#    print"Cross-val Testing score:", testing_score.mean()
    training_predictions = cross_val_predict(mlp,X_train,y_train,cv=tscv.n_splits)
    testing_predictions = cross_val_predict(mlp,X_test,y_test,cv=tscv.n_splits)
    
    training_accuracy = metrics.r2_score(y_train,training_predictions) 
#    test_accuracy_model = metrics.r2_score(y_test,test_prediction_model)
    test_accuracy = metrics.r2_score(y_test,testing_predictions)
    
#    print"Cross-val predicted accuracy:", training_accuracy
    print"Test-predictions accuracy:",test_accuracy

    plot_model(target,y_train,y_test,training_predictions,testing_predictions)
    return mlp
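A hypothetical invocation of neural_net, assuming `preprocessing`, `train_test_split`, `TimeSeriesSplit`, `cross_val_score`, `cross_val_predict`, and `metrics` are the scikit-learn imports the snippet relies on, and that `plot_model` is defined elsewhere in the module; the CSV path and column names are placeholders:

import pandas as pd

df = pd.read_csv("data.csv")  # placeholder path
features = df[["f1", "f2", "f3"]]  # placeholder feature columns
target = df[["y"]]  # placeholder target column
model = neural_net(features, target, test_size_percent=0.2, n_iter=200)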
Example #5
class TestDataAugmentation(unittest.TestCase):

    def setUp(self):
        self.called = 0
        self.value = 1.0

        self.nn = MLPR(
                    layers=[L("Linear")],
                    n_iter=1,
                    batch_size=2,
                    mutator=self._mutate_fn)

    def _mutate_fn(self, sample):
        self.called += 1
        sample[sample == 0.0] = self.value

    def test_TestCalledOK(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
        self.nn._fit(a_in, a_out)
        assert_equals(a_in.shape[0], self.called)

    def test_DataIsUsed(self):
        self.value = float("nan")
        a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
        assert_raises(RuntimeError, self.nn._fit, a_in, a_out)
Example #6
def gamma():
    value_map = {'warm': 1.0, 'neutral': 0.5, 'cold': 0.0}

    X = data["x"][:, [0, 1, 2, 5, 6]]
    X = np.abs(X)
    maxX = np.amax(X, axis=0)
    minX = np.amin(X, axis=0)
    X = (X - minX) / (maxX - minX)  # min-max normalization to [0, 1]
    Y = data["y"][:, 1]
    Y = np.asarray([value_map[y] for y in Y])

    X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y, test_size=0.2)

    nn = Regressor(
        layers=[
            Layer("Rectifier", units=3),
            Layer("Linear")],
        learning_rate=1e-3,
        n_iter=100)

    nn.fit(X_train, Y_train)

    print('inosity accuracy')
    prediction = nn.predict(X_test)
    prediction = [closest(y[0]) for y in prediction]
    Y_test = [closest(y) for y in Y_test]
    print(metrics.accuracy_score(prediction, Y_test))
Example #7
 def __init__(self, new=False, display=False):
     self.possibilities = generate(Learn.n_coups)
     np.random.shuffle(self.possibilities)
     self.explore = 0.
     self.jeu = MJ.Jeu(autorepeat=False, display=display)
     self.jeu.restart(Learn.coups_sautes)
     self.image = self.get_image()
     if new:
         self.nn = Regressor(layers=[
             Layer("Linear", units=(Learn.n_cell + Learn.n_coups)),
             Layer("Sigmoid", units=1000),
             Layer("Sigmoid")
         ],
                             learning_rate=0.01,
                             n_iter=1)
         self.nn.fit(
             self.good_shape(self.image,
                             self.possibilities[Learn.n_coups // 2 - 1]),
             np.array([[0]]))
     else:
         self.nn = pickle.load(open('nn.pkl', 'rb'))
         self.nn.fit(
             self.good_shape(self.image,
                              self.possibilities[Learn.n_coups // 2 - 1]),
             np.array([[1]]))
     self.current_data_set = []
Example #8
class TestSerializedNetwork(TestLinearNetwork):

    def setUp(self):
        self.original = MLPR(layers=[L("Linear")])
        a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
        self.original._initialize(a_in, a_out)

        buf = io.BytesIO()
        pickle.dump(self.original, buf)
        buf.seek(0)
        self.nn = pickle.load(buf)

    def test_TypeOfWeightsArray(self):
        for w, b in self.nn._mlp_to_array():
            assert_equal(type(w), numpy.ndarray)
            assert_equal(type(b), numpy.ndarray)

    # Override base class tests; you currently can't re-train a network
    # that was serialized and deserialized.
    def test_FitAutoInitialize(self): pass
    def test_ResizeInputFrom4D(self): pass
    def test_ResizeInputFrom3D(self): pass

    def test_PredictNoOutputUnitsAssertion(self):
        # Override base class test; the deserialized network is already
        # initialized, so it should predict without raising an assertion.
        assert_true(self.nn.is_initialized)

    def test_PredictAlreadyInitialized(self):
        a_in = numpy.zeros((8,16))
        self.nn.predict(a_in)
Example #9
class TestSerializedNetwork(TestLinearNetwork):
    def setUp(self):
        self.original = MLPR(layers=[L("Linear")])
        a_in, a_out = numpy.zeros((8, 16)), numpy.zeros((8, 4))
        self.original._initialize(a_in, a_out)

        buf = io.BytesIO()
        pickle.dump(self.original, buf)
        buf.seek(0)
        self.nn = pickle.load(buf)

    def test_TypeOfWeightsArray(self):
        for w, b in self.nn._mlp_to_array():
            assert_equal(type(w), numpy.ndarray)
            assert_equal(type(b), numpy.ndarray)

    # Override base class tests; you currently can't re-train a network
    # that was serialized and deserialized.
    def test_FitAutoInitialize(self):
        pass

    def test_ResizeInputFrom4D(self):
        pass

    def test_ResizeInputFrom3D(self):
        pass

    def test_PredictNoOutputUnitsAssertion(self):
        # Override base class test; the deserialized network is already
        # initialized, so it should predict without raising an assertion.
        assert_true(self.nn.is_initialized)

    def test_PredictAlreadyInitialized(self):
        a_in = numpy.zeros((8, 16))
        self.nn.predict(a_in)
Example #10
    def test_UnusedParameterWarning(self):
        nn = MLPR(layers=[L("Linear", pieces=2)], n_iter=1)
        a_in = numpy.zeros((8, 16))
        nn._initialize(a_in, a_in)

        assert_in('Parameter `pieces` is unused', self.buf.getvalue())
        self.buf = io.StringIO()  # clear
Example #11
class NeuralRegLearner(object):

    def __init__(self, verbose=False):
        self.name = "Neural net Regression Learner"
        self.network = Regressor(layers=[Layer("Rectifier", units=100),
                                         Layer("Linear")],
                                 learning_rate=0.02,
                                 n_iter=10)

    def addEvidence(self,dataX,dataY):
        """
        @summary: Add training data to learner
        @param dataX: X values of data to add
        @param dataY: the Y training values
        """
        dataX = np.array(dataX)
        dataY = np.array(dataY)
        self.network.fit(dataX, dataY) 
        
    def query(self,points):
        """
        @summary: Estimate a set of test points given the model we built.
        @param points: should be a numpy array with each row corresponding to a specific query.
        @returns the estimated values according to the saved model.
        """
        return self.network.predict(points)
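A brief sketch of this learner's API on synthetic data, assuming `numpy as np` and `sknn.mlp`'s `Regressor` and `Layer` are imported as the class requires:

learner = NeuralRegLearner()
dataX = np.random.uniform(-1.0, 1.0, (100, 5))  # synthetic training inputs
dataY = dataX.sum(axis=1)  # synthetic targets
learner.addEvidence(dataX, dataY)
estimates = learner.query(dataX[:10])  # predictions for ten query points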
Example #12
    def __init__(self, verbose=False):
        self.name = "Neural net Regression Learner"
        self.network = Regressor(layers=[Layer("Rectifier", units=100),
                                         Layer("Linear")],
                                 learning_rate=0.02,
                                 n_iter=10)
Example #13
    def test_UnusedParameterWarning(self):
        nn = MLPR(layers=[L("Linear", pieces=2)], n_iter=1)
        a_in = numpy.zeros((8,16))
        nn._initialize(a_in, a_in)

        assert_in('Parameter `pieces` is unused', self.buf.getvalue())
        self.buf = io.StringIO() # clear
Example #14
    def run_EqualityTest(self, copier, asserter):
        for activation in ["Rectifier", "Sigmoid", "Maxout", "Tanh"]:
            nn1 = MLPR(layers=[L(activation, units=16, pieces=2), L("Linear", units=1)], random_state=1234)
            nn1._initialize(self.a_in, self.a_out)

            nn2 = copier(nn1, activation)
            asserter(numpy.all(nn1.predict(self.a_in) == nn2.predict(self.a_in)))
Example #15
 def make(self, activation, seed=1234, train=False, **keywords):
     nn = MLPR(layers=[L(activation, units=16, **keywords), L("Linear", units=1)], random_state=seed, n_iter=1)
     if train:
         nn.fit(self.a_in, self.a_out)
     else:
         nn._initialize(self.a_in, self.a_out)
     return nn
Example #16
 def __init__(self, iterations=5):
     results = []
     situations = []
     logging.basicConfig()
     for i in range(0, iterations):
         g = Game(print_board=False)
         round_situations = []
         while not g.game_over:
             choices = g.available_cols()
             choice = random.choice(choices)
             round_situations.append(self.game_to_sit(g, choice))
             g.place_piece(choice)
         for situation in round_situations:
             results.append(g.points)
         situations.extend(round_situations)
     #self.pipeline = Pipeline([
     #    ('min/max scaler', MinMaxScaler(feature_range=(0.0, 1.0))),
     #    ('neural network', Regressor(
     self.nn = Regressor(layers=[
                 Layer("Rectifier", units=100),
                 Layer("Linear")],
             learning_rate=0.00002,
             n_iter=10)
     #self.pipeline.fit(np.array(situations), np.array(results))
     print(np.array(situations).shape)
     self.nn.fit(np.array(situations), np.array(results))
Example #17
class TestLinearNetwork(unittest.TestCase):

    def setUp(self):
        self.nn = MLPR(layers=[L("Linear")], n_iter=1)

    def test_LifeCycle(self):
        del self.nn

    def test_PredictNoOutputUnitsAssertion(self):
        a_in = numpy.zeros((8,16))
        assert_raises(AssertionError, self.nn.predict, a_in)

    def test_AutoInitializeWithOutputUnits(self):
        self.nn.layers[-1].units = 4
        a_in = numpy.zeros((8,16))
        self.nn.predict(a_in)

    def test_FitAutoInitialize(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
        self.nn.fit(a_in, a_out)
        assert_true(self.nn.is_initialized)

    def test_FitWrongSize(self):
        a_in, a_out = numpy.zeros((7,16)), numpy.zeros((9,4))
        assert_raises(AssertionError, self.nn.fit, a_in, a_out)
Example #18
def CreateNetwork(data, predicates):
    # input dimensionality
    dim_in = len(predicates)
    # output dimensionality
    dim_out = len(data[0]) - 1
    # network configuration
    neural_network = Regressor(
        layers=[
            Layer("Rectifier", units=50),
            Layer("Linear")],
        learning_rate=0.001,
        n_iter=5000)
    # build the training set
    x_train = np.array([CalcPredicates(row[0], predicates) for row in data])
    y_train = np.array([list(map(float, row[1:])) for row in data])
    # training
    logging.info('Start training')
    logging.info('\n'+str(x_train))
    logging.info('\n'+str(y_train))
    try:
        neural_network.fit(x_train, y_train)
    except KeyboardInterrupt:
        logging.info('User break')
    logging.info('Network created successfully')
    logging.info('score = '+str(neural_network.score(x_train, y_train)))
    # save the trained network
    pickle.dump(neural_network, open(datetime.datetime.now().isoformat()+'.pkl', 'wb'))
    return neural_network
Example #19
class TestInputOutputs(unittest.TestCase):
    def setUp(self):
        self.nn = MLPR(layers=[L("Linear")], n_iter=1)

    def test_FitOneDimensional(self):
        a_in, a_out = numpy.zeros((8, 16)), numpy.zeros((8, ))
        self.nn.fit(a_in, a_out)
Example #20
class TestInputOutputs(unittest.TestCase):

    def setUp(self):
        self.nn = MLPR(layers=[L("Linear")], n_iter=1)

    def test_FitOneDimensional(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,))
        self.nn.fit(a_in, a_out)
Example #21
    def test_SetParametersConstructor(self):
        weights = numpy.random.uniform(-1.0, +1.0, (16,4))
        biases = numpy.random.uniform(-1.0, +1.0, (4,))
        nn = MLPR(layers=[L("Linear")], parameters=[(weights, biases)])

        a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
        nn._initialize(a_in, a_out)
        assert_in('Reloading parameters for 1 layer weights and biases.', self.buf.getvalue())
Example #22
 def ctor(self, activation):
     nn = MLPR(layers=[
         L(activation, units=16, pieces=2),
         L("Linear", units=1)
     ],
               random_state=1234)
     nn._initialize(self.a_in, self.a_out)
     return nn
Example #23
    def test_SquareKernelPool(self):
        nn = MLPR(layers=[
                    C("Rectifier", channels=4, kernel_shape=(3,3), pool_shape=(2,2)),
                    L("Linear", units=5)])

        a_in = numpy.zeros((8,32,32,1))
        nn._create_specs(a_in)
        assert_equal(nn.unit_counts, [1024, 15 * 15 * 4, 5])
Example #24
    def test_VerticalKernel(self):
        nn = MLPR(layers=[
                    C("Rectifier", channels=4, kernel_shape=(1,16)),
                    L("Linear", units=7)])

        a_in = numpy.zeros((8,16,16,1))
        nn._create_specs(a_in)
        assert_equal(nn.unit_counts, [256, 16 * 4, 7])
Example #25
    def test_SquareKernelFull(self):
        nn = MLPR(layers=[
                    C("Rectifier", channels=4, kernel_shape=(3,3), border_mode='full'),
                    L("Linear", units=5)])

        a_in = numpy.zeros((8,32,32,1))
        nn._create_specs(a_in)
        assert_equal(nn.unit_counts, [1024, 32 * 32 * 4, 5])
Example #26
    def test_HorizontalKernel(self):
        nn = MLPR(layers=[
                    C("Rectifier", channels=7, kernel_shape=(16,1)),
                    L("Linear", units=5)])

        a_in = numpy.zeros((8,16,16,1))
        nn._create_specs(a_in)
        assert_equal(nn.unit_counts, [256, 16 * 7, 5])
Example #27
    def test_SquareKernelFull(self):
        nn = MLPR(layers=[
            C("ExpLin", channels=4, kernel_shape=(3, 3), border_mode='full'),
            L("Linear", units=5)
        ])

        a_in = numpy.zeros((8, 32, 32, 1))
        nn._create_specs(a_in)
        assert_equal(nn.unit_counts, [1024, 4624, 5])
Example #28
    def test_SetParametersConstructor(self):
        weights = numpy.random.uniform(-1.0, +1.0, (16, 4))
        biases = numpy.random.uniform(-1.0, +1.0, (4, ))
        nn = MLPR(layers=[L("Linear")], parameters=[(weights, biases)])

        a_in, a_out = numpy.zeros((8, 16)), numpy.zeros((8, 4))
        nn._initialize(a_in, a_out)
        assert_in('Reloading parameters for 1 layer weights and biases.',
                  self.buf.getvalue())
Example #29
    def check(self, a_in, a_out, a_mask):
        nn = MLPR(layers=[L("Linear")], learning_rule='adam', learning_rate=0.1, n_iter=50)
        nn.fit(a_in, a_out, a_mask)
        v_out = nn.predict(a_in)

        # Make sure the examples weighted 1.0 have low error, 0.0 high error.
        print(abs(a_out - v_out).T * a_mask)
        assert_true((abs(a_out - v_out).T * a_mask < 1E-1).all())
        assert_true((abs(a_out - v_out).T * (1.0 - a_mask) > 2.5E-1).any())
Example #30
    def test_HorizontalKernel(self):
        nn = MLPR(layers=[
            C("Rectifier", channels=7, kernel_shape=(16, 1)),
            L("Linear", units=5)
        ])

        a_in = numpy.zeros((8, 16, 16, 1))
        nn._create_specs(a_in)
        assert_equal(nn.unit_counts, [256, 16 * 7, 5])
Example #31
    def test_VerticalKernel(self):
        nn = MLPR(layers=[
            C("Rectifier", channels=4, kernel_shape=(1, 16)),
            L("Linear", units=7)
        ])

        a_in = numpy.zeros((8, 16, 16, 1))
        nn._create_specs(a_in)
        assert_equal(nn.unit_counts, [256, 16 * 4, 7])
Example #32
    def test_SquareKernelPool(self):
        nn = MLPR(layers=[
            C("Rectifier", channels=4, kernel_shape=(3, 3), pool_shape=(2, 2)),
            L("Linear", units=5)
        ])

        a_in = numpy.zeros((8, 32, 32, 1))
        nn._create_specs(a_in)
        assert_equal(nn.unit_counts, [1024, 15 * 15 * 4, 5])
Example #33
 def _run(self, activation):
     a_in, a_out = numpy.zeros((8, 32, 16, 1)), numpy.zeros((8, 4))
     nn = MLPR(
         layers=[C(activation, channels=4, kernel_shape=(3, 3), pool_shape=(2, 2), pool_type="mean"), L("Linear")],
         n_iter=1,
     )
     nn.fit(a_in, a_out)
     a_test = nn.predict(a_in)
     assert_equal(type(a_test), type(a_in))
Example #34
    def test_MultiLayerPooling(self):
        nn = MLPR(layers=[
                    C("Rectifier", channels=4, kernel_shape=(3,3), pool_shape=(2,2)),
                    C("Rectifier", channels=4, kernel_shape=(3,3), pool_shape=(2,2)),
                    L("Linear")])

        a_in, a_out = numpy.zeros((8,32,32,1)), numpy.zeros((8,16))
        nn._initialize(a_in, a_out)
        assert_equal(nn.unit_counts, [1024, 900, 196, 16])
Example #35
    def test_SquareKernelPool(self):
        # TODO: After creation the outputs don't seem to correspond; pooling enabled?
        nn = MLPR(layers=[
                    C("Rectifier", channels=4, kernel_shape=(3,3), pool_shape=(2,2)),
                    L("Linear", units=5)])

        a_in = numpy.zeros((8,32,32,1))
        nn._create_specs(a_in)
        assert_equal(nn.unit_counts, [1024, 15 * 15 * 4, 5])
Example #36
def train_regression_predictor(train_x, train_y, learning_rule='sgd', learning_rate=0.002, n_iter=20, units=4):
    mlp = Regressor(layers=[Layer('Rectifier', units=units),
                            Layer('Linear')],
                    learning_rule=learning_rule,
                    learning_rate=learning_rate,
                    n_iter=n_iter)
    mlp.fit(train_x, train_y)
    print(mlp.score(train_x, train_y))
    return mlp
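A hypothetical call with toy data; the shapes, the synthetic target, and the momentum rule are assumptions, and `Regressor`/`Layer` come from `sknn.mlp`:

import numpy as np

train_x = np.random.uniform(0.0, 1.0, (64, 3))
train_y = train_x.dot(np.array([[0.5], [1.5], [-1.0]]))  # synthetic linear target
mlp = train_regression_predictor(train_x, train_y, learning_rule='momentum', n_iter=50)
preds = mlp.predict(train_x)  # shape (64, 1)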
Example #37
    def test_MultiLayerPooling(self):
        nn = MLPR(layers=[
            C("Rectifier", channels=4, kernel_shape=(3, 3), pool_shape=(2, 2)),
            C("ExpLin", channels=4, kernel_shape=(3, 3), pool_shape=(2, 2)),
            L("Linear")
        ])

        a_in, a_out = numpy.zeros((8, 32, 32, 1)), numpy.zeros((8, 16))
        nn._initialize(a_in, a_out)
        assert_equal(nn.unit_counts, [1024, 900, 196, 16])
Example #38
    def test_Upscaling(self):
        nn = MLPR(
            layers=[
                C("Rectifier", channels=4, kernel_shape=(1, 1), scale_factor=(2, 2), border_mode="same"),
                L("Linear", units=5),
            ]
        )

        a_in = numpy.zeros((8, 32, 32, 1))
        nn._create_specs(a_in)
        assert_equal(nn.unit_counts, [1024, 64 * 64 * 4, 5])
Example #39
 def make(self, activation, seed=1234, train=False, **keywords):
     nn = MLPR(
         layers=[L(activation, units=16, **keywords),
                 L("Linear", units=1)],
         random_state=seed,
         n_iter=1)
     if train:
         nn.fit(self.a_in, self.a_out)
     else:
         nn._initialize(self.a_in, self.a_out)
     return nn
Example #40
 def make(self, activation, seed=1234, train=False, **keywords):
     nn = MLPR(
         layers=[C(activation, channels=16, kernel_shape=(3, 3), **keywords), L("Linear")],
         random_state=seed,
         n_iter=1,
     )
     if train:
         nn.fit(self.a_in, self.a_out)
     else:
         nn._initialize(self.a_in, self.a_out)
     return nn
Example #41
def neural_net(features,
               target,
               test_size_percent=0.2,
               cv_split=3,
               n_iter=100,
               learning_rate=0.01):
    '''features -> pandas DataFrame with attributes as columns
       target -> pandas DataFrame with the target column for prediction
       test_size_percent -> fraction of the data points to be used for testing'''
    scale = preprocessing.MinMaxScaler()
    X_array = scale.fit_transform(features)
    y_array = scale.fit_transform(target)  # refitting the same scaler is safe here: X_array is already transformed
    mlp = Regressor(
        layers=[
            Layer("Rectifier", units=5),  # hidden layer 1
            Layer("Rectifier", units=3),  # hidden layer 2
            Layer("Linear")  # output layer
        ],
        n_iter=n_iter,
        learning_rate=learning_rate)
    X_train, X_test, y_train, y_test = train_test_split(
        X_array,
        y_array.T.squeeze(),
        test_size=test_size_percent,
        random_state=4)
    mlp.fit(X_train, y_train)
    test_prediction = mlp.predict(X_test)
    tscv = TimeSeriesSplit(cv_split)

    training_score = cross_val_score(mlp, X_train, y_train, cv=tscv.n_splits)
    testing_score = cross_val_score(mlp, X_test, y_test, cv=tscv.n_splits)
    print "Cross-val Training score:", training_score.mean()
    #    print"Cross-val Testing score:", testing_score.mean()
    training_predictions = cross_val_predict(mlp,
                                             X_train,
                                             y_train,
                                             cv=tscv.n_splits)
    testing_predictions = cross_val_predict(mlp,
                                            X_test,
                                            y_test,
                                            cv=tscv.n_splits)

    training_accuracy = metrics.r2_score(y_train, training_predictions)
    #    test_accuracy_model = metrics.r2_score(y_test,test_prediction_model)
    test_accuracy = metrics.r2_score(y_test, testing_predictions)

    #    print"Cross-val predicted accuracy:", training_accuracy
    print "Test-predictions accuracy:", test_accuracy

    plot_model(target, y_train, y_test, training_predictions,
               testing_predictions)
    return mlp
Example #42
 def test_GetLayerParams(self):
     nn = MLPR(layers=[L("Linear")], n_iter=1)
     a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
     nn._initialize(a_in, a_out)
     
     p = nn.get_parameters()
     assert_equals(type(p), list)
     assert_true(isinstance(p[0], tuple))
     
     assert_equals(p[0].layer, 'output')
     assert_equals(p[0].weights.shape, (16, 4))
     assert_equals(p[0].biases.shape, (4,))
Example #43
 def make(self, activation, seed=1234, train=False, **keywords):
     nn = MLPR(layers=[
         C(activation, channels=16, kernel_shape=(3, 3), **keywords),
         L("Linear")
     ],
               random_state=seed,
               n_iter=1)
     if train:
         nn.fit(self.a_in, self.a_out)
     else:
         nn._initialize(self.a_in, self.a_out)
     return nn
Example #44
    def test_GetLayerParams(self):
        nn = MLPR(layers=[L("Linear")], n_iter=1)
        a_in, a_out = numpy.zeros((8, 16)), numpy.zeros((8, 4))
        nn._initialize(a_in, a_out)

        p = nn.get_parameters()
        assert_equals(type(p), list)
        assert_true(isinstance(p[0], tuple))

        assert_equals(p[0].layer, 'output')
        assert_equals(p[0].weights.shape, (16, 4))
        assert_equals(p[0].biases.shape, (4, ))
Example #45
    def test_SmallSquareKernel(self):
        nn = MLPR(layers=[
            C("Rectifier",
              channels=4,
              kernel_shape=(3, 3),
              border_mode='valid'),
            L("Linear", units=5)
        ])

        a_in = numpy.zeros((8, 32, 32, 1))
        nn._create_specs(a_in)
        assert_equal(nn.unit_counts, [1024, 30 * 30 * 4, 5])
Example #46
    def check(self, a_in, a_out, a_mask):
        nn = MLPR(layers=[L("Linear")],
                  learning_rule='adam',
                  learning_rate=0.1,
                  n_iter=50)
        nn.fit(a_in, a_out, a_mask)
        v_out = nn.predict(a_in)

        # Make sure the examples weighted 1.0 have low error, 0.0 high error.
        print(abs(a_out - v_out).T * a_mask)
        assert_true((abs(a_out - v_out).T * a_mask < 1E-1).all())
        assert_true((abs(a_out - v_out).T * (1.0 - a_mask) > 2.5E-1).any())
Example #47
    def check(self, a_in, a_out, a_mask):
        nn = MLPR(layers=[L("Linear")], learning_rule='adam', learning_rate=0.05, n_iter=250, n_stable=25)
        nn.fit(a_in, a_out, a_mask)
        v_out = nn.predict(a_in)

        # Make sure the examples weighted 1.0 have low error, 0.0 high error.
        masked = abs(a_out - v_out).T * a_mask
        print('masked', masked)
        assert_true((masked < 5.0E-1).all())
        inversed = abs(a_out - v_out).T * (1.0 - a_mask)
        print('inversed', inversed)
        assert_greater(inversed.mean(), masked.mean())
Example #48
    def run_EqualityTest(self, copier, asserter):
        # Only PyLearn2 supports Maxout.
        extra =  ["Maxout"] if sknn.backend.name == 'pylearn2' else []
        for activation in ["Rectifier", "Sigmoid", "Tanh", "ExpLin"] + extra:
            nn1 = MLPR(layers=[L(activation, units=16), L("Linear", units=1)], random_state=1234)
            nn1._initialize(self.a_in, self.a_out)

            nn2 = copier(nn1, activation)
            print('activation', activation)
            a_out1 = nn1.predict(self.a_in)
            a_out2 = nn2.predict(self.a_in)
            print(a_out1, a_out2)
            asserter(numpy.all(numpy.abs(a_out1 - a_out2) < 1E-6))
Example #49
    def test_Upscaling(self):
        nn = MLPR(layers=[
            C("Rectifier",
              channels=4,
              kernel_shape=(1, 1),
              scale_factor=(2, 2),
              border_mode='same'),
            L("Linear", units=5)
        ])

        a_in = numpy.zeros((8, 32, 32, 1))
        nn._create_specs(a_in)
        assert_equal(nn.unit_counts, [1024, 64 * 64 * 4, 5])
Example #50
 def fit_compiled(self, data_matrix_in, target=None):
     data_matrix_out = self._fit_transform(data_matrix_in, target=target)
     n_features_in = data_matrix_in.shape[1]
     n_features_out = data_matrix_out.shape[1]
     n_features_hidden = int(n_features_in * self.deepnet_n_features_hidden_factor)
     layers = []
     for i in range(self.deepnet_n_hidden_layers):
         layers.append(Layer("Rectifier", units=n_features_hidden, name='hidden%d' % i))
     layers.append(Layer("Linear", units=n_features_out))
     self.net = Regressor(layers=layers,
                          learning_rate=self.deepnet_learning_rate,
                          valid_size=0.1)
     self.net.fit(data_matrix_in, data_matrix_out)
     return self.net
Example #51
 def _run(self, activation):
     a_in, a_out = numpy.zeros((8, 32, 16, 1)), numpy.zeros((8, 4))
     nn = MLPR(layers=[
         C(activation,
           channels=4,
           kernel_shape=(3, 3),
           pool_shape=(2, 2),
           pool_type='mean'),
         L("Linear")
     ],
               n_iter=1)
     nn.fit(a_in, a_out)
     a_test = nn.predict(a_in)
     assert_equal(type(a_test), type(a_in))
Example #52
    def __init__(self, params=None, seq_pre_processor=None):
        self.scale = StandardScaler()
        self.pre_processor = seq_pre_processor
        self.params = params

        if params is not None:
            # Initialize the network
            self.net = Regressor(layers=params['layers'],
                                 learning_rate=params['learning_rate'],
                                 n_iter=params['n_iter'],
                                 dropout_rate=params['dropout_rate'],
                                 batch_size=params['batch_size'],
                                 regularize=params['regularize'],
                                 valid_size=params['valid_size'])

            # Initialize the vectorizer
            self.vectorizer = graph.Vectorizer(r=params['radius'],
                                               d=params['d_seq'],
                                               min_r=params['min_r'],
                                               normalization=params['normalization'],
                                               inner_normalization=params['inner_normalization'],
                                               nbits=params['nbits_seq'])
Example #53
class TestLinearNetwork(unittest.TestCase):
    def setUp(self):
        self.nn = MLPR(layers=[L("Linear")], n_iter=1)

    def test_LifeCycle(self):
        del self.nn

    def test_PredictNoOutputUnitsAssertion(self):
        a_in = numpy.zeros((8, 16))
        assert_raises(AssertionError, self.nn.predict, a_in)

    def test_AutoInitializeWithOutputUnits(self):
        self.nn.layers[-1].units = 4
        a_in = numpy.zeros((8, 16))
        self.nn.predict(a_in)

    def test_FitAutoInitialize(self):
        a_in, a_out = numpy.zeros((8, 16)), numpy.zeros((8, 4))
        self.nn.fit(a_in, a_out)
        assert_true(self.nn.is_initialized)

    def test_ResizeInputFrom4D(self):
        a_in, a_out = numpy.zeros((8, 4, 4, 1)), numpy.zeros((8, 4))
        self.nn.fit(a_in, a_out)
        assert_true(self.nn.is_initialized)

    def test_ResizeInputFrom3D(self):
        a_in, a_out = numpy.zeros((8, 4, 4)), numpy.zeros((8, 4))
        self.nn.fit(a_in, a_out)
        assert_true(self.nn.is_initialized)

    def test_FitWrongSize(self):
        a_in, a_out = numpy.zeros((7, 16)), numpy.zeros((9, 4))
        assert_raises(AssertionError, self.nn.fit, a_in, a_out)
Example #54
def train_nn(train_set, validation_set):

    nn = Regressor(
        layers=[Layer("Sigmoid", units=2),
                Layer("Sigmoid")],
        learning_rate=0.0001,
        batch_size=5,
        n_iter=10000,
        valid_set=validation_set,
        verbose=True,
    )
    nn.fit(train_set[0], train_set[1])

    return nn
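A minimal sketch of how train_nn might be called, assuming each set is an `(X, y)` tuple of numpy arrays as `fit` and `valid_set` expect; the shapes and synthetic targets are assumptions:

import numpy as np

X = np.random.uniform(-1.0, 1.0, (100, 4))
y = (X.sum(axis=1, keepdims=True) > 0).astype(float)  # synthetic 0/1 targets
train_set = (X[:80], y[:80])
validation_set = (X[80:], y[80:])
nn = train_nn(train_set, validation_set)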
Example #55
class Learner:
    def __init__(self, iterations=5):
        results = []
        situations = []
        logging.basicConfig()
        for i in range(0, iterations):
            g = Game(print_board=False)
            round_situations = []
            while not g.game_over:
                choices = g.available_cols()
                choice = random.choice(choices)
                round_situations.append(self.game_to_sit(g, choice))
                g.place_piece(choice)
            for situation in round_situations:
                results.append(g.points)
            situations.extend(round_situations)
        #self.pipeline = Pipeline([
        #    ('min/max scaler', MinMaxScaler(feature_range=(0.0, 1.0))),
        #    ('neural network', Regressor(
        self.nn = Regressor(layers=[
                    Layer("Rectifier", units=100),
                    Layer("Linear")],
                learning_rate=0.00002,
                n_iter=10)
        #self.pipeline.fit(np.array(situations), np.array(results))
        print(np.array(situations).shape)
        self.nn.fit(np.array(situations), np.array(results))
        #self.clf = MLPRegressor(algorithm='l-bfgs', alpha=1e-5,
        #                         hidden_layer_sizes=(5, 2), random_state=1)
        #clf.train(situations, results)

    def game_to_sit(self, game, choice):
        sit = [float(item) / 9 for sublist in game.board for item in sublist]
        sit.append(float(choice) / 7)
        sit.append(float(game.level) / 100)
        assert game.level != 0  # the scaled level must be nonzero
        sit.append(float(game.pieces_left) / 30)
        return sit
    
    def pick_move(self, game):
        choices = game.available_cols()
        max_choice, max_val = None, 0
        for c in choices:
            sit = np.array([self.game_to_sit(game, c)])
            final_score_predict = self.nn.predict(sit)[0, 0]  # predict returns a (1, 1) array
            if final_score_predict > max_val:
                max_val = final_score_predict
                max_choice = c
        return max_choice
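A hypothetical driver for the class above, assuming the `Game` class from the same project with the interface used in `__init__` (`game_over`, `available_cols`, `place_piece`, `points`), and that `random` and `numpy as np` are imported as the snippet requires:

learner = Learner(iterations=10)
g = Game(print_board=False)
while not g.game_over:
    move = learner.pick_move(g)
    if move is None:  # every predicted score was <= 0
        move = random.choice(g.available_cols())
    g.place_piece(move)
print(g.points)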