예제 #1
1
class TestLinearNetwork(unittest.TestCase):
    """Exercises lifecycle, auto-initialization and input validation of a
    minimal single-layer linear MLPR."""

    def setUp(self):
        # One Linear output layer; a single iteration keeps training trivial.
        self.nn = MLPR(layers=[L("Linear")], n_iter=1)

    def test_LifeCycle(self):
        # Construction followed by deletion must not raise.
        del self.nn

    def test_PredictNoOutputUnitsAssertion(self):
        # Predicting before the output layer size is known should assert.
        samples = numpy.zeros((8, 16))
        assert_raises(AssertionError, self.nn.predict, samples)

    def test_AutoInitializeWithOutputUnits(self):
        # Setting output units explicitly lets predict() proceed.
        self.nn.layers[-1].units = 4
        samples = numpy.zeros((8, 16))
        self.nn.predict(samples)

    def test_FitAutoInitialize(self):
        # fit() alone should leave the network initialized.
        features, targets = numpy.zeros((8, 16)), numpy.zeros((8, 4))
        self.nn.fit(features, targets)
        assert_true(self.nn.is_initialized)

    def test_ResizeInputFrom4D(self):
        # 4D input is accepted (resized internally, per the test name).
        features, targets = numpy.zeros((8, 4, 4, 1)), numpy.zeros((8, 4))
        self.nn.fit(features, targets)
        assert_true(self.nn.is_initialized)

    def test_ResizeInputFrom3D(self):
        # 3D input is likewise accepted.
        features, targets = numpy.zeros((8, 4, 4)), numpy.zeros((8, 4))
        self.nn.fit(features, targets)
        assert_true(self.nn.is_initialized)

    def test_FitWrongSize(self):
        # Mismatched sample counts (7 vs. 9) must be rejected.
        features, targets = numpy.zeros((7, 16)), numpy.zeros((9, 4))
        assert_raises(AssertionError, self.nn.fit, features, targets)
예제 #2
0
class TestLinearNetwork(unittest.TestCase):
    """Tests auto-initialization and input validation of a linear MLPR."""

    def setUp(self):
        # Minimal regressor: one Linear output layer, one training iteration.
        self.nn = MLPR(layers=[L("Linear")], n_iter=1)

    def test_LifeCycle(self):
        # Creating then deleting the network must not raise.
        del self.nn

    def test_PredictNoOutputUnitsAssertion(self):
        # predict() before the output layer size is known must assert.
        a_in = numpy.zeros((8, 16))
        assert_raises(AssertionError, self.nn.predict, a_in)

    def test_AutoInitializeWithOutputUnits(self):
        # Once output units are set, predict() may auto-initialize weights.
        self.nn.layers[-1].units = 4
        a_in = numpy.zeros((8, 16))
        self.nn.predict(a_in)

    def test_FitAutoInitialize(self):
        # fit() alone should initialize the network.
        a_in, a_out = numpy.zeros((8, 16)), numpy.zeros((8, 4))
        self.nn.fit(a_in, a_out)
        assert_true(self.nn.is_initialized)

    def test_ResizeInputFrom4D(self):
        # 4D (image-like) input is accepted; resized internally per test name.
        a_in, a_out = numpy.zeros((8, 4, 4, 1)), numpy.zeros((8, 4))
        self.nn.fit(a_in, a_out)
        assert_true(self.nn.is_initialized)

    def test_ResizeInputFrom3D(self):
        # 3D input is likewise accepted.
        a_in, a_out = numpy.zeros((8, 4, 4)), numpy.zeros((8, 4))
        self.nn.fit(a_in, a_out)
        assert_true(self.nn.is_initialized)

    def test_FitWrongSize(self):
        # Mismatched sample counts between X (7) and y (9) must assert.
        a_in, a_out = numpy.zeros((7, 16)), numpy.zeros((9, 4))
        assert_raises(AssertionError, self.nn.fit, a_in, a_out)
예제 #3
0
    def run_EqualityTest(self, copier, asserter):
        """Check that a copied network predicts (almost) identically.

        :param copier: callable ``(nn, activation) -> copied network``.
        :param asserter: assertion callable applied to the comparison result.
        """
        # Only PyLearn2 supports Maxout.
        extra = ["Maxout"] if sknn.backend.name == 'pylearn2' else []
        for activation in ["Rectifier", "Sigmoid", "Tanh", "ExpLin"] + extra:
            nn1 = MLPR(layers=[L(activation, units=16), L("Linear", units=1)], random_state=1234)
            nn1._initialize(self.a_in, self.a_out)

            nn2 = copier(nn1, activation)
            print('activation', activation)
            a_out1 = nn1.predict(self.a_in)
            a_out2 = nn2.predict(self.a_in)
            print(a_out1, a_out2)
            # BUG FIX: compare with abs() so large *negative* differences also
            # fail, and reuse the predictions computed above instead of
            # predicting twice more.
            asserter(numpy.all(abs(a_out1 - a_out2) < 1E-6))
예제 #4
0
def NeuralNet(train, test, features):
    """Train an sknn Regressor on train[features] -> train[goal] and write
    per-row test predictions to a CSV under result/.

    Relies on module-level `goal` and `myid` for the target/id column names.
    (Python 2 script: uses print statements.)
    """
    eta = 0.025
    niter = 2000

    # Three hidden layers trained with momentum SGD; verbose prints progress.
    regressor = Regressor(
        layers=[Layer("Rectifier", units=100), Layer("Tanh", units=100), Layer("Sigmoid", units=100), Layer("Linear")],
        learning_rate=eta,
        learning_rule="momentum",
        learning_momentum=0.9,
        batch_size=100,
        valid_size=0.01,
        n_stable=100,
        n_iter=niter,
        verbose=True,
    )

    print regressor.__class__.__name__
    start = time.time()
    regressor.fit(np.array(train[list(features)]), train[goal])
    print "  -> Training time:", time.time() - start

    # Make sure the output directory exists before writing results.
    if not os.path.exists("result/"):
        os.makedirs("result/")
    # TODO: fix this shit
    predictions = regressor.predict(np.array(test[features]))
    try:  # try to flatten a list that might be flattenable.
        predictions = list(itertools.chain.from_iterable(predictions))
    except:
        # NOTE(review): bare except deliberately tolerates non-iterable rows,
        # but it also hides real errors -- consider `except TypeError`.
        pass
    csvfile = "result/dat-nnet-eta%s-niter%s.csv" % (str(eta), str(niter))
    with open(csvfile, "w") as output:
        writer = csv.writer(output, lineterminator="\n")
        # Header row holds the id/goal column names, then one row per sample.
        writer.writerow([myid, goal])
        for i in range(0, len(predictions)):
            writer.writerow([i + 1, predictions[i]])
class ClassificationTools():
	"""Thin wrapper that trains and serializes an sknn Regressor.

	If `filepath` is given, a previously pickled model is loaded instead of
	storing the input/output vectors. (Python 2 code: uses `file()`.)
	"""

	def __init__(self, inputVector=[], outputVector=[], filepath=''):
		# NOTE(review): mutable default arguments are shared between calls;
		# harmless here because they are only read, but prefer None defaults.
		if filepath == '':
			self.inputVector = numpy.asarray(inputVector)
			self.outputVector = numpy.asarray(outputVector)
			self.model = None
		else:
			# Load a previously serialized model (see serializeModel below).
			self.model = pickle.load(file(filepath, 'r'))

	def setVectors(self, inputVector, outputVector):
		# Replace the stored training vectors (does not retrain the model).
		self.inputVector = numpy.asarray(inputVector)
		self.outputVector = numpy.asarray(outputVector)


	def trainMultilayerPerceptron(self, hlunits=10000, learningRate=0.01, iters=1000):
		# trains a simple MLP with a single hidden layer
		self.model = Regressor(
			layers=[
				Layer("Rectifier", units=hlunits),
				Layer("Linear")],
			learning_rate=learningRate,
			n_iter=iters)
		self.model.fit(self.inputVector, self.outputVector)

	def predict(self, toPredict):
		prediction = self.model.predict(numpy.asarray(toPredict))
		return prediction # this will be a 1D numpy array of floats

	def trainDeepNetwork(self):
		# trains a deep network based a multi layer autoencoder
		# which is then fine tuned using an MLP
		pass

	def serializeModel(self, filepath):
		# Pickle the trained model to disk (Python 2 `file()` open).
		pickle.dump(self.model, file(filepath, 'w'))
예제 #6
0
    def run_EqualityTest(self, copier, asserter):
        """For each activation, verify a copied network predicts identically."""
        activations = ["Rectifier", "Sigmoid", "Maxout", "Tanh"]
        for act in activations:
            hidden = L(act, units=16, pieces=2)
            output = L("Linear", units=1)
            original = MLPR(layers=[hidden, output], random_state=1234)
            original._initialize(self.a_in, self.a_out)

            duplicate = copier(original, act)
            identical = numpy.all(original.predict(self.a_in) == duplicate.predict(self.a_in))
            asserter(identical)
def neural_net(features,target,test_size_percent=0.2,cv_split=3,n_iter=100,learning_rate=0.01):
    '''Features -> Pandas Dataframe with attributes as columns
        target -> Pandas Dataframe with target column for prediction
        Test_size_percent -> Percentage of data point to be used for testing'''
    # Scale features and target to [0, 1].
    # NOTE(review): the same scaler instance is re-fit on the target, so the
    # feature scaling parameters are discarded -- confirm this is intended.
    scale=preprocessing.MinMaxScaler()
    X_array = scale.fit_transform(features)
    y_array = scale.fit_transform(target)
    # NOTE(review): learning_rate is hard-coded to 0.01 below; the function's
    # `learning_rate` parameter is never used.
    mlp = Regressor(layers=[Layer("Rectifier",units=5), # Hidden Layer1
                            Layer("Rectifier",units=3)  # Hidden Layer2
                            ,Layer("Linear")],     # Output Layer
                        n_iter = n_iter, learning_rate=0.01)
    X_train, X_test, y_train, y_test = train_test_split(X_array, y_array.T.squeeze(), test_size=test_size_percent, random_state=4)
    mlp.fit(X_train,y_train)
    test_prediction = mlp.predict(X_test)
    tscv = TimeSeriesSplit(cv_split)

    # Cross-validated scores/predictions over time-series splits.
    training_score = cross_val_score(mlp,X_train,y_train,cv=tscv.n_splits)
    testing_score = cross_val_score(mlp,X_test,y_test,cv=tscv.n_splits)
    print"Cross-val Training score:", training_score.mean()
#    print"Cross-val Testing score:", testing_score.mean()
    training_predictions = cross_val_predict(mlp,X_train,y_train,cv=tscv.n_splits)
    testing_predictions = cross_val_predict(mlp,X_test,y_test,cv=tscv.n_splits)

    # R^2 of the cross-validated predictions.
    training_accuracy = metrics.r2_score(y_train,training_predictions)
#    test_accuracy_model = metrics.r2_score(y_test,test_prediction_model)
    test_accuracy = metrics.r2_score(y_test,testing_predictions)

#    print"Cross-val predicted accuracy:", training_accuracy
    print"Test-predictions accuracy:",test_accuracy

    plot_model(target,y_train,y_test,training_predictions,testing_predictions)
    return mlp
예제 #8
0
class NeuralRegLearner(object):
    """Regression learner backed by a small sknn feed-forward network."""

    def __init__(self, verbose = False):
        self.name = "Neural net Regression Learner"
        hidden = Layer("Rectifier", units=100)
        output = Layer("Linear")
        self.network = Regressor(layers=[hidden, output],
                                 learning_rate=0.02,
                                 n_iter=10)

    def addEvidence(self,dataX,dataY):
        """
        @summary: Add training data to learner
        @param dataX: X values of data to add
        @param dataY: the Y training values
        """
        self.network.fit(np.array(dataX), np.array(dataY))

    def query(self,points):
        """
        @summary: Estimate a set of test points given the model we built.
        @param points: should be a numpy array with each row corresponding to a specific query.
        @returns the estimated values according to the saved model.
        """
        return self.network.predict(points)
예제 #9
0
def gamma():
    value_map = {'warm': 1.0, 'neutral': 0.5, 'cold': 0.0}

    X = data["x"][:, [0, 1, 2, 5, 6]]
    X = np.abs(X)
    maxX = np.amax(X, axis=0)
    minX = np.amax(X, axis=0)
    X = (X - minX) / maxX
    Y = data["y"][:, 1]
    Y = np.asarray([value_map[y] for y in Y])

    split_data = cross_validation.train_test_split(X, Y, test_size=0.2)
    X_train = split_data[0]
    X_test = split_data[1]
    Y_train = split_data[2]
    Y_test = split_data[3]

    nn = Regressor(
        layers=[
            Layer("Rectifier", units=3),
            Layer("Linear")],
        learning_rate=1e-3,
        n_iter=100)

    nn.fit(X_train, Y_train)

    print 'inosity accuracy'
    prediction = nn.predict(X_test)
    prediction = [closest(y[0]) for y in prediction]
    Y_test = [closest(y) for y in Y_test]
    print metrics.accuracy_score(prediction, Y_test)
예제 #10
0
 def _run(self, activation):
     """Train a tiny conv+linear net on zero data and sanity-check predict().

     :param activation: name of the convolutional activation to test.
     """
     a_in, a_out = numpy.zeros((8, 32, 16, 1)), numpy.zeros((8, 4))
     nn = MLPR(
         layers=[C(activation, channels=4, kernel_shape=(3, 3), pool_shape=(2, 2), pool_type="mean"), L("Linear")],
         n_iter=1,
     )
     nn.fit(a_in, a_out)
     a_test = nn.predict(a_in)
     # BUG FIX: the original compared type(a_out) with type(a_in) -- two
     # arrays created locally -- so the prediction `a_test` was never checked.
     assert_equal(type(a_test), type(a_in))
예제 #11
0
    def check(self, a_in, a_out, a_mask):
        """Fit with per-sample weights and assert weighted samples fit best."""
        model = MLPR(layers=[L("Linear")], learning_rule='adam', learning_rate=0.1, n_iter=50)
        model.fit(a_in, a_out, a_mask)
        predicted = model.predict(a_in)

        # Examples weighted 1.0 should have low error; weighted 0.0, high.
        residual = abs(a_out - predicted).T
        print(residual * a_mask)
        assert_true((residual * a_mask < 1E-1).all())
        assert_true((residual * (1.0 - a_mask) > 2.5E-1).any())
예제 #12
0
    def check(self, a_in, a_out, a_mask):
        """Fit with a sample-weight mask and compare masked vs. unmasked error."""
        net = MLPR(layers=[L("Linear")], learning_rule='adam', learning_rate=0.05, n_iter=250, n_stable=25)
        net.fit(a_in, a_out, a_mask)
        v_out = net.predict(a_in)

        # Examples weighted 1.0 must reach low error; 0.0-weighted stay worse.
        err = abs(a_out - v_out).T
        masked = err * a_mask
        print('masked', masked)
        assert_true((masked < 5.0E-1).all())
        inversed = err * (1.0 - a_mask)
        print('inversed', inversed)
        assert_greater(inversed.mean(), masked.mean())
예제 #13
0
def neural_net(features,
               target,
               test_size_percent=0.2,
               cv_split=3,
               n_iter=100,
               learning_rate=0.01):
    '''Features -> Pandas Dataframe with attributes as columns
        target -> Pandas Dataframe with target column for prediction
        Test_size_percent -> Percentage of data point to be used for testing'''
    # Scale features and target to [0, 1].
    # NOTE(review): the single scaler is re-fit on the target, discarding the
    # feature scaling parameters -- confirm this is intended.
    scale = preprocessing.MinMaxScaler()
    X_array = scale.fit_transform(features)
    y_array = scale.fit_transform(target)
    # NOTE(review): learning_rate is hard-coded at 0.01 below; the function's
    # `learning_rate` parameter is unused.
    mlp = Regressor(
        layers=[
            Layer("Rectifier", units=5),  # Hidden Layer1
            Layer("Rectifier", units=3)  # Hidden Layer2
            ,
            Layer("Linear")
        ],  # Output Layer
        n_iter=n_iter,
        learning_rate=0.01)
    X_train, X_test, y_train, y_test = train_test_split(
        X_array,
        y_array.T.squeeze(),
        test_size=test_size_percent,
        random_state=4)
    mlp.fit(X_train, y_train)
    test_prediction = mlp.predict(X_test)
    tscv = TimeSeriesSplit(cv_split)

    # Cross-validated scores/predictions over time-series splits.
    training_score = cross_val_score(mlp, X_train, y_train, cv=tscv.n_splits)
    testing_score = cross_val_score(mlp, X_test, y_test, cv=tscv.n_splits)
    print "Cross-val Training score:", training_score.mean()
    #    print"Cross-val Testing score:", testing_score.mean()
    training_predictions = cross_val_predict(mlp,
                                             X_train,
                                             y_train,
                                             cv=tscv.n_splits)
    testing_predictions = cross_val_predict(mlp,
                                            X_test,
                                            y_test,
                                            cv=tscv.n_splits)

    # R^2 of the cross-validated predictions.
    training_accuracy = metrics.r2_score(y_train, training_predictions)
    #    test_accuracy_model = metrics.r2_score(y_test,test_prediction_model)
    test_accuracy = metrics.r2_score(y_test, testing_predictions)

    #    print"Cross-val predicted accuracy:", training_accuracy
    print "Test-predictions accuracy:", test_accuracy

    plot_model(target, y_train, y_test, training_predictions,
               testing_predictions)
    return mlp
예제 #14
0
    def check(self, a_in, a_out, a_mask):
        """Train with per-sample weights; weighted samples must fit well."""
        params = dict(learning_rule='adam', learning_rate=0.1, n_iter=50)
        nn = MLPR(layers=[L("Linear")], **params)
        nn.fit(a_in, a_out, a_mask)
        v_out = nn.predict(a_in)

        # Make sure the examples weighted 1.0 have low error, 0.0 high error.
        diff = abs(a_out - v_out).T
        print(diff * a_mask)
        assert_true((diff * a_mask < 1E-1).all())
        assert_true((diff * (1.0 - a_mask) > 2.5E-1).any())
예제 #15
0
 def _run(self, activation):
     """Fit a conv+linear net on zero data and sanity-check the prediction.

     :param activation: name of the convolutional activation to test.
     """
     a_in, a_out = numpy.zeros((8, 32, 16, 1)), numpy.zeros((8, 4))
     nn = MLPR(layers=[
         C(activation,
           channels=4,
           kernel_shape=(3, 3),
           pool_shape=(2, 2),
           pool_type='mean'),
         L("Linear")
     ],
               n_iter=1)
     nn.fit(a_in, a_out)
     a_test = nn.predict(a_in)
     # BUG FIX: assert on the prediction's type; the original compared the
     # two arrays it had just created, so `a_test` was never actually checked.
     assert_equal(type(a_test), type(a_in))
예제 #16
0
class Learner:
    """Plays random games, then fits a neural net that predicts the final
    score from board situations (Python 2 script)."""

    def __init__(self, iterations=5):
        # Collect (situation, final score) pairs from random play.
        results = []
        situations = []
        logging.basicConfig()
        for i in range(0, iterations):
            g = Game(print_board=False)
            round_situations = []
            while not g.game_over:
                choices = g.available_cols()
                choice = random.choice(choices)
                round_situations.append(self.game_to_sit(g, choice))
                g.place_piece(choice)
            # Label every situation of the round with the game's final points.
            for situation in round_situations:
                results.append(g.points)
            situations.extend(round_situations)
        #self.pipeline = Pipeline([
        #    ('min/max scaler', MinMaxScaler(feature_range=(0.0, 1.0))),
        #    ('neural network', Regressor(
        self.nn = Regressor(layers=[
                    Layer("Rectifier", units=100),
                    Layer("Linear")],
                learning_rate=0.00002,
                n_iter=10)
        #self.pipeline.fit(np.array(situations), np.array(results))
        print np.array(situations).shape
        self.nn.fit(np.array(situations), np.array(results))
        #self.clf = MLPRegressor(algorithm='l-bfgs', alpha=1e-5,
        #                         hidden_layer_sizes=(5, 2), random_state=1)
        #clf.train(situations, results)

    def game_to_sit(self, game, choice):
        # Flatten the board plus move metadata into a normalized feature list.
        sit = [float(item) / 9 for sublist in game.board for item in sublist]
        sit.append(float(choice) / 7)
        sit.append(float(game.level) / 100)
        # NOTE(review): this assert only checks the level is non-zero and is
        # stripped under `python -O` -- confirm it is intentional.
        assert(float(game.level) / 100)
        sit.append(float(game.pieces_left) / 30)
        return sit

    def pick_move(self, game):
        # Greedy selection: pick the column with the highest predicted score.
        choices = game.available_cols()
        max_choice, max_val = None, 0
        for c in choices:
            sit = np.array([self.game_to_sit(game, c)])
            final_score_predict = self.nn.predict(sit)
            if final_score_predict > max_val:
                max_val = final_score_predict
                max_choice = c
        return max_choice
예제 #17
0
    def check(self, a_in, a_out, a_mask):
        """Fit with a mask of sample weights and verify error separation."""
        settings = dict(learning_rule='adam',
                        learning_rate=0.05,
                        n_iter=250,
                        n_stable=25)
        nn = MLPR(layers=[L("Linear")], **settings)
        nn.fit(a_in, a_out, a_mask)
        v_out = nn.predict(a_in)

        # Weighted (1.0) examples must have low error; ignored (0.0) stay high.
        error = abs(a_out - v_out).T
        masked = error * a_mask
        print('masked', masked)
        assert_true((masked < 4.0E-1).all())
        inversed = error * (1.0 - a_mask)
        print('inversed', inversed)
        assert_greater(inversed.mean(), masked.mean())
예제 #18
0
class RegressorNeuralNet():
    """Wraps a three-layer sknn Regressor trained on the echo dataset."""

    def __init__(self):
        topology = [Layer("Sigmoid", units=100),
                    Layer("Sigmoid", units=47),
                    Layer("Linear")]
        self.nn = Regressor(layers=topology,
                            learning_rate=0.02,
                            n_iter=200)

    def train(self):
        # Load the CSV training set and fit the network on it.
        data = parser.load_echo_data('data/training_data.csv')
        self.nn.fit(data.data, data.target)

    def predictData(self, data):
        # Delegate straight to the trained network.
        return self.nn.predict(data)
예제 #19
0
파일: neural.py 프로젝트: adshi/APM-project
def test():
    """Load review data, train a Sigmoid/Softmax regressor over the word
    features, and print a baseline RMSE plus R^2 of the predictions."""
    boom = pd.read_csv('boommagain.csv')
    boom = boom.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis = 1)
    # Columns 8..88 hold the word features; 'Rating' is the target.
    words = boom[list(boom.columns.values)[8:89]]
    y = boom['Rating']
    x = words
    print(y.shape, type(y))
    print(y[:10])
    print(x.shape, type(x))

    X_train, X_test, y_train, y_test = train_test_split(x, y, test_size = 0.4, random_state = 1)

    # NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0; newer
    # pandas needs .values / .to_numpy().
    X_train = X_train.as_matrix()
    y_train = y_train.as_matrix()
    y_test = y_test.as_matrix()
    X_test = X_test.as_matrix()
    print(y_train[:10])
    print(X_train.shape, type(X_train))
    print(y_train.shape, type(y_train))

    # Baseline: sqrt of the mean squared target (RMSE of an all-zeros guess).
    y_test = Series(y_test)
    squared = [pow(x, 2) for x in y_test]
    avg = mean(squared)
    RMSE = sqrt(avg)
    print(RMSE)

    # Grid over network width and epochs (currently a single combination).
    for num_nodes in [20]:
        for epoch in [50]:
            nn = Regressor(
            layers=[
                Layer("Sigmoid", units=num_nodes),
                Layer("Softmax")],
            learning_rate=0.01,
            n_iter=epoch)
            nn.fit(X_train, y_train)
            y_pred = nn.predict(X_test)
            print(y_pred.shape)
            # y_pred = Series(y_pred)
            # y_test = Series(y_test)
            # diff = [y_pred[i] - y_test[i] for i in range(len(y_pred))]
            # squared = [pow(x, 2) for x in diff]
            # avg = mean(squared)
            # RMSE = sqrt(avg)
            # print(RMSE)

            print(metrics.r2_score(y_test, y_pred))
예제 #20
0
def colour():
    Y = data["y"][:, 2]
    vals = np.unique(Y);
    value_map = {};
    for i in range(0, len(vals)):
        value_map[vals[i]] = (0.0 + i) / (len(vals) - 1)
    value_values = value_map.values()
    keys = value_map.keys()

    Ya = []
    for a in Y:
        k = []
        for i in range(0, len(keys)):
            k.append(0.0)
        k[keys.index(a)] = 1.0
        Ya.append(k)
    Y = np.asarray(Ya)
    
    X = data["x"][:, [0, 1, 2, 3, 4, 5, 6, 7]]
    X = np.abs(X)
    maxX = np.amax(X, axis=0)
    minX = np.amax(X, axis=0)
    X = (X - minX) / maxX

    split_data = cross_validation.train_test_split(X, Y, test_size=0.2)
    X_train = split_data[0]
    X_test = split_data[1]
    Y_train = split_data[2]
    Y_test = split_data[3]

    nn = Regressor(
        layers=[
            Layer("Linear", units=9),
            Layer("Softmax", units=9)],
        learning_rate=5e-2,
        n_iter=100)

    nn.fit(X_train, Y_train)

    print 'colour accuracy'
    prediction = nn.predict(X_test)
    prediction = [np.argmax(y) for y in prediction]
    Y_test = [np.argmax(y) for y in Y_test]
    print metrics.accuracy_score(prediction, Y_test)
예제 #21
0
def evalOne(parameters):
    """Train and evaluate one sknn Regressor configuration per location.

    :param parameters: dict with keys "hidden_layers", "hidden_type",
        "hidden_neurons", "learning_rate", "iteration".
    :returns: overall RMSE across all locations (relies on module-level
        `locations`, `data`, `all_features`, `splitDataForXValidation`,
        `rmseEval`).
    """
    all_obs = []
    all_pred = []
    for location in locations:
        trainX, testX, trainY, testY = splitDataForXValidation(
            location, "location", data, all_features, "target")
        # Standardize features and targets per split.
        normalizer_X = StandardScaler()
        trainX = normalizer_X.fit_transform(trainX)
        testX = normalizer_X.transform(testX)
        normalizer_Y = StandardScaler()
        trainY = normalizer_Y.fit_transform(trainY)
        testY = normalizer_Y.transform(testY)

        # Build the requested stack of hidden layers plus a linear output.
        layers = []
        for _ in range(0, parameters["hidden_layers"]):
            layers.append(
                Layer(parameters["hidden_type"],
                      units=parameters["hidden_neurons"]))
        layers.append(Layer("Linear"))
        model = Regressor(layers=layers,
                          learning_rate=parameters["learning_rate"],
                          n_iter=parameters["iteration"],
                          random_state=42)

        # BUG FIX: the original called fit() twice in a row on the same data,
        # accidentally training the network for double the intended duration.
        # Train exactly once.
        model.fit(np.array(trainX), np.array(trainY))

        prediction = model.predict(testX)
        prediction = normalizer_Y.inverse_transform(prediction)
        testY = normalizer_Y.inverse_transform(testY)

        print("location: " + str(location) + " -> " +
              str(rmseEval(prediction, testY)[1]))

        all_obs.extend(testY)
        all_pred.extend(prediction)

    # NOTE(review): argument order differs from the per-location call above
    # (prediction, testY) -- confirm rmseEval is symmetric in its arguments.
    return rmseEval(all_obs, all_pred)[1]
    def classify(self, x_train, y_train, x_test):
        """Fit a Rectifier/Linear sknn Regressor on the training pairs and
        return its predictions for ``x_test``."""
        features = np.array(x_train)
        labels = np.array(y_train)
        queries = np.array(x_test)

        nn = Regressor(layers=[Layer('Rectifier', units=400), Layer('Linear')], learning_rate=0.02, n_iter=10)
        log_to_info('Fitting a NN to labeled training data...')
        nn.fit(np.array(features), np.array(labels))
        log_to_info('Predicting test value')
        y_test = nn.predict(np.array(queries))
        log_to_info('Done!')

        return y_test
예제 #23
0
def NeuralNet(train,test,features):
    """Train a three-hidden-layer sknn Regressor on train[features] and write
    test-set predictions to a CSV in result/ (Python 2; relies on
    module-level `goal` and `myid`)."""
    eta = 0.025
    niter = 2000

    # Momentum SGD over Rectifier/Tanh/Sigmoid hidden layers.
    regressor = Regressor(
                      layers=[
                          Layer('Rectifier', units=100),
                          Layer("Tanh", units=100),
                          Layer("Sigmoid", units=100),
                          Layer('Linear')],
                      learning_rate=eta,
                      learning_rule='momentum',
                      learning_momentum=0.9,
                      batch_size=100,
                      valid_size=0.01,
                      n_stable=100,
                      n_iter=niter,
                      verbose=True)

    print regressor.__class__.__name__
    start = time.time()
    regressor.fit(np.array(train[list(features)]), train[goal])
    print '  -> Training time:', time.time() - start

    # Make sure the output directory exists before writing results.
    if not os.path.exists('result/'):
        os.makedirs('result/')
    # TODO: fix this shit
    predictions = regressor.predict(np.array(test[features]))
    try: # try to flatten a list that might be flattenable.
        predictions = list(itertools.chain.from_iterable(predictions))
    except:
        # NOTE(review): bare except silently swallows all errors here --
        # narrowing to TypeError would be safer.
        pass
    csvfile = 'result/dat-nnet-eta%s-niter%s.csv' % (str(eta),str(niter))
    with open(csvfile, 'w') as output:
        writer = csv.writer(output, lineterminator='\n')
        # Header row holds the id/goal column names, then one row per sample.
        writer.writerow([myid,goal])
        for i in range(0, len(predictions)):
            writer.writerow([i+1,predictions[i]])
예제 #24
0
class DeepNeuralNetwork(object):
    """Regressor over vectorized sequence graphs: converts sequences into a
    standardized feature matrix and trains an sknn network on it."""

    def __init__(self, params=None, seq_pre_processor=None):

        self.scale = StandardScaler()
        self.pre_processor = seq_pre_processor
        self.params = params

        # NOTE(review): prefer `params is not None`; also the vectorizer is
        # only created when params are given, so load() must be called first
        # when constructing without params.
        if params != None:
            # Initialize the network
            self.net = Regressor(layers=params['layers'], learning_rate=params['learning_rate'], n_iter=params['n_iter'], dropout_rate=params['dropout_rate'],
                                     batch_size=params['batch_size'], regularize=params['regularize'], valid_size=params['valid_size'])

            # Initialize the vectorizer
            self.vectorizer = graph.Vectorizer(r=params['radius'], d=params['d_seq'], min_r=params['min_r'], normalization=params['normalization'],
                                                    inner_normalization=params['inner_normalization'], nbits=params['nbits_seq'])


    # Save the neural network object to the model_name path
    def save(self, model_name=None):

        joblib.dump(self, model_name , compress=1)

    # Loads the neural network object from its respective path
    def load(self, model_name=None):

        self.__dict__.update(joblib.load(model_name).__dict__)

    # Converts sequences to matrix
    def seq_to_data_matrix(self, sequences=None):

        # Transform sequences to matrix
        graphs = mp_pre_process(sequences, pre_processor=self.pre_processor, pre_processor_args={}, n_jobs=-1)

        seq_data_matrix = vectorize(graphs, vectorizer=self.vectorizer, n_jobs=-1)

        # Densify the matrix
        seq_data_matrx = seq_data_matrix.toarray()

        # Standardize the matrix
        # NOTE(review): the scaler is re-fit on every call, so train and test
        # matrices are standardized independently -- confirm this is intended.
        self.scale.fit(seq_data_matrx)
        std_seq_data_matrx = self.scale.transform(seq_data_matrx)

        return std_seq_data_matrx

    # Training the network using traing sequences and the train structure matrix
    def fit(self, sequences=None, X_struct_std_train=None):

        # Convert sequences to data matrix
        X_seq_std_train = self.seq_to_data_matrix(sequences)

        # Train the network
        self.net.fit(X_seq_std_train, X_struct_std_train)

    # Predict the structure data matrix using testing sequences
    def predict(self, sequences=None):

        # Convert sequences to data matrix
        X_seq_std_test = self.seq_to_data_matrix(sequences)

        # Predict the output matrix
        pred_data_matrix_out = self.net.predict(X_seq_std_test)

        return pred_data_matrix_out

    # Function to train the network and predict the testing data
    def fit_predict(self, sequences_train=None, sequences_test=None, struct_matrix_train=None):

        # Training the network using training sequences and the train structure matrix
        self.fit(sequences_train, struct_matrix_train)

        #Transform seq features to struct features using testing sequences
        return self.predict(sequences_test)
예제 #25
0
# Sweep dataset sizes (60, 600, ..., 600000) and record the mean relative
# prediction error of a Tanh/Linear regressor at each size (Python 2 script).
err_mse=[]
for dim in 6*10**np.arange(1,6):
    [train_set, valid_set,test_set]=rand_images(dim)
    test_set_x, test_set_y = shape(test_set)
    valid_set_x, valid_set_y = shape(valid_set)
    train_set_x, train_set_y = shape(train_set)
    sz=np.shape(train_set_x)
    # Hidden layer width matches the input dimensionality.
    nn = Regressor(
        layers=[
            Layer("Tanh", units=sz[1]),
            Layer("Linear")],
            learning_rate=0.02,
            n_iter=10,
            batch_size=20)
    nn.fit(train_set_x, train_set_y)
    y_pred=nn.predict(test_set_x)
    # compute mse error
    print '.... computing error of prediction for the dataset size', str(dim)
    # Relative error per sample; absolute error where the target is zero.
    rel_error=[]
    I=0
    for x in test_set_y:
        if x!=0:
            rel_error.append(np.abs(y_pred[I]-x)/np.abs(x))
        else:
            rel_error.append(np.abs(y_pred[I]-x))
        I=I+1
    err_mse.append(np.mean(rel_error))
    #err_mse.append(np.mean((y_pred-test_set_y)**2))
    print err_mse
# Persist the error curve for later plotting/analysis.
f = gzip.open('mlp_errors.pkl.gz','wb')
cPickle.dump(err_mse, f, protocol=2)
예제 #26
0
    # Train the regressor used below for reconstruction-error analysis
    # (saved as 'autoencoder.pkl'); 25% of the data is held out for validation.
    nn = Regressor(
        layers=[Layer("Rectifier", units=30),
                Layer("Linear")],
        learning_rate=0.01,
        batch_size=100,
        #learning_rule = "momentum",
        n_iter=2000,
        valid_size=0.25)
    # Training
    nn.fit(X_train, Y_train)
    pickle.dump(nn, open('autoencoder.pkl', 'wb'))
if not runTraining:
    # Reuse the previously trained model instead of retraining.
    nn = pickle.load(open('autoencoder.pkl', 'rb'))

# Testing
predicted_same = nn.predict(X_train)
predicted_diff = nn.predict(X_test)
predicted_signal = nn.predict(X_signal)

# Reconstruction error
rec_errors_same = reconstructionError(X_train, predicted_same)
rec_errors_diff = reconstructionError(X_test, predicted_diff)
rec_errors_sig = reconstructionError(X_signal, predicted_signal)

# Reconstruction errors by variable
rec_errors_varwise_same = reconstructionErrorByFeature(X_train, predicted_same)
rec_errors_varwise_diff = reconstructionErrorByFeature(X_test, predicted_diff)
rec_errors_varwise_sig = reconstructionErrorByFeature(X_signal,
                                                      predicted_signal)

# Plotting - reconstruction errors
예제 #27
0
파일: mapping.py 프로젝트: zted/multimodal

#define CCA
if model == 'CCA':
    from sklearn.cross_decomposition import CCA
    nn = CCA(copy=True, max_iter=500, n_components=1, scale=False, tol=1e-06)
    nn.fit(X_train, y_train)


#TODO: add autoencoder-pretrain


######################
# PREDICTION         #
######################
y_predicted = nn.predict(X_test)  # predict


#################
#  EVALUATION   # (to evaluate how well the REGRESSION did). For now we evaluate in the TRAINING DATA
#################
#TEST DATA
# R^2 measure
R2 = nn.score(X_test, y_test)
print('R^2_test= ',R2) #evaluating predictions with R^2
# My EVALUATION metric (mean cosine similarity)
cos = 0
# BUG FIX: include the first test row (the original loop started at index 1)
# and average over the number of *test* samples -- the original divided by
# y_train.shape[0] while summing over y_test rows.
for i in range(y_test.shape[0]):
    cos = cos + np.dot(y_predicted[i,], y_test[i,]) / (np.linalg.norm(y_test[i,]) * np.linalg.norm(y_predicted[i,]))
meanCos = cos / y_test.shape[0]
# Coefficient of variation of the earlier CV predictions (predict_DF_RF_CV,
# presumably from a random-forest model, comes from earlier in the script).
mean_squared_error_DF_CV = mean_squared_error(
    predict_DF_RF_CV["AC_cons"], predict_DF_RF_CV["AC_ConsPred_RF_CV"])
coeff_variation_DF_CV = np.sqrt(
    mean_squared_error_DF_CV) / predict_DF_RF_CV["AC_cons"].mean()

from sknn.mlp import Regressor, Layer
# Neural-network model over the normalized train/test features.
reg_NN = Regressor(
    layers=[
        Layer("Rectifier", units=5),  # Hidden Layer1
        Layer("Rectifier", units=3),  # Hidden Layer2
        Layer("Linear")
    ],  # Output Layer
    n_iter=100,
    learning_rate=0.02)
# NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0; newer pandas
# needs .values / .to_numpy().
reg_NN.fit(X_train_norm.as_matrix(), y_train_norm.as_matrix())
predict_DF_NN = reg_NN.predict(X_test_norm.as_matrix())

# Align predictions with the test index and drop rows without targets.
predict_DF_NN_CV = pd.DataFrame(predict_DF_NN,
                                index=y_test_norm.index,
                                columns=["AC_ConsPred_NN_CV"])
predict_DF_NN_CV = predict_DF_NN_CV.join(y_test_norm).dropna()
predict_DF_NN_CV['2014-08-01':'2014-08-20'].plot()

# Evaluation metrics for the NN predictions.
R2_score_DF_NN_CV = r2_score(predict_DF_NN_CV["AC_cons"],
                             predict_DF_NN_CV["AC_ConsPred_NN_CV"])
mean_absolute_error_DF_CV = mean_absolute_error(
    predict_DF_NN_CV["AC_cons"], predict_DF_NN_CV["AC_ConsPred_NN_CV"])
mean_squared_error_DF_CV = mean_squared_error(
    predict_DF_NN_CV["AC_cons"], predict_DF_NN_CV["AC_ConsPred_NN_CV"])
coeff_variation_DF_CV = np.sqrt(
    mean_squared_error_DF_CV) / predict_DF_NN_CV["AC_cons"].mean()
# Load the digit train/test CSVs (Python 2 script).
# NOTE(review): '\d' in these paths is a literal backslash-d today but is an
# invalid escape in future Python -- prefer raw strings or forward slashes.
trainSet = pd.read_csv('IOFolder\digit-train.csv', encoding="ISO-8859-1")
testSet = pd.read_csv('IOFolder\digit-test.csv', encoding="ISO-8859-1")

trainSetFeatures = trainSet.drop(['label'], axis=1).values #id and relevance is not features to use
trainSetLabels = trainSet['label'].values

testSetFeatures = testSet.drop(['label'], axis=1).values
testSetLabels = testSet['label'].values


print "\nBegin training..."
#train the model
# One Sigmoid hidden layer feeding a single Softmax output unit.
hidLayer1=Layer(type="Sigmoid",units=10)
outputLayer=Layer(type="Softmax",units=1)
network_topology=[hidLayer1,outputLayer]
feedforwardNN=Regressor(layers=network_topology)
print "\nBegin fit..."
feedforwardNN.fit(trainSetFeatures,trainSetLabels)


print "\nBegin prediction..."
#make the prediction on the test set
predictedLabels = feedforwardNN.predict(testSetFeatures)

print "\nOutput the result..."
#output the prediction
testSetId = testSet['id']
pd.DataFrame({"id": testSetId, "relevance": predictedLabels}).to_csv('IOFolder/neural_network_results.csv', index=False)

# Report RMSE/MAE of the predictions against the true labels.
print "RMSE :\t", utils.getRMSE(testSetLabels, predictedLabels)
print "MAE :\t", utils.getMAE(testSetLabels, predictedLabels)
예제 #30
0
else:
    # Cache miss: train the forward model and persist it under forward_df.
    print "training %s" % forward_df
    forward = Regressor(
        layers=[
            Layer(HIDDEN_LYR_ACT, units=N_HIDDEN),
            Layer("Linear", units=Y_forward_train.shape[1], dropout=DROPOUT),
        ],
        # learning_rate=0.02, verbose=True)
        learning_rate=LR,
        n_iter=N_ITER,
        verbose=True,
    )
    forward.fit(X_forward_train, Y_forward_train)
    joblib.dump(forward, forward_df)

Y_forward_pred = forward.predict(X_forward_test)

# One subplot per output component: prediction vs. target.
numcomps = Y_forward_pred.shape[1]
for i in range(Y_forward_pred.shape[1]):
    plt.subplot(numcomps, 1, i + 1)
    plt.plot(Y_forward_pred[:, i], "k-", label="prediction")
    plt.plot(Y_forward_test[:, i], "k--", label="target")
    plt.legend()
plt.show()

################################################################################
# inverse model: load from disk if a cached copy exists
inverse_df = "point_mass_lr_inverse_model"

if os.path.exists(inverse_df):
    print "loading %s" % inverse_df
예제 #31
0
class Learn:
    """Game-playing trainer: a neural network scores (game image, move
    sequence) pairs with an estimated probability of surviving the
    sequence, and is trained from random rollouts of the MJ game.
    """

    nx = 20                     # board width in cells
    ny = 20                     # board height in cells
    n_cell = nx * ny            # pixels fed to the network
    n_coups = 8                 # length of a move sequence (bits)
    coups_sautes = 60           # frames skipped on each restart

    def __init__(self, new=False, display=False):
        """Create the game and the network.

        new=True builds a fresh Regressor; otherwise the pickled network
        'nn.pkl' is reloaded.  One dummy fit initializes/warms the net.
        """
        self.possibilities = generate(Learn.n_coups)
        np.random.shuffle(self.possibilities)
        self.explore = 0.
        self.jeu = MJ.Jeu(autorepeat=False, display=display)
        self.jeu.restart(Learn.coups_sautes)
        self.image = self.get_image()
        # // keeps integer indexing under both Python 2 and Python 3.
        seed_moves = self.possibilities[Learn.n_coups // 2 - 1]
        if new:
            self.nn = Regressor(layers=[Layer("Linear", units=(Learn.n_cell + Learn.n_coups)),
                                        Layer("Sigmoid", units=1000),
                                        Layer("Sigmoid")],
                                learning_rate=0.01, n_iter=1)
            self.nn.fit(self.good_shape(self.image, seed_moves), np.array([[0]]))
        else:
            self.nn = pickle.load(open('nn.pkl', 'rb'))
            self.nn.fit(self.good_shape(self.image, seed_moves), np.array([[1]]))
        self.current_data_set = []

    def good_shape(self, image, instructions):
        """Return a (1, n_cell + n_coups) row: flattened image followed by
        the instruction bits, scaled by 10 for the network.

        `instructions` is an array of n_coups 0/1 values.
        """
        tab = np.zeros((1, (Learn.n_cell + Learn.n_coups)))
        tab[0, ::] = np.append(image.flatten(), instructions)
        return 10 * tab

    def play(self, num_iter=1000):
        """Play num_iter frames, each time executing the first move of the
        sequence the network scores highest (early exit above 0.99)."""
        self.set_display(True)
        predicted_outcome = np.zeros(2 ** Learn.n_coups)
        for s in range(num_iter):
            self.image = self.get_image()
            if (s % 100 == 0):
                print(s)
            outcome = 1
            indice_max = 0
            for j, elt in enumerate(self.possibilities):
                a = self.nn.predict(self.good_shape(self.image, elt))[0][0]
                predicted_outcome[j] = a
                if a > 0.99:
                    # BUG FIX: record the winning index before breaking.
                    # Previously `i = j` was set here but unconditionally
                    # overwritten by `i = indice_max` after the loop.
                    indice_max = j
                    break
                elif (a > predicted_outcome[indice_max]):
                    indice_max = j
            i = indice_max
            elt = self.possibilities[i][0]
            if (outcome == 1):
                if elt == 1:
                    instr = 'd'
                elif elt == 0:
                    instr = 'q'
                if (self.jeu.update_all(instr) == "Dead"):
                    outcome = 0
                    self.jeu.restart(Learn.coups_sautes)

    def auc(self, num_iter=10000):
        """Estimate the network's ROC AUC over num_iter rollouts of
        (mostly) random move sequences."""
        real_outputs = []
        predicted_outputs = []
        for s in range(num_iter):
            outcome = 1
            poss = self.possibilities
            i = rd.randint(0, len(poss) - 1)
            elt = poss[i]
            predicted_outputs.append(self.nn.predict(self.good_shape(self.image, elt))[0])
            r = rd.random()
            if (r < 0.8):
                # 80% of the time re-draw a uniformly random sequence index.
                i = int(rd.random() * 2 ** (Learn.n_coups))
            for elt in self.possibilities[i]:
                if (outcome == 1):
                    if elt:
                        instr = 'd'
                    else:
                        instr = 'q'
                    if (self.jeu.update_all(instr) == "Dead"):
                        outcome = 0
                        self.jeu.restart(Learn.coups_sautes)
            real_outputs.append(outcome)
            self.image = self.get_image()
        fpr, tpr, thresholds = metrics.roc_curve(real_outputs, predicted_outputs)
        return metrics.auc(fpr, tpr)

    def benchmark(self, num_iter=1000):
        """Baseline: average survival time of a uniformly random policy,
        printed with an approximate 95% confidence half-width."""
        temps_total = []
        t = 0
        while t < num_iter:
            self.jeu.restart(Learn.coups_sautes)
            while (True):
                r = rd.random()
                if r < 0.5:
                    instr = 'q'
                else:
                    instr = 'd'
                if self.jeu.update_all(instr) == "Dead":
                    t += 1
                    temps_total.append(self.jeu.temps)
                    self.jeu.restart()
                    break
        self.image = self.get_image()
        print(str(sum(temps_total) * 1. / num_iter) + " +/- " +
              str(0.96 * np.sqrt(np.var(np.array(temps_total)) / num_iter)))

    def get_image(self):
        """Build a player-centred image: background 0.5, missiles below
        mid-height marked -1.  Only rows 10:30 of the doubled-width board
        are returned (an nx x ny window)."""
        nx_im, ny = 2 * Learn.nx, Learn.ny
        tab = np.ones((nx_im, ny)) / 2.
        # (duplicate position read removed — it was assigned twice)
        x, y = self.jeu.joueur.position
        for elt in self.jeu.missiles:
            x_m, y_m = elt.position
            x_p = x_m - (x - 0.5)
            if (y_m < 0.5):
                tab[int(nx_im * x_p) % nx_im, int(ny * y_m)] = -1

        return tab[10:30, ::]

    def set_display(self, boolean):
        """Recreate the game with display turned on/off and resync image."""
        self.display = boolean
        self.jeu = MJ.Jeu(autorepeat=False, display=self.display)
        self.jeu.restart(Learn.coups_sautes)
        self.image = self.get_image()

    def save_rd_train_set(self, num_iter=5000):
        """Collect num_iter (image, random choice, outcome) triples by
        rolling out random move sequences; stored in current_data_set."""
        #self.jeu.rand_init(40)
        train_set = []
        for i in range(num_iter):
            self.jeu.restart(100)
            im = self.get_image()
            choice = self.possibilities[rd.randint(0, 2 ** Learn.n_coups - 1)]
            outcome = 1
            #print [b.position for b in self.jeu.missiles]
            for elt in choice:
                if (outcome == 1):
                    if elt:
                        instr = 'd'
                    else:
                        instr = 'q'
                    if (self.jeu.update_all(instr) == "Dead"):
                        outcome = 0
            train_set.append((im, choice, outcome))
        self.current_data_set = train_set
        return

    def intensive_train(self):
        """Fit the network on every sample of current_data_set, then
        pickle it to 'nn.pkl'."""
        for training in self.current_data_set:
            im, choice, outcome = training
            self.nn.fit(self.good_shape(im, choice), np.array([[outcome]]))
        print("Commence à sauver")
        pickle.dump(self.nn, open('nn.pkl', 'wb'))
        print("NN Saved")

    def error_on_train_set(self):
        """Mean absolute prediction error over current_data_set."""
        error = 0.
        for training in self.current_data_set:
            im, choice, outcome = training
            s = self.nn.predict(self.good_shape(im, choice))
            error += abs(s[0][0] - outcome)
        # BUG FIX: previously divided by the undefined name `train_set`,
        # which raised NameError at runtime.
        error = error / len(self.current_data_set)
        return error

    def auc_on_train_set(self):
        """ROC AUC of the network's predictions over current_data_set."""
        real_outputs = []
        predicted_outputs = []
        for training in self.current_data_set:
            im, choice, outcome = training
            predicted_outputs.append(self.nn.predict(self.good_shape(im, choice))[0])
            real_outputs.append(outcome)
        fpr, tpr, thresholds = metrics.roc_curve(real_outputs, predicted_outputs)
        return metrics.auc(fpr, tpr)
예제 #32
0
def NN_compare(initial_position, start, N, x, training_input, training_output,
               data_to_learn, deadrec_x, deadrec_y, x_coords, y_coords):
    """Train a small neural net on the odometry data and plot its path
    against groundtruth, dead reckoning and the LWR result.

    Returns 0 (the plot is the real output; plt.show() is left disabled).
    """
    # Training and query matrices (outputs are deliberately used as the
    # net's inputs here, mirroring the LWR setup).
    X_train = training_output
    y_train = training_input
    X_to_train = data_to_learn

    # One ReLU hidden layer feeding a linear output layer.
    nn = Regressor(layers=[Layer("Rectifier", units=100),
                           Layer("Linear")],
                   learning_rate=0.02,
                   n_iter=10)
    nn.fit(X_train, y_train)

    # Predict the (distance, angle) deltas one query row at a time.
    nn_output2 = [nn.predict(X_to_train[i].reshape((1, 2)))[0].tolist()
                  for i in range(len(X_to_train))]

    # Integrate the deltas into (x, y, theta) poses for plotting.
    NN_pose = [[initial_position[1], initial_position[2], initial_position[3]]]
    NN_x_coords = []
    NN_y_coords = []
    NN_theta_coords = []
    dists2 = []
    angs2 = []
    for i, [dist, ang] in enumerate(nn_output2):
        px, py, ptheta = NN_pose[-1]
        x_coord = px + dist * np.cos(ptheta)
        y_coord = py + dist * np.sin(ptheta)
        theta_coord = ptheta + ang
        NN_x_coords.append(x_coord)
        NN_y_coords.append(y_coord)
        NN_theta_coords.append(theta_coord)
        NN_pose.append([x_coord, y_coord, theta_coord])
        dists2.append(dist)
        angs2.append(ang)

    # Overlay all four trajectories for visual comparison.
    plt.figure(figsize=(15, 10))
    plt.plot(x[start:start + N, 1],
             x[start:start + N, 2],
             c='green',
             label='Groundtruth')       # original data in the background
    plt.plot(deadrec_x, deadrec_y, c='cyan',
             label='Dead Reckoning')    # dead-reckoning result
    plt.plot(x_coords, y_coords, c='red', label='LWR')   # learned (LWR)
    plt.plot(NN_x_coords, NN_y_coords, c='blue',
             label='Neural Net')        # learned (NN)
    plt.title(
        "Learned, Dead-Reckoned, & Groundtruth Path Data - Sequential Data")
    plt.legend()
    # plt.show()

    return 0
예제 #33
0
numiteraciones = 9000

# Two ExpLin hidden layers and a linear output; sizes/learning rate come
# from variables defined earlier in the script.
redneural = Regressor(layers=[
    Layer("ExpLin", units=neurones),
    Layer("ExpLin", units=neurones),
    Layer("Linear")
],
                      learning_rate=tasaaprendizaje,
                      n_iter=numiteraciones)
redneural.fit(capasinicio, capasalida)

# NOTE(review): both .ix and .as_matrix() are removed in modern pandas
# (.loc and .values are the replacements) — this requires an old pandas.
capasinicio1 = TodasEstaciones.ix['2010-01-01':'2010-12-31'].as_matrix(
)[:, [0, 2]]
valor1 = ([])
# Predict station 2's value row by row from columns 0 and 2 of year 2010.
for i in range(capasinicio1.shape[0]):
    prediccion = redneural.predict(np.array([capasinicio1[i, :].tolist()]))
    valor1.append(prediccion[0][0])

# Fill the 2010 gap of Est2 with the network's predictions.
TodasEstaciones['Est2_Completed'] = TodasEstaciones['Est2']
TodasEstaciones['Est2_Completed'].ix['2010-01-01':'2010-12-31'] = valor1

# Stacked comparison plots of the station series.
fig, axs = plt.subplots(4, 1, sharex=True)

fig.subplots_adjust(hspace=0)
axs[0].plot(TodasEstaciones['Est1'].ix['1983-08-02':'2014-04-30'],
            label='PorvenirCompl')
axs[0].legend(loc=2)
axs[1].plot(TodasEstaciones['Est2'].ix['1983-08-02':'2014-04-30'],
            label='sanantonio',
            color='g')
axs[1].legend(loc=2)
예제 #34
0
class Embedder2D(object):
    """
    Transform a set of high dimensional vectors to a set of two dimensional vectors.

    Take in input list of selectors, then for each point find the closest selected instance and materialize
    an edge between the two. Finally output 2D coordinates of the corresponding graph embedding using the sfdp
    Graphviz algorithm.

    """
    def __init__(self,
                 compiled=False,
                 learning_rate=0.002,
                 n_layers=1,
                 n_features_hidden_factor=10,
                 selectors=[QuickShiftSelector()],
                 n_nearest_neighbors=10,
                 n_links=1,
                 layout='force',
                 layout_prog='sfdp',
                 layout_prog_args='-Goverlap=scale',
                 n_eigenvectors=10,
                 random_state=1,
                 metric='rbf',
                 **kwds):
        # NOTE(review): `selectors=[QuickShiftSelector()]` is a mutable
        # default argument shared across instances — confirm no caller
        # mutates it in place.
        self.compiled = compiled
        self.learning_rate = learning_rate
        self.n_layers = n_layers
        self.n_features_hidden_factor = n_features_hidden_factor
        self.selectors = selectors
        self.n_nearest_neighbors = n_nearest_neighbors
        self.n_links = n_links
        self.layout = layout
        self.layout_prog = layout_prog
        self.layout_prog_args = layout_prog_args
        self.n_eigenvectors = n_eigenvectors
        self.metric = metric
        self.kwds = kwds
        self.random_state = random_state
        # Populated by _fit: per-selector selected instances and their ids.
        self.selected_instances_list = []
        self.selected_instances_ids_list = []

    def __repr__(self):
        """Multi-line human-readable summary of the configuration."""
        serial = []
        serial.append('Embedder2D:')
        if self.compiled is True:
            serial.append('compiled: yes')
            serial.append('learning_rate: %.6f' % self.learning_rate)
            serial.append('n_features_hidden_factor: %d' %
                          self.n_features_hidden_factor)
        else:
            serial.append('compiled: no')
        serial.append('layout: %s' % (self.layout))
        serial.append('layout_prog: %s' % (self.layout_prog))
        if self.layout_prog_args:
            serial.append('layout_prog_args: %s' % (self.layout_prog_args))
        serial.append('n_links: %s' % (self.n_links))
        if self.n_nearest_neighbors is None:
            serial.append('n_nearest_neighbors: None')
        else:
            serial.append('n_nearest_neighbors: %d' % self.n_nearest_neighbors)
        serial.append('metric: %s' % self.metric)
        if self.kwds is None or len(self.kwds) == 0:
            pass
        else:
            serial.append('params:')
            serial.append(serialize_dict(self.kwds))
        serial.append('selectors [%d]:' % len(self.selectors))
        for i, selector in enumerate(self.selectors):
            if len(self.selectors) > 1:
                serial.append('%d/%d  ' % (i + 1, len(self.selectors)))
            serial.append(str(selector))
        return '\n'.join(serial)

    def fit(self, data_matrix, target=None):
        """Dispatch to the compiled (neural-net) or direct fit path."""
        if self.compiled is True:
            return self.fit_compiled(data_matrix, target=target)
        else:
            return self._fit(data_matrix, target=target)

    def transform(self, data_matrix):
        """Dispatch to the compiled or direct transform path."""
        if self.compiled is True:
            return self.transform_compiled(data_matrix)
        else:
            return self._transform(data_matrix)

    def fit_transform(self, data_matrix, target=None):
        """Dispatch to the compiled or direct fit_transform path."""
        if self.compiled is True:
            return self.fit_transform_compiled(data_matrix, target=target)
        else:
            return self._fit_transform(data_matrix, target=target)

    def fit_compiled(self, data_matrix_in, target=None):
        """Train a neural net to reproduce the graph-layout embedding,
        so later transforms are a single forward pass."""
        # The direct pipeline provides the 2D targets to regress onto.
        data_matrix_out = self._fit_transform(data_matrix_in, target=target)
        n_features_in = data_matrix_in.shape[1]
        n_features_out = data_matrix_out.shape[1]
        n_features_hidden = int(n_features_in * self.n_features_hidden_factor)
        layers = []
        for i in range(self.n_layers):
            layers.append(
                Layer("Rectifier",
                      units=n_features_hidden,
                      name='hidden%d' % i))
        layers.append(Layer("Linear", units=n_features_out))
        self.net = Regressor(layers=layers,
                             learning_rate=self.learning_rate,
                             valid_size=0.1)
        self.net.fit(data_matrix_in, data_matrix_out)
        return self.net

    def transform_compiled(self, data_matrix):
        """Embed via the trained neural net (fit_compiled must run first)."""
        return self.net.predict(data_matrix)

    def fit_transform_compiled(self, data_matrix, target=None):
        """Convenience: fit_compiled then transform_compiled."""
        self.fit_compiled(data_matrix, target=target)
        return self.transform_compiled(data_matrix)

    def _fit(self, data_matrix, target=None):
        """Run every selector and cache the selected instances and ids."""
        # find selected instances
        self.selected_instances_list = []
        self.selected_instances_ids_list = []
        for i, selector in enumerate(self.selectors):
            selected_instances = selector.fit_transform(data_matrix,
                                                        target=target)
            selected_instances_ids = selector.selected_instances_ids
            self.selected_instances_list.append(selected_instances)
            self.selected_instances_ids_list.append(selected_instances_ids)
        return self

    def _fit_transform(self, data_matrix, target=None):
        """Direct pipeline: select instances, then embed."""
        return self._fit(data_matrix, target=target)._transform(data_matrix)

    def _transform(self, data_matrix):
        """Build the instance graph, lay it out, and PCA-align to 2D."""
        # make a graph with instances as nodes
        graph = self._init_graph(data_matrix)
        if self.n_links > 0:
            # find the closest selected instance and instantiate knn edges
            for selected_instances, selected_instances_ids in \
                    zip(self.selected_instances_list, self.selected_instances_ids_list):
                if len(selected_instances) > 2:
                    graph = self._selection_knn_links(graph, data_matrix,
                                                      selected_instances,
                                                      selected_instances_ids)
        self.graph = graph
        # use graph layout
        embedded_data_matrix = self._graph_layout(graph)
        # normalize display using 2D PCA
        embedded_data_matrix = PCA(
            n_components=2).fit_transform(embedded_data_matrix)
        return embedded_data_matrix

    def _kernel_shift_links(self, data_matrix):
        """For each instance, return the id of its nearest denser neighbor
        (kernel-density shift); instances with no denser neighbor among
        the first n_nearest_neighbors link to themselves."""
        data_size = data_matrix.shape[0]
        kernel_matrix = pairwise_kernels(data_matrix,
                                         metric=self.metric,
                                         **self.kwds)
        # compute instance density as average pairwise similarity
        density = np.sum(kernel_matrix, 0) / data_size
        # compute list of nearest neighbors
        kernel_matrix_sorted = np.argsort(-kernel_matrix)
        # make matrix of densities ordered by nearest neighbor
        density_matrix = density[kernel_matrix_sorted]
        # if a denser neighbor cannot be found then assign link to the instance itself
        link_ids = list(range(density_matrix.shape[0]))
        # for all instances determine link link
        for i, row in enumerate(density_matrix):
            i_density = row[0]
            # for all neighbors from the closest to the furthest
            for jj, d in enumerate(row):
                # proceed until n_nearest_neighbors have been explored
                if self.n_nearest_neighbors is not None and jj > self.n_nearest_neighbors:
                    break
                j = kernel_matrix_sorted[i, jj]
                if jj > 0:
                    j_density = d
                    # if the density of the neighbor is higher than the density of the instance assign link
                    if j_density > i_density:
                        link_ids[i] = j
                        break
        return link_ids

    def _init_graph(self, data_matrix):
        """Graph with one node per instance and a density-shift edge each."""
        graph = nx.Graph()
        graph.add_nodes_from(range(data_matrix.shape[0]))
        self.link_ids = self._kernel_shift_links(data_matrix)
        for i, link in enumerate(self.link_ids):
            graph.add_edge(i, link)
        return graph

    def _selection_knn_links(self, graph, data_matrix, selected_instances,
                             selected_instances_ids):
        """Connect every instance to its n_links nearest selected ones."""
        n_neighbors = min(self.n_links, len(selected_instances))
        nn = NearestNeighbors(n_neighbors=n_neighbors)
        nn.fit(selected_instances)
        knns = nn.kneighbors(data_matrix, return_distance=0)
        for i, knn in enumerate(knns):
            # add edges to the knns
            for id in knn:
                original_id = selected_instances_ids[id]
                graph.add_edge(i, original_id)
        return graph

    def _graph_layout(self, graph):
        """Dispatch on self.layout ('force' or 'laplacian')."""
        if self.layout == 'force':
            return self._layout_force(graph)
        elif self.layout == 'laplacian':
            return self._layout_laplacian(graph)
        else:
            raise Exception('Unknown layout type: %s' % self.layout)

    def _layout_force(self, graph):
        """Graphviz force-directed layout, scaled to zero mean/unit var."""
        # NOTE(review): nx.graphviz_layout was removed in networkx >= 2.0
        # (now nx.nx_agraph.graphviz_layout) — this requires an old networkx.
        two_dimensional_data_matrix = nx.graphviz_layout(
            graph, prog=self.layout_prog, args=self.layout_prog_args)
        two_dimensional_data_list = [
            list(two_dimensional_data_matrix[i]) for i in range(len(graph))
        ]
        embedded_data_matrix = scale(np.array(two_dimensional_data_list))
        return embedded_data_matrix

    def _layout_laplacian(self, graph):
        """Spectral layout from the smallest-real eigenvectors of the
        normalized graph Laplacian."""
        nlm = nx.normalized_laplacian_matrix(graph)
        eigvals, eigvects = eigs(nlm, k=self.n_eigenvectors, which='SR')
        eigvals, eigvects = np.real(eigvals), np.real(eigvects)
        return scale(eigvects)

    def randomize(self, data_matrix, amount=.5):
        """Randomly perturb the configuration (selectors, kernel gamma,
        knn and link counts) for hyper-parameter search; `amount` sets
        how aggressive the perturbation is."""
        random.seed(self.random_state)
        inclusion_threshold = random.uniform(amount, 1)
        # NOTE(review): random.randint(1, 1e9) passes a float upper bound —
        # deprecated/rejected on newer Python; should be int(1e9).
        selectors = []
        if random.random() > inclusion_threshold:
            selectors.append(
                SparseSelector(random_state=random.randint(1, 1e9)))
        if random.random() > inclusion_threshold:
            selectors.append(
                MaxVolSelector(random_state=random.randint(1, 1e9)))
        if random.random() > inclusion_threshold:
            selectors.append(
                QuickShiftSelector(random_state=random.randint(1, 1e9)))
        if random.random() > inclusion_threshold:
            selectors.append(
                DensitySelector(random_state=random.randint(1, 1e9)))
        if random.random() > inclusion_threshold:
            selectors.append(
                OnionSelector(random_state=random.randint(1, 1e9)))
        if not selectors:
            selectors.append(
                DensitySelector(random_state=random.randint(1, 1e9)))
            selectors.append(
                SparseSelector(random_state=random.randint(1, 1e9)))
        selector = CompositeSelector(selectors=selectors)
        selector.randomize(data_matrix, amount=amount)
        self.selectors = deepcopy(selector.selectors)
        self.metric = 'rbf'
        self.kwds = {'gamma': random.choice([10**x for x in range(-3, 3)])}
        if random.random() > inclusion_threshold:
            self.n_nearest_neighbors = random.randint(3, 20)
        else:
            self.n_nearest_neighbors = None
        self.n_links = random.randint(1, 5)
        self.random_state = self.random_state ^ random.randint(1, 1e9)
예제 #35
0
def main():
    """Scratch actor-critic training loop for tic-tac-toe (incomplete:
    several TODOs and NOTE(review) items below)."""
    
    #Initialize the board - the state
    state = np.ndarray(shape=(3,3), dtype=int)
    win_states = makeWinStates()
    actions = [[0,0], [0,1], [0,2], [1,0], [1,1], [1,2], [2,0], [2,1], [2,2]] #also spaces
    
    #Variables - might not be necessary
    k = 1
    # NOTE(review): under Python 2, 1/k is integer division (alpha == 1).
    alpha = 1/k
    gamma = .3
    eps = .1

    #Initializing our 'overkill' neural networks
    # NOTE(review): `layers` is undefined here — Regressor needs an explicit
    # layer list; this raises NameError as written.
    actor = Regressor(layers, warning=None, weights=None, random_state=None, 
                     learning_rule='sgd', 
                     learning_rate=0.01, 
                     learning_momentum=0.9, 
                     regularize=None, weight_decay=None, 
                     dropout_rate=None, batch_size=1, 
                     n_iter=None, n_stable=10, 
                     f_stable=0.001, valid_set=None, 
                     valid_size=0.0, loss_type=None, 
                     callback=None, debug=False, 
                     verbose=None) #???
    
    #Training the actor with a random policy
    trainStates = []; acts = []
    for i in range(500):
	# Random 3x3 board with entries in {-1, 0, 1}.
	sample_st = np.ndarray(shape=(3,3), dtype=int)
	for j in range(9):
	    sample_st[math.floor(j/3),j%3] = random.randint(-1,1)
	    
	act = random.randint(0,8) #action represented by its index
	trainStates.append(sample_st)
	acts.append(act)
	
    actor.fit(trainStates, acts)
    
    target_mu = actor
    
    
    critic = Regressor(layers=[Layer("Rectifier", name="layer1", units=11, pieces=2), #9 squares, 1 action, 1 bias
                       Layer("Rectifier", name="layer2", units=11, pieces=2),
                       Layer("Rectifier", name="layer3", units=11, pieces=2),
                       Layer("Softmax")], learning_rate=0.02)
    
    #Randomly initialize the critic
    statesAndActs = []; rewards = []
    for i in range(500):
	sample_st = np.ndarray(shape=(3,3), dtype=int)
	for j in range(9):
	    sample_st[math.floor(j/3),j%3] = random.randint(-1,1)

	#random action, random reward
	act = random.randint(0,8)
	rew = random.randint(-1,1)

	statesAndActs.append([sample_st,act])
	rewards.append(rew)

    critic.fit(statesAndActs,rewards)

    target_Q = critic

    
    for i in range(10):
	reward = 0; end = False; R = []
	
	while (end != True):

	    action = actor.predict(state)
	    newstate = getNextState(state, action) #Execute action
	    
	    #Observe reward
	    reward = getReward(state)
	    if reward != 0: #Game is done
		end = True		    
	    
	    #Replay buffer review
	    # NOTE(review): list.append takes exactly one argument — this
	    # should append a tuple: R.append((state, action, reward, newstate)).
	    R.append(state, action, reward, newstate)
	    
	    # Sample a minibatch of size log(len(R)) without replacement.
	    N = math.floor(math.log(len(R)))
	    R2 = R; minibatch = []
	    for i in range(N):
		j = random.randint(0,len(R2)-1)
		minibatch.append(R2[j])
		R2.remove(R2[j])
		
	    # Bellman targets from the target critic.
	    ys = []; batchStates = []
	    for i in range(N):
		s_1 = minibatch[i][3]; r = minibatch[i][2]
		ys.append(r + gamma*target_Q.predict(s_1))
		
		#Make new input for retraining - includes state and action
		batchStates.append([minibatch[i][0],minibatch[i][0]])
		
	    #minimize the loss L = (1/N)*sum(ys[i] - critic.predict(state))^2 - a linear regression
	    if len(batchStates) != 0:
		critic.fit(batchStates,ys)
	    
	    #update the actor policy somehow -- this is the hard part; test the critic alone first
	    
	    #Update the target critic
	    Q_para = np.array(critic.get_parameters())
	    if i == 0:
		target_Q.set_parameters(Q_para)
	    else:
		Qp_para = np.array(target_Q.get_parameters())
		new_para = tau*Q_para + (1-tau)*Qp_para
		target_Q.set_parameters(new_para)
		
	    #Update the target actor
	    
	    
	    #How do I write this
	    
	    #Set state to the new state.
	    state = newstate
	    
	    reward = getReward(state)
	    if reward != 0: #Game is done
		end = True	    
	    
	    #We play as "O"
	    x = -1; y = -1
	    while not (x >= 0 and x <= 2 and y >= 0 and y <= 2):
		try:
		    # NOTE(review): int(input(...)) yields a single int — it
		    # cannot be unpacked into x, y; parse the two values instead.
		    x, y = int(input("Enter the row and column indices of the location at which you intend to draw an 'O.' (Format: x, y):    "))
		    while x <= 0 or x >= 2 or y <= 0 or y >= 2:
			x, y = int(input("Sorry, those indices are invalid. Please input integral indices between 0 and 2 inclusive, in the correct format:    "))
		except:
		    print ("I'm sorry, but x and y should be numerical.")
	    
	    state[x,y] = -1
	    reward = getLoss(state)
	    if reward != 0: #Game is done
		end = True
# np.save("data_x_combined", data_x)
# np.save("data_y_combined", data_y)

data_x = np.load("data_x_combined.npy")[100:]
data_y = np.load("data_y_combined.npy")[100:]
print data_y
nn = Regressor(
    layers=[
        Layer("Sigmoid", units=512),
        Layer("Sigmoid")],
    learning_rate=0.0001,
    n_iter=40
    )
print("Generating Fit")
nn.fit(data_x, data_y)
print("Fit generated")
fs = open('nn_combined_reg.pkl', 'wb')
pickle.dump(nn, fs)
fs = open('nn_combined_reg.pkl', 'rb')
nn = pickle.load(fs)
n = 2590
for x in nn.predict(data_x[n:n+2000]):
    if np.count_nonzero(x):
        print 'nonzero', x
# print()
# print(data_y[n:n+2000])
# nn.score(data_x, data_y)
# fs.close()
# print("NN Pickled")
# pickle.save()
예제 #37
0
    0.4175298805, 3.9104976997, 1.705969706, 41.29775641025662
])

#Artificial Neural Network

ann_network = Regressor(layers=[
    Layer("Linear", units=10),
    Layer("Tanh", units=200),
    Layer("Linear", units=1)
],
                        learning_rate=0.0001,
                        n_iter=10)

ann_network.fit(input_array, output_array)
# NOTE(review): predict is given a flat 10-value list — presumably one
# sample; confirm the library accepts 1-D input (2-D is the usual contract).
print ann_network.predict([
    7876.94, 7876.94, 201.25, 96.0895023003, 89.5191739429, 97.5470278405,
    0.4175298805, 3.9104976997, 1.705969706, 41.2977564
])

#SVM Regression with an RBF kernel (degree is ignored for 'rbf')
svregressor = SVR(kernel='rbf', C=1e3, degree=3)
print input_array.shape, output_array.shape
svregressor.fit(input_array, output_array)
print svregressor.predict([
    7876.94, 7876.94, 201.25, 96.0895023003, 89.5191739429, 97.5470278405,
    0.4175298805, 3.9104976997, 1.705969706, 41.2977564103
])

#plt.plot(input_array)
#plt.show()
예제 #38
0
    x_train[i,:] = time_series[i:i+lag]
    y_train[i] = np.cos(i+lag)

#training
nn.fit(x_train, y_train)

#testing: sliding windows of length `lag` over the held-out series
x_test = np.zeros((len(test_series)-lag,lag))
y_test = np.zeros(len(test_series)-lag)
predictions = np.zeros(len(test_series)-lag)

for i in range(len(test_series)-lag):
    x_test[i,:] = test_series[i:i+lag]
    y_test[i] = test_series[i+lag]

predictions = nn.predict( x_test )

# RMSE on the held-out test series (comment previously mislabeled this
# as training error).
rmse = mean_squared_error(y_test, predictions)**0.5
print rmse

# Plot one week of hourly data (24*7 points).
window = 24*7;
plt.plot(range(len(predictions[:window])), predictions[:window], '-r', label='Predictions', linewidth=1)
plt.plot(range(len(y_test[:window])), y_test[:window], '-g',  label='Original series')
plt.title("")
plt.xlabel("")
plt.ylabel("")
plt.legend()
plt.show()

예제 #39
0
def main():

    print "Loading Data..."
    data = readData("./data.csv") #, shuffle=shuffle_data, test=test)
    # Ymotor = np.squeeze(np.asarray([example[1][0] for example in data[0]]))
    # Ytotal= np.squeeze(np.asarray([example[1][1] for example in data[0]]))

    # X = SelectKBest(f_regression, k=9).fit_transform(X, target[:,1])
    # print  X.shape

    # print Ytotal
    # print Ymotor.shape
    # print Ytotal, "," , Ymotor
    # print X

    # degrees = [1, 2, 3]
    # for i in range(len(degrees)):


    Error= []
    MSE=[]
    Avg_score= []
    record = []

    # X = SelectKBest(f_regression, k=9).fit_transform(X, target[:,1])
    # # print  X.shape
    #
    # polynomial_features = PolynomialFeatures(degree=1,include_bias=True)
    # X= polynomial_features.fit_transform(X)
    # # X= np.c_[np.ones(len(X)),X] #concatente 2 columns vertically ;)
    # # print len(X[1])
    # # print  X[0,:]



    for layer in [[400,5]]:#,[400,5],[400,6],[400,7],[450,6],[450,7],[485,5],[485,6],[485,7],[500,6],[600,300],[800,6],[800,500],[1000,6],[1000,100],[1000,500]]:

        for lr in [0.0009]:
        # for layer in [[6],[10],[15],[20],[25],[30],[35],[40],[50],[60],[75],[100],[400],[600]]:

            for alpha in [0.9]:#[0.01, 0.1, 0.7, 0.8 , 0.9]:

                # for k in range(6,8):   #k=[5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
                # for n_folds in range(10,11):

                X = np.squeeze(np.asarray([example[0] for example in data])) #Total examples of 5875
                target= np.squeeze(np.asarray([example[1] for example in data]))

                polynomial_features = PolynomialFeatures(degree=2, include_bias=True)
                X= polynomial_features.fit_transform(X)
                # print len(X[1])
                # print  X[0,:]

                X = SelectKBest(f_regression, k=6).fit_transform(X, target[:,1])
                # print  X.shape



                n_folds =5

                kf = KFold(len(X[0]), n_folds)
                # print len(kf)
                # print(kf)
                for train_index, test_index in kf:
                    # print("TRAIN:", train_index, "TEST:", test_index)
                    train_X, valid_X = X[train_index], X[test_index]
                    train_target, valid_target = target[train_index], target[test_index]



                    # print 'Standardizing...'
                    scaler = preprocessing.StandardScaler().fit(train_X)  #fit only on training data (Compute the mean and std to be used for later scaling)
                    train_set = scaler.transform(train_X)  #Perform standardization by centering and scaling
                    valid_set = scaler.transform(valid_X) # apply same transformation to test data
                    # print train_X[:4]
                    # print train_set[:4]



                    #
                    # nn = Regressor(
                    # layers=[
                    #     # Layer("Sigmoid", units=1000),
                    #     Layer("Sigmoid", units=500),
                    #     Layer("Linear", units=6),
                    #     Layer("Linear", units=2)],
                    # learning_rule='sgd',
                    # regularize='L2',
                    # weight_decay=0.7,
                    # learning_rate=0.0009,
                    # batch_size=30,
                    # n_iter=10,
                    # loss_type='mse',)



                    nn = Regressor(
                        layers=[
                            Layer("Sigmoid", units=layer[0]),
                            # Layer("Sigmoid", units=600),
                            Layer("Linear", units=layer[1]),
                            Layer("Linear", units=2)],
                        learning_rule='sgd',
                        regularize='L2',
                        weight_decay= alpha,
                        learning_rate=lr,
                        batch_size=30,
                        n_iter=10,
                        loss_type='mse',)

                    nn.fit(train_set, train_target)
                    y_example = np.squeeze(np.asarray(nn.predict(valid_set)))
                    # print valid_target, y_example
                    Error.append(np.absolute(valid_target - y_example))
                    MSE.append(np.power(Error[-1], 2).mean(0))
                    # MSE= np.mean(MSE, axis=0)
                    # print 'Fold:',f, np.matrix(MSE).mean(0)
                y1= np.array([example[0] for example in MSE]).tolist()
                y2= np.array([example[1] for example in MSE]).tolist()

                Avg_score= np.matrix(zip(y1,y2)).min(0)

                #
                # record.append([lr,layer_size_list,err_list[best_fold]])
                # record.append([k,Avg_score])
                # print record
                print  " layer:", layer,","," lr:",lr,",","alpha:",alpha,",", np.around(Avg_score,decimals=3)
예제 #40
0
파일: primes.py 프로젝트: PCJohn/Walrus
    nn.fit(X_train, Y_train)
    pickle.dump(nn, open(path, 'w'))


N = 100
X, Y = create_ds(N)
print X.shape, '--', Y.shape
print X[:5]
print Y[:5]
print '___________'
# Two rectifier hidden layers into a linear bottleneck and tanh output.
nn = Regressor(
    layers=[
        #Convolution("Rectifier",channels=1,kernel_shape=(1,1)),
        Layer("Rectifier", units=128),
        Layer("Rectifier", units=128),
        Layer("Linear", units=64),
        Layer("Tanh")
    ],
    learning_rate=0.01,
    verbose=True)
train(nn, X, Y, './mod_prim', 2, 2)

print "#-----TESTING-----#"
# NOTE(review): the pickle is read in text mode ('r'); binary mode 'rb' is the
# safe choice (and required on Python 3) -- confirm protocol/platform.
nn = pickle.load(open('./mod_prim', 'r'))
# NOTE(review): create_ds(N) above returned a (X, Y) pair, yet the whole tuple
# is passed to predict here -- presumably the features alone were intended.
test = create_ds(2 * N)
pred = nn.predict(test)
for i, p in enumerate(pred):
    #plt.imshow(test[i])
    #plt.show()
    print test[i], ' == ', round(1 / p)
def Main(ticker, window, startRow, startCol, endCol, targetCol, numDataPoints,
         daysForward, daysBackward, numNodes):
    """Assemble per-node features in parallel, train an sknn Regressor, and
    evaluate direction-of-change accuracy on the held-out rows.

    Returns (PREDICTION, PERCENT, ERROR, yBack):
      PREDICTION -- model output for the last test row
      PERCENT    -- fraction of test rows whose direction of change was right
      ERROR      -- mean relative absolute error over the test rows
      yBack      -- 5th element of FirstNode.train's result (historical targets)
    """
    trainY = []
    yBack = []
    randL = []

    start_time = time.time()

    # Shuffled index list over the usable date range; shared with every worker
    # so all feature columns line up row-for-row.
    for x in range(1, int(numDataPoints - daysBackward - daysForward)):
        randL.append(x)
    np.random.shuffle(randL)
    print "Step 1 Complete"

    #TrainX Process: one Train_X call per node, fanned out over a process pool.
    pool = mp.Pool(100)
    partial_TrainX = partial(Train_X, ticker, window, startRow, startCol,
                             endCol, targetCol, numDataPoints, daysForward,
                             daysBackward, randL)
    trainX = pool.map(partial_TrainX, (Node for Node in range(1, numNodes)))
    pool.close()
    pool.join()

    print "Step 2 Complete"

    # Second feature pass (Train_X2) with the same fan-out.
    pool = mp.Pool(100)
    partial_TrainX2 = partial(Train_X2, ticker, window, startRow, startCol,
                              endCol, targetCol, numDataPoints, daysForward,
                              daysBackward, randL)
    v = pool.map(partial_TrainX2, (Node for Node in range(1, numNodes)))
    pool.close()
    pool.join()

    print "Step 3 Complete"

    # Test-set features, same fan-out.
    pool = mp.Pool(100)
    partial_TestX = partial(Test_X, ticker, window, startRow, startCol, endCol,
                            targetCol, numDataPoints, daysForward,
                            daysBackward, randL)
    testX = pool.map(partial_TestX, (Node for Node in range(1, numNodes)))
    pool.close()
    pool.join()

    print "Step 4 Complete"

    # Targets come from the first node's training routine (2nd return value).
    trainY.append(
        FirstNode.train(ticker, 1, startRow, startCol, endCol, targetCol,
                        numDataPoints, daysForward, daysBackward, randL,
                        False)[1])

    print "Step 5 Complete"
    # Historical target series (5th return value) for the caller.
    yBack.append(
        FirstNode.train(ticker, 1, startRow, startCol, endCol, targetCol,
                        numDataPoints, daysForward, daysBackward, randL,
                        False)[4])
    # Concatenate the two feature passes node-wise.
    trainXF = []
    for x in range(len(trainX)):
        trainXF.append(trainX[x] + v[x])

    print "Training..."

    # Transpose node-major lists into row-major samples.
    trainRXF = zip(*trainXF[::-1])
    testRX = zip(*testX[::-1])

    # Align targets with the training rows; the remainder is the test target.
    trainYF = trainY[0][0:len(trainRXF)]
    testY = trainY[0][len(trainRXF):]

    NumIterations = 10
    NumNeurons = 10
    Layers = [Layer(type="Sigmoid", units=5), Layer(type="Linear")]
    model = Regressor(layers=Layers, learning_rate=.05, n_iter=NumIterations)

    model.fit(trainRXF, trainYF)
    print "fit!"

    # Per-row predictions and relative absolute errors on the test split.
    predictions = []
    for x in testRX:
        predictions.append(model.predict(x))
    error = []

    for x in range(len(predictions)):
        error.append(abs(predictions[x] - testY[x]) / testY[x])
    print average(error)
    #    plt.plot(predictions,"g-")
    #    plt.plot(testY,"b-")
    #    plt.show()

    errorGL = []
    percent = []

    print predictions[-1]
    # NOTE(review): removes the element whose VALUE equals len(randL), not the
    # last element -- presumably meant to drop the final index; verify.
    randL.remove(len(randL))
    # Score each prediction by whether it moved in the same direction as the
    # actual value relative to daysForward earlier.
    for x in range(len(predictions)):
        today = randL[x]
        todayG = predictions[x]
        todayA = trainY[0][today]
        yesterday = trainY[0][today - daysForward]

        errorG = abs(todayG - todayA)
        errorGL.append(errorG)

        dA = todayA - yesterday
        dG = todayG - yesterday

        if (dA < 0 and dG < 0) or (dA > 0 and dG > 0):
            percent.append(1)
        else:
            percent.append(0)

    print percent.count(1) / (len(percent) * 1.0)

    print("--- %s seconds ---" % (time.time() - start_time))

    print "done"

    PREDICTION = predictions[-1]
    PERCENT = percent.count(1) / (len(percent) * 1.0)
    ERROR = average(error)
    return PREDICTION, PERCENT, ERROR, yBack
예제 #42
0
    
                pre_label = bst.predict(xgbtest)
                loss = t-pre_label 
                MAPE = (sum(abs(loss)/t))/len(t)
                mapeSet.append(MAPE)
                para.append([inn,im,ie])
                count=count+1
                print('---> ',count,'......')
    
# scikit-neuralnetwork for regression(有问题,无法训练)
if 0:
    
    mlp = Regressor(layers=[Layer('Rectifier',units=100,weight_decay=0.0001,dropout=0.5),Layer('Linear')],learning_rule='sgd', learning_rate=0.01,
                            batch_size=500, n_iter=10,loss_type = 'mse')
    mlp.fit(X,y)
    pre_label = mlp.predict(T)
'''
# %%
# plot 
loss = t-pre_label # 误差
Z = np.zeros([len(loss)])
plt.plot(loss,'g')
plt.plot(Z,'r')
plt.xlabel('Number of the sample')
plt.ylabel('loss(s)')
plt.title('Visualizing loss')
plt.show()

# %%
# MAPE
MAPE = (sum(abs(loss)/t))/len(t)
예제 #43
0
파일: PLMR.py 프로젝트: XI-x/React-admin
    Layer("Rectifier", units=14),
    Layer("Linear")
],
                      learning_rate=0.02,
                      regularize="L2",
                      random_state=2019,
                      weight_decay=0.001,
                      n_iter=100)

print("fitting model right now")
# Fit the four candidate networks (constructed above) on the same split.
fit1_Sigmoid.fit(x_train, y_train)
fit2_ReLU.fit(x_train, y_train)
fit3_ReLU.fit(x_train, y_train)
fit4_ReLU.fit(x_train, y_train)

# In-sample predictions for each model.
pred1_train = fit1_Sigmoid.predict(x_train)
pred2_train = fit2_ReLU.predict(x_train)
pred3_train = fit3_ReLU.predict(x_train)
pred4_train = fit4_ReLU.predict(x_train)

# Training-set mean squared error per model.
mse_1_train = mean_squared_error(pred1_train, y_train)
mse_2_train = mean_squared_error(pred2_train, y_train)
mse_3_train = mean_squared_error(pred3_train, y_train)
mse_4_train = mean_squared_error(pred4_train, y_train)

print("train ERROR :\n \
mse_1_train = %s  \n mse_2_train = %s  \n mse_3_train = %s  \n mse_4_train = %s "\
%(mse_1_train, mse_2_train,mse_3_train,mse_4_train))

# Held-out predictions (remaining models presumably follow below this chunk).
pred1_test = fit1_Sigmoid.predict(x_test)
pred2_test = fit2_ReLU.predict(x_test)
예제 #44
0
파일: vangogh.py 프로젝트: imclab/bots
from sknn.mlp import Regressor, Layer

print a_in.min(), a_in.max(), a_out.min(), a_out.max()

# 1024-unit rectifier hidden layer with a linear output; 20% of the training
# data is held out for validation during fitting.
nn = Regressor(
    layers=[Layer("Rectifier", units=1024), Layer("Linear")], learning_rate=0.001, n_iter=5, verbose=1, valid_size=0.2
)

nn.fit(a_in, a_out)

"""
# RECONSTRUCTION.
samples = []
for i in range(SAMPLES):
    yh, xh = random.randint(8*SCALE, img_high.shape[0]-16*SCALE), random.randint(8*SCALE, img_high.shape[1]-16*SCALE)
    yl, xl = yh / SCALE, xh / SCALE
    a_in[i] = img_low[yl-4:yl+4,xl-4:xl+4].flatten() / 255.0
    samples.append((yh, xh))
"""

a_test = nn.predict(a_in)

# NOTE(review): `samples` is only populated inside the disabled reconstruction
# block above, so this loop presumably iterates a stale/undefined list. Verify.
img_test = numpy.zeros(img_high.shape, dtype=numpy.float32)
for i, (yh, xh) in enumerate(samples):
    img_test[yh - 8 : yh + 8, xh - 8 : xh + 8] = (a_test[i].reshape(16, 16, 3) + 0.5) * 255.0

print "Reconstructing..."
# scipy.misc.imsave('VG_DT1567.test.png', img_test)
scipy.misc.imsave("VG_DT1567.plain.png", img_plain)
예제 #45
0
    # Build `hidden_layers` identical hidden layers followed by a linear output.
    layers = []
    for _ in range(0, parameters["hidden_layers"]):
        layers.append(
            Layer(parameters["hidden_type"],
                  units=parameters["hidden_neurons"]))
    layers.append(Layer("Linear"))
    model = Regressor(layers=layers,
                      learning_rate=parameters["learning_rate"],
                      n_iter=parameters["iteration"],
                      random_state=42)

    X = np.array(trainX)
    y = np.array(trainY)

    model.fit(X, y)

    # NOTE(review): this second fit retrains on the same data and makes the
    # np.array conversion above redundant -- presumably only one fit intended.
    model.fit(trainX, trainY)
    prediction = model.predict(testX)
    # Undo target normalization so actual and predicted values are written in
    # original units.
    prediction = normalizer_Y.inverse_transform(prediction)
    testY = normalizer_Y.inverse_transform(testY)

    # CSV-style output rows: location,actual,predicted
    for i in range(0, len(testY)):
        output.write(str(location))
        output.write(",")
        output.write(str(testY[i]))
        output.write(",")
        output.write(str(prediction[i][0]))
        output.write("\n")

output.close()
예제 #46
0
#print numpy.isinf(input_array).any()
#print numpy.isinf(output_array).any()
#First Model
# Random forest regression baseline, spot-checked on one feature vector.
####
###
##
#
print input_array.shape, output_array.shape
reg = RandomForestRegressor(n_estimators=10)
reg.fit(input_array, output_array)
print reg.predict([7876.94, 7876.94, 201.25, 96.0895023003,	89.5191739429,	97.5470278405,	0.4175298805, 3.9104976997	, 1.705969706, 41.29775641025662])


# Artificial neural network: linear(10) -> tanh(200) -> linear(1).

ann_network = Regressor(layers= [Layer("Linear", units=10), Layer("Tanh", units=200), Layer("Linear", units=1)], learning_rate=0.0001, n_iter=10)

ann_network.fit(input_array, output_array)
print ann_network.predict([7876.94, 7876.94, 201.25, 96.0895023003,	89.5191739429, 97.5470278405, 0.4175298805, 3.9104976997, 1.705969706, 41.2977564])


# Support-vector regression (RBF kernel) on the same features.
svregressor = SVR(kernel='rbf', C=1e3, degree=3)
print input_array.shape, output_array.shape
svregressor.fit(input_array, output_array)
print svregressor.predict([7876.94, 7876.94, 201.25, 96.0895023003,	89.5191739429, 97.5470278405, 0.4175298805, 3.9104976997, 1.705969706, 41.2977564103])

#plt.plot(input_array)
#plt.show()
예제 #47
0
파일: 2c.py 프로젝트: ttommytang/EE219
#============================Save pre-processed data===========================
# Round-trip through CSV so later runs can start from the revised file.
data.to_csv('revised_data.csv', index=False)
data = pd.read_csv('revised_data.csv')
#==============================================================================


def calculate_RMSE(predicted, actual):
    """Root-mean-squared error between a prediction vector and ground truth."""
    mse = mean_squared_error(actual, predicted)
    return math.sqrt(mse)


#===========================Neural Network Fitting=============================
training_data = data.copy()
# Drop 'duration' and predict the 'size' column from the remaining features.
training_data.drop('duration', 1, inplace=True)
target_data = training_data.pop('size')

#cross validation: 90/10 train/test split with a fixed seed.
X_train, X_test, y_train, y_test = cross_validation.train_test_split(
    training_data.values, target_data.values, test_size=0.1, random_state=42)

i = 0.1  # learning rate under test
neu_net_reg = Regressor(layers=[Layer("Sigmoid", units=30),
                                Layer("Linear")],
                        learning_rate=i,
                        n_iter=19)
neu_net_reg.fit(X_train, y_train)
predicted_target_data = neu_net_reg.predict(X_test)
print 'Learning rate: ' + str(i) + '   RMSE is: ' + str(
    calculate_RMSE(y_test, predicted_target_data))
#==============================================================================
                       layers=[
                               Layer("Rectifier", units=30),
                               Layer("Linear")],
                       learning_rate=0.01,
                       batch_size = 100,
                       #learning_rule = "momentum",
                       n_iter=100)
                       #valid_size=0.25)
        # Training
        nn.fit(X_train_bg,Y_train)
        pickle.dump(nn, open('autoencoder.pkl', 'wb'))
    if not runTraining:
        nn = pickle.load(open('autoencoder.pkl', 'rb'))

    # Testing: score background and signal sets with the trained autoencoder.
    predicted_diff = nn.predict(X_test_bg)
    predicted_signal = nn.predict(X_test_sig)

    # Scalar reconstruction error per event.
    rec_errors_diff = reconstructionError(X_test_bg,predicted_diff)
    rec_errors_sig = reconstructionError(X_test_sig,predicted_signal)

    # Reconstruction errors broken down per input variable.
    rec_errors_varwise_diff = reconstructionErrorByFeature(X_test_bg,predicted_diff)
    rec_errors_varwise_sig = reconstructionErrorByFeature(X_test_sig,predicted_signal)

    ## Plotting - performance curves
    ## ROC: sweep 2000 thresholds over the reconstruction errors, then
    ## integrate the curve for the AUC figure of merit.
    true_positive,false_positive,precisions,recalls,f1s = makeMetrics(2000,rec_errors_sig,rec_errors_diff)
    auc = areaUnderROC(true_positive,false_positive)
    print "Area under ROC = ",auc
예제 #49
0
class Learn:
    """Learn to predict the next game frame from the two previous frames with
    a three-layer linear sknn Regressor trained online, one sample at a time.
    """

    # Board geometry: frames are nx x ny cells.
    nx = 20
    ny = 20
    n_cell = nx * ny
    # Game ticks skipped on every restart.
    coups_sautes = 60
    
    #This calculates the distance between two images (mean absolute error).
    def dist(self, prediction, im3):
        err = 0.
        for i in xrange(len(prediction)):
            err += abs(prediction[i] - im3[i])
        return err / len(prediction)

    def good_shape_2(self, im1, im2):#Good shape for the input (two images)
        # Flatten and concatenate both frames into one (1, 2*n_cell) row.
        tab = np.zeros((1, (Learn.n_cell*2)))
        tab[0, ::] = np.append(im1.flatten(), im2.flatten())
        return tab
    
    def good_shape_1(self, im3):#Good shape for the output (1 image)
        # Flatten one frame into a (1, n_cell) row.
        tab = np.zeros((1, (Learn.n_cell)))
        tab[0, ::] = im3.flatten()
        return tab

    def __init__(self, new=False, display=False):
        """Start a game; build a fresh net (new=True) or load the pickled one.
        Either way, run one fit so the network is initialized."""
        #       self.possibilities = generate(Learn.n_coups)
#        np.random.shuffle(self.possibilities)
        self.jeu = MJ.Jeu(autorepeat=False, display=display)
        self.jeu.restart(Learn.coups_sautes)
        self.previous_image = self.get_image()
        self.jeu.update_all()
        self.current_image=self.get_image()
        if new:
            self.nn = Regressor(layers=[Layer("Linear", units=(Learn.n_cell*2)), Layer("Linear", units=Learn.n_cell*4), Layer("Linear",units=Learn.n_cell)], learning_rate=0.01, n_iter=1)
            self.nn.fit(self.good_shape_2(self.previous_image, self.current_image), self.good_shape_1(self.current_image))
        else:
            self.nn = pickle.load(open('nn_image_prediction.pkl', 'rb'))
            self.nn.fit(self.good_shape_2(self.previous_image, self.current_image), self.good_shape_1(self.current_image))
        self.current_data_set = []
    

    def play(self, num_iter=1000):
        """Play num_iter steps, picking the move with the best predicted value.

        NOTE(review): relies on self.possibilities, self.good_shape,
        self.set_display and Learn.n_coups, none of which are defined in this
        version of the class -- presumably left over from an older variant.
        """
        self.set_display(True)
        predicted_outcome = np.zeros(2**Learn.n_coups)
        for s in xrange(num_iter):
            self.image = self.get_image()
            if (s%100 == 0):
                print s
            outcome = 1
            indice_max=0
            for j, elt in enumerate(self.possibilities):
                a = self.nn.predict(self.good_shape(self.image, elt))[0][0]
                predicted_outcome[j] = a
                if (a>predicted_outcome[indice_max]):
                    indice_max=j
            i=indice_max
            elt = self.possibilities[i][0]
            if (outcome == 1):
                if elt == 1:
                    instr = 'd'
                elif elt == 0:
                    instr = 'q'
                if (self.jeu.update_all(instr) == "Dead"):
                    outcome = 0
                    self.jeu.restart(Learn.coups_sautes)   



    def get_image(self):
        """Render the board: background cells 0.5, missile cells -1, columns
        shifted so positions are relative to the player's x coordinate."""
        nx_im, ny = 2 * Learn.nx, Learn.ny
        tab = np.ones((nx_im, ny))/2.
        x, y = self.jeu.joueur.position
        x,y=self.jeu.joueur.position
        for elt in self.jeu.missiles:
            x_m, y_m = elt.position
            x_p = x_m - (x - 0.5) 
            if (y_m < 0.5):
                tab[int(nx_im * x_p) % nx_im, int(ny * y_m)] = -1

        return tab[10:30, ::]

    def save_rd_train_set(self, num_iter=5000): # returns a set of situations, choice sequences, and outcomes
        """Collect num_iter (frame1, frame2, frame3) triples from restarted
        games into self.current_data_set."""
        train_set = []
        for i in xrange(num_iter):
            self.jeu.restart(100)
            im1 = self.get_image().flatten()
            self.jeu.update_all()
            im2=self.get_image().flatten()
            self.jeu.update_all()
            im3=self.get_image().flatten()
            train_set.append((im1, im2, im3))
        self.current_data_set = train_set
        return
        
    def intensive_train(self): 
        """Fit the net once per stored triple, then pickle it to disk."""
        for training in self.current_data_set:
            im1, im2, im3 = training
            self.nn.fit(self.good_shape_2(im1, im2), self.good_shape_1(im3))
        print "Commence à sauver"
        pickle.dump(self.nn, open('nn_image_prediction.pkl', 'wb'))
        print "NN Saved"
            
    def error_on_train_set(self):
        """Mean per-pixel absolute error over the stored triples; occasionally
        prints the worst pixel difference for a random ~1% of samples."""
        error = 0.
        for training in self.current_data_set:
            im1, im2, im3=training
            s = self.nn.predict(self.good_shape_2(im1, im2))[0]
            if (rd.random()<0.01):
                print "Differences : "
                print np.max(abs(s-im3))
            error += self.dist(s,im3)
        error = error / len(self.current_data_set)
        return error
예제 #50
0
class Embedder2D(object):

    """
    Transform a set of high dimensional vectors to a set of two dimensional vectors.

    Take in input list of selectors, then for each point find the closest selected instance and materialize
    an edge between the two. Finally output 2D coordinates of the corresponding graph embedding using the sfdp
    Graphviz algorithm.

    Parameters
    ----------
    compiled : boolean (default: False)
        If set to True then a deep neural network is fit to generalize the embedding computed via the
        graph layout.

    deepnet_learning_rate: float (default: 0.002)
        Learning rate parameter for the deep neural network predictor.

    deepnet_n_hidden_layers: int (default: 1)
        Number of hidden layers for the deep neural network predictor.

    deepnet_n_features_hidden_factor: int (default: 10)
        Factor that multiplies the number of features in the input layer to obtain the number
        of features in each hidden layer for the deep neural network predictor.

    selectors: list of selector objects derived from AbstractSelector
        Each selector object exposes a fit_transform method to select with a characteristic bias
        a subset of the input instances.

    """

    # NOTE(review): `selectors=[QuickShiftSelector()]` is a mutable default
    # argument -- every Embedder2D built without an explicit `selectors` shares
    # the same list and the same selector instance. Consider defaulting to
    # None and building the list inside the body.
    def __init__(self,
                 compiled=False,
                 deepnet_learning_rate=0.002,
                 deepnet_n_hidden_layers=1,
                 deepnet_n_features_hidden_factor=10,
                 selectors=[QuickShiftSelector()],
                 n_nearest_neighbors=10,
                 n_nearest_neighbor_links=1,
                 layout='force',
                 layout_prog='sfdp',
                 layout_prog_args='-Goverlap=scale',
                 n_eigenvectors=10,
                 random_state=1,
                 metric='rbf', **kwds):
        self.compiled = compiled
        self.deepnet_learning_rate = deepnet_learning_rate
        self.deepnet_n_hidden_layers = deepnet_n_hidden_layers
        self.deepnet_n_features_hidden_factor = deepnet_n_features_hidden_factor
        self.selectors = selectors
        self.n_nearest_neighbors = n_nearest_neighbors
        self.n_nearest_neighbor_links = n_nearest_neighbor_links
        self.layout = layout
        self.layout_prog = layout_prog
        self.layout_prog_args = layout_prog_args
        self.n_eigenvectors = n_eigenvectors
        self.metric = metric
        self.kwds = kwds
        self.random_state = random_state
        self.selected_instances_list = []
        self.selected_instances_ids_list = []

    def __repr__(self):
        """Multi-line human-readable summary of the configuration."""
        serial = []
        serial.append('Embedder2D:')
        if self.compiled is True:
            serial.append('compiled: yes')
            serial.append('learning_rate: %.6f' % self.deepnet_learning_rate)
            serial.append('n_features_hidden_factor: %d' % self.deepnet_n_features_hidden_factor)
        else:
            serial.append('compiled: no')
        serial.append('layout: %s' % (self.layout))
        serial.append('layout_prog: %s' % (self.layout_prog))
        if self.layout_prog_args:
            serial.append('layout_prog_args: %s' % (self.layout_prog_args))
        serial.append('n_nearest_neighbor_links: %s' % (self.n_nearest_neighbor_links))
        if self.n_nearest_neighbors is None:
            serial.append('n_nearest_neighbors: None')
        else:
            serial.append('n_nearest_neighbors: %d' % self.n_nearest_neighbors)
        serial.append('metric: %s' % self.metric)
        if self.kwds is None or len(self.kwds) == 0:
            pass
        else:
            serial.append('params:')
            serial.append(serialize_dict(self.kwds))
        serial.append('selectors [%d]:' % len(self.selectors))
        for i, selector in enumerate(self.selectors):
            if len(self.selectors) > 1:
                serial.append('%d/%d  ' % (i + 1, len(self.selectors)))
            serial.append(str(selector))
        return '\n'.join(serial)

    def fit(self, data_matrix, target=None):
        """Dispatch to the compiled (neural-net) or plain fitting path."""
        if self.compiled is True:
            return self.fit_compiled(data_matrix, target=target)
        else:
            return self._fit(data_matrix, target=target)

    def transform(self, data_matrix):
        """Dispatch to the compiled (neural-net) or plain transform path."""
        if self.compiled is True:
            return self.transform_compiled(data_matrix)
        else:
            return self._transform(data_matrix)

    def fit_transform(self, data_matrix, target=None):
        """Dispatch to the compiled or plain fit-then-transform path."""
        if self.compiled is True:
            return self.fit_transform_compiled(data_matrix, target=target)
        else:
            return self._fit_transform(data_matrix, target=target)

    def fit_compiled(self, data_matrix_in, target=None):
        """Compute the graph-layout embedding, then train a Regressor that maps
        the high-dimensional input directly to the 2D coordinates."""
        data_matrix_out = self._fit_transform(data_matrix_in, target=target)
        n_features_in = data_matrix_in.shape[1]
        n_features_out = data_matrix_out.shape[1]
        n_features_hidden = int(n_features_in * self.deepnet_n_features_hidden_factor)
        layers = []
        for i in range(self.deepnet_n_hidden_layers):
            layers.append(Layer("Rectifier", units=n_features_hidden, name='hidden%d' % i))
        layers.append(Layer("Linear", units=n_features_out))
        self.net = Regressor(layers=layers,
                             learning_rate=self.deepnet_learning_rate,
                             valid_size=0.1)
        self.net.fit(data_matrix_in, data_matrix_out)
        return self.net

    def transform_compiled(self, data_matrix):
        """Predict 2D coordinates with the network trained in fit_compiled."""
        return self.net.predict(data_matrix)

    def fit_transform_compiled(self, data_matrix, target=None):
        """fit_compiled followed by transform_compiled on the same data."""
        self.fit_compiled(data_matrix, target=target)
        return self.transform_compiled(data_matrix)

    def _fit(self, data_matrix, target=None):
        # find selected instances: one (instances, ids) pair per selector.
        self.selected_instances_list = []
        self.selected_instances_ids_list = []
        for i, selector in enumerate(self.selectors):
            selected_instances = selector.fit_transform(data_matrix, target=target)
            selected_instances_ids = selector.selected_instances_ids
            self.selected_instances_list.append(selected_instances)
            self.selected_instances_ids_list.append(selected_instances_ids)
        return self

    def _fit_transform(self, data_matrix, target=None):
        """Plain fit followed by the graph-layout transform."""
        return self._fit(data_matrix, target=target)._transform(data_matrix)

    def _transform(self, data_matrix):
        # make a graph with instances as nodes
        graph = self._build_graph(data_matrix)
        # add the distance attribute to edges
        self.graph = self._add_distance_to_edges(graph, data_matrix)
        # use graph layout
        embedded_data_matrix = self._graph_layout(graph)
        # normalize display using 2D PCA
        embedded_data_matrix = PCA(n_components=2).fit_transform(embedded_data_matrix)
        return embedded_data_matrix

    def _add_distance_to_edges(self, graph, data_matrix):
        """Annotate each edge with the Euclidean distance of its endpoints
        ('len' is the edge-length hint honored by Graphviz)."""
        for e in graph.edges():
            src_id, dest_id = e[0], e[1]
            dist = np.linalg.norm(data_matrix[src_id] - data_matrix[dest_id])
            graph[src_id][dest_id]['len'] = dist
        return graph

    def _build_graph(self, data_matrix):
        """One node per instance; edges from the kernel-shift tree plus
        optional links to the nearest selected instances."""
        graph = nx.Graph()
        graph.add_nodes_from(range(data_matrix.shape[0]))

        # build shift tree
        self.link_ids = self._kernel_shift_links(data_matrix)
        for i, link in enumerate(self.link_ids):
            if i != link:
                graph.add_edge(i, link)

        # build knn edges
        if self.n_nearest_neighbor_links > 0:
            # find the closest selected instance and instantiate knn edges
            for selected_instances, selected_instances_ids in \
                    zip(self.selected_instances_list, self.selected_instances_ids_list):
                if len(selected_instances) > 2:
                    graph = self._add_knn_links(graph,
                                                data_matrix,
                                                selected_instances,
                                                selected_instances_ids)
        return graph

    def _kernel_shift_links(self, data_matrix):
        """For every instance, link it to its nearest denser neighbor (or to
        itself when none is found among the first n_nearest_neighbors)."""
        data_size = data_matrix.shape[0]
        kernel_matrix = pairwise_kernels(data_matrix, metric=self.metric, **self.kwds)
        # compute instance density as average pairwise similarity
        density = np.sum(kernel_matrix, 0) / data_size
        # compute list of nearest neighbors
        kernel_matrix_sorted = np.argsort(-kernel_matrix)
        # make matrix of densities ordered by nearest neighbor
        density_matrix = density[kernel_matrix_sorted]
        # if a denser neighbor cannot be found then assign link to the instance itself
        link_ids = list(range(density_matrix.shape[0]))
        # for all instances determine link link
        for i, row in enumerate(density_matrix):
            i_density = row[0]
            # for all neighbors from the closest to the furthest
            for jj, d in enumerate(row):
                # proceed until n_nearest_neighbors have been explored
                if self.n_nearest_neighbors is not None and jj > self.n_nearest_neighbors:
                    break
                j = kernel_matrix_sorted[i, jj]
                if jj > 0:
                    j_density = d
                    # if the density of the neighbor is higher than the density of the instance assign link
                    if j_density > i_density:
                        link_ids[i] = j
                        break
        return link_ids

    def _add_knn_links(self, graph, data_matrix, selected_instances, selected_instances_ids):
        """Link every instance to its nearest selected instances (translated
        back to original ids), skipping self-links."""
        n_neighbors = min(self.n_nearest_neighbor_links, len(selected_instances))
        nn = NearestNeighbors(n_neighbors=n_neighbors)
        nn.fit(selected_instances)
        knns = nn.kneighbors(data_matrix, return_distance=0)
        for i, knn in enumerate(knns):
            # add edges to the knns
            for id in knn:
                original_id = selected_instances_ids[id]
                if i != original_id:
                    graph.add_edge(i, original_id)
        return graph

    def _graph_layout(self, graph):
        """Dispatch on self.layout ('force' or 'laplacian')."""
        if self.layout == 'force':
            return self._layout_force(graph)
        elif self.layout == 'laplacian':
            return self._layout_laplacian(graph)
        else:
            raise Exception('Unknown layout type: %s' % self.layout)

    def _layout_force(self, graph):
        """Graphviz force-directed layout, scaled to zero mean / unit variance.
        NOTE(review): nx.graphviz_layout was relocated to nx.nx_agraph in newer
        networkx releases -- confirm the pinned networkx version supports it."""
        two_dimensional_data_matrix = nx.graphviz_layout(graph,
                                                         prog=self.layout_prog, args=self.layout_prog_args)
        two_dimensional_data_list = [list(two_dimensional_data_matrix[i]) for i in range(len(graph))]
        embedded_data_matrix = scale(np.array(two_dimensional_data_list))
        return embedded_data_matrix

    def _layout_laplacian(self, graph):
        """Spectral layout: real parts of the smallest-real eigenvectors of the
        normalized graph Laplacian, scaled."""
        nlm = nx.normalized_laplacian_matrix(graph)
        eigvals, eigvects = eigs(nlm, k=self.n_eigenvectors, which='SR')
        eigvals, eigvects = np.real(eigvals), np.real(eigvects)
        return scale(eigvects)

    def randomize(self, data_matrix, amount=.5):
        """Randomly re-draw the selector mix, kernel gamma and neighbor
        parameters; `amount` biases how many selectors get included."""
        random.seed(self.random_state)
        inclusion_threshold = random.uniform(amount, 1)
        selectors = []
        if random.random() > inclusion_threshold:
            selectors.append(SparseSelector(random_state=random.randint(1, 1e9)))
        if random.random() > inclusion_threshold:
            selectors.append(MaxVolSelector(random_state=random.randint(1, 1e9)))
        if random.random() > inclusion_threshold:
            selectors.append(QuickShiftSelector(random_state=random.randint(1, 1e9)))
        if random.random() > inclusion_threshold:
            selectors.append(DensitySelector(random_state=random.randint(1, 1e9)))
        if random.random() > inclusion_threshold:
            selectors.append(OnionSelector(random_state=random.randint(1, 1e9)))
        if not selectors:
            selectors.append(DensitySelector(random_state=random.randint(1, 1e9)))
            selectors.append(SparseSelector(random_state=random.randint(1, 1e9)))
        selector = CompositeSelector(selectors=selectors)
        selector.randomize(data_matrix, amount=amount)
        self.selectors = deepcopy(selector.selectors)
        self.metric = 'rbf'
        self.kwds = {'gamma': random.choice([10 ** x for x in range(-3, 3)])}
        if random.random() > inclusion_threshold:
            self.n_nearest_neighbors = random.randint(3, 20)
        else:
            self.n_nearest_neighbors = None
        self.n_nearest_neighbor_links = random.randint(1, 5)
        self.random_state = self.random_state ^ random.randint(1, 1e9)
예제 #51
0
# Two-layer regressor (layers constructed above) trained with plain SGD.
nn = Regressor([hiddenLayer, outputLayer],
               learning_rule='sgd',
               learning_rate=.001,
               batch_size=5,
               loss_type="mse")


# Generate Data
def cubic(x):
    """Return x**3 + x**2 - x - 1.

    Works on scalars and (elementwise) on numpy arrays, since it only uses
    broadcasting-friendly arithmetic.
    """
    return x**3 + x**2 - x - 1


def get_cubic_data(start, end, step_size):
    """Sample the cubic polynomial on ``[start, end)`` at the given step.

    Returns ``(X, y)`` as column vectors of shape ``(n, 1)`` suitable for
    ``Regressor.fit``.
    """
    X = np.arange(start, end, step_size).reshape(-1, 1)
    # cubic() broadcasts over the column vector, so the original per-row
    # Python loop (and the follow-up reshape of y) is unnecessary.
    y = cubic(X)
    return X, y


# Train Model: fit the network on (x, cubic(x)) samples over [-2, 2).
X, y = get_cubic_data(-2, 2, .1)
nn.fit(X, y)

# Predict on the same inputs that were used for training.
predictions = nn.predict(X)

# Visualize: overlay the predicted curve on the true targets and save.
plt.plot(predictions)
plt.plot(y)
plt.savefig("approximate2.png")
예제 #52
0
파일: primes.py 프로젝트: PCJohn/Walrus
    nn.n_iter = n_epoch%save_part
    nn.fit(X_train, Y_train)
    pickle.dump(nn,open(path,'w'))


N = 100
X,Y = create_ds(N)
print X.shape,'--',Y.shape
print X[:5]
print Y[:5]
print '___________'
nn = Regressor(
    layers=[
        #Convolution("Rectifier",channels=1,kernel_shape=(1,1)),
        Layer("Rectifier",units=128),
        Layer("Rectifier",units=128),
        Layer("Linear",units=64),
        Layer("Tanh")],
    learning_rate=0.01,
    verbose=True)
train(nn, X, Y, './mod_prim', 2, 2)

print "#-----TESTING-----#"
nn = pickle.load(open('./mod_prim','r'))
test = create_ds(2*N)
pred = nn.predict(test)
for i,p in enumerate(pred):
    #plt.imshow(test[i])
    #plt.show()
    print test[i],' == ',round(1/p)
예제 #53
0
class Learn:
    """Neural-network player for a dodge-the-missiles game (module ``MJ``).

    A Regressor scores (screen image, candidate move sequence) pairs with the
    predicted probability of surviving that sequence; the agent plays the
    best-scoring sequence.  Moves are 'q' (left) / 'd' (right).

    Note: the body is written to run under both Python 2 and Python 3
    (``//`` for index arithmetic, single-argument ``print(...)``, ``range``
    instead of ``xrange``) without changing Python 2 behavior.
    """

    # Field geometry: the net sees an nx x ny crop of the play field.
    nx = 20
    ny = 20
    n_cell = nx * ny
    n_coups = 8        # number of moves in one candidate sequence
    coups_sautes = 60  # moves skipped after each game restart

    def __init__(self, new=False, display=False):
        """Create the game, enumerate move sequences, and load/seed the net.

        new=True builds a fresh Regressor and fits it on one dummy sample;
        otherwise the pickled network 'nn.pkl' is loaded.
        """
        self.possibilities = generate(Learn.n_coups)
        np.random.shuffle(self.possibilities)
        self.explore = 0.
        self.jeu = MJ.Jeu(autorepeat=False, display=display)
        self.jeu.restart(Learn.coups_sautes)
        self.image = self.get_image()
        # `//` keeps this an int index under Python 3 (py2 `/` already floors).
        seed_choice = self.possibilities[Learn.n_coups // 2 - 1]
        if new:
            self.nn = Regressor(layers=[
                Layer("Linear", units=(Learn.n_cell + Learn.n_coups)),
                Layer("Sigmoid", units=1000),
                Layer("Sigmoid")
            ],
                                learning_rate=0.01,
                                n_iter=1)
            # One dummy fit initializes the network weights/shapes.
            self.nn.fit(self.good_shape(self.image, seed_choice),
                        np.array([[0]]))
        else:
            self.nn = pickle.load(open('nn.pkl', 'rb'))
            self.nn.fit(self.good_shape(self.image, seed_choice),
                        np.array([[1]]))
        self.current_data_set = []

    def good_shape(self, image, instructions):
        """Pack an image + move sequence into one (1, n_cell + n_coups) row.

        `instructions` is an array of n_coups 0/1 flags.  The x10 scaling is
        kept from the original training setup.
        """
        tab = np.zeros((1, Learn.n_cell + Learn.n_coups))
        tab[0, :] = np.append(image.flatten(), instructions)
        return 10 * tab

    def play(self, num_iter=1000):
        """Play num_iter steps, each time executing the first move of the
        sequence the network scores highest (restarting on death)."""
        self.set_display(True)
        predicted_outcome = np.zeros(2 ** Learn.n_coups)
        for s in range(num_iter):
            self.image = self.get_image()
            if s % 100 == 0:
                print(s)
            outcome = 1
            indice_max = 0
            chosen = None
            for j, elt in enumerate(self.possibilities):
                a = self.nn.predict(self.good_shape(self.image, elt))[0][0]
                predicted_outcome[j] = a
                if a > 0.99:
                    # Confident enough — stop scanning and play this one.
                    chosen = j
                    break
                elif a > predicted_outcome[indice_max]:
                    indice_max = j
            # BUG FIX: the original unconditionally reassigned i = indice_max
            # after the loop, discarding the early-exit choice made above.
            i = indice_max if chosen is None else chosen
            first_move = self.possibilities[i][0]
            if outcome == 1:
                if first_move == 1:
                    instr = 'd'
                elif first_move == 0:
                    instr = 'q'
                if self.jeu.update_all(instr) == "Dead":
                    outcome = 0
                    self.jeu.restart(Learn.coups_sautes)

    def auc(self, num_iter=10000):
        """ROC AUC of predicted survival scores vs. actually played outcomes."""
        real_outputs = []
        predicted_outputs = []
        for _ in range(num_iter):
            outcome = 1
            poss = self.possibilities
            i = rd.randint(0, len(poss) - 1)
            elt = poss[i]
            predicted_outputs.append(
                self.nn.predict(self.good_shape(self.image, elt))[0])
            # 80% of the time, actually play a different random sequence than
            # the one that was scored (exploration of the outcome space).
            if rd.random() < 0.8:
                i = int(rd.random() * 2 ** (Learn.n_coups))
            for move in self.possibilities[i]:
                if outcome == 1:
                    instr = 'd' if move else 'q'
                    if self.jeu.update_all(instr) == "Dead":
                        outcome = 0
                        self.jeu.restart(Learn.coups_sautes)
            real_outputs.append(outcome)
            self.image = self.get_image()
        fpr, tpr, thresholds = metrics.roc_curve(real_outputs,
                                                 predicted_outputs)
        return metrics.auc(fpr, tpr)

    def benchmark(self, num_iter=1000):
        """Print mean survival time (+/- ~95% CI) of a uniform random policy."""
        temps_total = []
        t = 0
        while t < num_iter:
            self.jeu.restart(Learn.coups_sautes)
            while True:
                instr = 'q' if rd.random() < 0.5 else 'd'
                if self.jeu.update_all(instr) == "Dead":
                    t += 1
                    temps_total.append(self.jeu.temps)
                    self.jeu.restart()
                    break
        self.image = self.get_image()
        print(str(sum(temps_total) * 1. / num_iter) + " +/- " + str(
            0.96 * np.sqrt(np.var(np.array(temps_total)) / num_iter)))

    def get_image(self):
        """Render the game state as a 20x20 array.

        Background cells are 0.5; cells holding a close missile (y < 0.5)
        are -1, positioned relative to the player and wrapped horizontally.
        """
        nx_im, ny = 2 * Learn.nx, Learn.ny
        tab = np.ones((nx_im, ny)) / 2.
        # (The original assigned the player position twice; once suffices.)
        x, y = self.jeu.joueur.position
        for missile in self.jeu.missiles:
            x_m, y_m = missile.position
            # Horizontal offset of the missile relative to the player.
            x_p = x_m - (x - 0.5)
            if y_m < 0.5:
                tab[int(nx_im * x_p) % nx_im, int(ny * y_m)] = -1
        # Keep the central 20-row band around the player.
        return tab[10:30, ::]

    def set_display(self, boolean):
        """Recreate the game with rendering turned on/off and refresh state."""
        self.display = boolean
        self.jeu = MJ.Jeu(autorepeat=False, display=self.display)
        self.jeu.restart(Learn.coups_sautes)
        self.image = self.get_image()

    def save_rd_train_set(self, num_iter=5000):
        """Play random move sequences and cache (image, choice, outcome)
        triples in self.current_data_set."""
        train_set = []
        for _ in range(num_iter):
            self.jeu.restart(100)
            im = self.get_image()
            choice = self.possibilities[rd.randint(0, 2 ** Learn.n_coups - 1)]
            outcome = 1
            for move in choice:
                if outcome == 1:
                    instr = 'd' if move else 'q'
                    if self.jeu.update_all(instr) == "Dead":
                        outcome = 0
            train_set.append((im, choice, outcome))
        self.current_data_set = train_set
        return

    def intensive_train(self):
        """Fit the network on every cached sample, then pickle it to nn.pkl."""
        for im, choice, outcome in self.current_data_set:
            self.nn.fit(self.good_shape(im, choice), np.array([[outcome]]))
        print("Commence à sauver")
        pickle.dump(self.nn, open('nn.pkl', 'wb'))
        print("NN Saved")

    def error_on_train_set(self):
        """Mean absolute prediction error over the cached data set."""
        error = 0.
        for im, choice, outcome in self.current_data_set:
            s = self.nn.predict(self.good_shape(im, choice))
            error += abs(s[0][0] - outcome)
        # BUG FIX: the original divided by the undefined name `train_set`
        # (a NameError) — the cached set lives in self.current_data_set.
        return error / len(self.current_data_set)

    def auc_on_train_set(self):
        """ROC AUC of the network's predictions over the cached data set."""
        real_outputs = []
        predicted_outputs = []
        for im, choice, outcome in self.current_data_set:
            predicted_outputs.append(
                self.nn.predict(self.good_shape(im, choice))[0])
            real_outputs.append(outcome)
        fpr, tpr, thresholds = metrics.roc_curve(real_outputs,
                                                 predicted_outputs)
        return metrics.auc(fpr, tpr)
예제 #54
0
# Regressor: one wide rectifier layer into a linear output, trained for
# 5 epochs with 20% of the data held out for validation.
nn = Regressor(layers=[
    Layer("Rectifier", units=1024),
    Layer("Linear"),
],
               learning_rate=0.001,
               n_iter=5,
               verbose=1,
               valid_size=0.2)

# NOTE(review): `a_in` / `a_out` are presumably assembled earlier in the
# full script; they are not defined in this excerpt — verify upstream.
nn.fit(a_in, a_out)
"""
# RECONSTRUCTION.
samples = []
for i in range(SAMPLES):
    yh, xh = random.randint(8*SCALE, img_high.shape[0]-16*SCALE), random.randint(8*SCALE, img_high.shape[1]-16*SCALE)
    yl, xl = yh / SCALE, xh / SCALE
    a_in[i] = img_low[yl-4:yl+4,xl-4:xl+4].flatten() / 255.0
    samples.append((yh, xh))
"""

a_test = nn.predict(a_in)

img_test = numpy.zeros(img_high.shape, dtype=numpy.float32)
# NOTE(review): `samples` is only assigned inside the disabled triple-quoted
# block above, so this loop relies on a definition made earlier in the full
# script — confirm, otherwise this raises NameError.
for i, (yh, xh) in enumerate(samples):
    # Paste each predicted 16x16 RGB patch back at its sampled location,
    # undoing the [-0.5, 0.5] normalization.
    img_test[yh - 8:yh + 8,
             xh - 8:xh + 8] = (a_test[i].reshape(16, 16, 3) + 0.5) * 255.0

print "Reconstructing..."
# scipy.misc.imsave('VG_DT1567.test.png', img_test)
scipy.misc.imsave('VG_DT1567.plain.png', img_plain)
예제 #55
0
	#-------------------------------------------------------------- 
	# Regression models
	#--------------------------------------------------------------

	# Neural Net regression
	
	nn_regr = Regressor(
	layers=[
		Layer("Rectifier", units=100),
		Layer("Linear")],
	learning_rate=0.02,
	n_iter=10)
	
	nn_regr.fit(features_train, targets_train)
	
	nn_prediction = nn_regr.predict(features_test)
	
	# mean square error
	nn_regr_error = np.mean((nn_prediction-targets_test)**2)


	#-------------------------------------------------------------- 
	# Write error to DataFrame
	#--------------------------------------------------------------

	table_val = pd.DataFrame({
		'training_date_start' : [dataset['date_game'][train_start].isoformat()], 
		'training_date_end' : [dataset['date_game'][train_end].isoformat()],
		'test_date_start' : [dataset['date_game'][test_start].isoformat()],
		'test_date_end' : [dataset['date_game'][test_end].isoformat()],
		'training_sample_size' : [training_increment],