Example #1
def create_series(in_array,
                  window_size,
                  period,
                  minV,
                  maxV,
                  layer_nodes=[2, 3],
                  sigmoid='tanh',
                  epochs=50000):
    global_max = maxV
    global_min = minV

    X_train = []
    y_train = []
    for i in range(len(in_array) - window_size):
        X = []
        for j in range(window_size):
            X.append(_scale_to_binary(in_array[i + j], global_min, global_max))
        X_train.append(X)
        y_train.append(
            _scale_to_binary(in_array[i + window_size], global_min,
                             global_max))

    X_train = np.array(X_train)
    y_train = np.array(y_train)

    layers = []
    layers.append(window_size)
    for i in range(len(layer_nodes)):
        layers.append(layer_nodes[i])

    n = NeuralNetwork(layers, sigmoid)

    n.fit(X_train, y_train, epochs)

    X_test = in_array[-window_size:]

    for i in range(len(X_test)):
        X_test[i] = _scale_to_binary(X_test[i], global_min, global_max)

    preds = []
    X_test = deque(X_test)

    for i in range(period):
        val = n.predict(X_test)
        preds.append(rescale_from_binary(val[0], global_min, global_max))

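        # slide the window: rotate left to drop the oldest value, then store
        # the new prediction in the last slot so it becomes the newest input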
        X_test.rotate(-1)
        X_test[window_size - 1] = val[0]

    return preds
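
The helpers _scale_to_binary and rescale_from_binary are not shown in this example. A minimal sketch of what they presumably do, assuming plain min/max scaling into and out of [0, 1] (an assumption, not the original implementations):

def _scale_to_binary(value, global_min, global_max):
    # assumed behaviour: map value from [global_min, global_max] into [0, 1]
    return (value - global_min) / (global_max - global_min)


def rescale_from_binary(value, global_min, global_max):
    # assumed behaviour: inverse mapping back to the original range
    return value * (global_max - global_min) + global_min
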
def cross_validate(network_shape, epochs_num, learn_rate, _groups_x,
                   _groups_y):
    k = _groups_x.shape[0]
    _sum = 0
    results = np.zeros(k)
    for i in range(k):

        train_x = None
        train_y = None
        valid_x = np.copy(
            _groups_x[i])  # the validation set for the i'th iteration.
        valid_y = np.copy(_groups_y[i])

        net = NeuralNetwork(network_shape, epochs_num, learn_rate)

        for j in range(k):
            if j != i:
                # arrange the train set for the i'th iteration.
                if train_x is None:
                    train_x = np.copy(_groups_x[j])
                    train_y = np.copy(_groups_y[j])
                else:
                    train_x = np.concatenate((train_x, _groups_x[j]), axis=0)
                    train_y = np.concatenate((train_y, _groups_y[j]), axis=0)

        old_mins, denoms = norm.minmax_params(train_x)
        train_x = norm.minmax(train_x, 0, 1)
        valid_x = norm.minmax(valid_x, 0, 1, old_mins, denoms)

        net.train(train_x, train_y)
        results[i] = net.accuracy(valid_x, valid_y)

    print(results)
    return np.average(results)
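
A minimal usage sketch for the cross-validation routine above, assuming the k folds are prepared by splitting full NumPy arrays; the shapes, network_shape and hyperparameters below are illustrative placeholders, not values from the original project:

import numpy as np

X = np.random.rand(100, 8)            # placeholder feature matrix
y = np.random.randint(0, 3, 100)      # placeholder labels
k = 5
_groups_x = np.array(np.array_split(X, k))   # shape (k, 20, 8)
_groups_y = np.array(np.array_split(y, k))   # shape (k, 20)
# avg_acc = cross_validate([8, 16, 3], 50, 0.01, _groups_x, _groups_y)
# (call left commented out: NeuralNetwork and norm come from the example's own modules)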
Example #5
def main():
    node_pairs_list = read_file("digit-examples-all.txt")
    train_split = 1

    print("Split: {0}0% train, {1}0% test".format(str(train_split),
                                                  str(10 - train_split)))
    train_set_size = (5620 // 10) * train_split

    training_set = node_pairs_list[0:train_set_size + 1]
    test_set = node_pairs_list[train_set_size + 1:]

    #training_set = node_pairs_list[0 : 50]
    #test_set = node_pairs_list[50 : 100]

    weights = []

    for i in range(64):
        weights.append([random.uniform(-1, 1) for x in range(10)])

    for pair in training_set:
        neural_net = NeuralNetwork(pair[0], pair[1], weights)
        neural_net.train_NN()
        weights = neural_net.weights_list

    euclidean_distance = 0

    for pair in test_set:
        neural_net = NeuralNetwork(pair[0], pair[1], weights)
        euclidean_distance += neural_net.test_NN()
        weights = neural_net.weights_list

    if (euclidean_distance == 0):
        avg_euclidean_distance = 0
    else:
        avg_euclidean_distance = euclidean_distance / len(test_set)

    print("Average Euclidean Distance: {}".format(avg_euclidean_distance))
Example #6
'''
    Load the data. For this demo, we're using sklearn's digits dataset
    Digits are 8x8 pixel images. Each row is one image, in a linear format,
    where columns 65-74 correspond to one hot encoded responses representing
    digits 0 through 9. 1797 rows 74 columns
'''
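
# A hypothetical sketch of how a file with this layout could be produced from
# sklearn's digits dataset (not part of the original demo; the real
# transformed.csv may have been generated differently):
#
#   from sklearn.datasets import load_digits
#   digits = load_digits()
#   one_hot = np.zeros((digits.target.size, 10))
#   one_hot[np.arange(digits.target.size), digits.target] = 1
#   np.savetxt("transformed.csv",
#              np.hstack([digits.data, one_hot]),  # 1797 rows x 74 columns
#              delimiter=",")
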
data = np.loadtxt("transformed.csv", delimiter = ',')
m = len(data)

# Split the data into training set and test set.
train_set = data[:(3*m//4),:]
test_set = data[(3*m//4):,:]

# Instantiate a new neural network. 64 input, 64 hidden, 10 output nodes.
NN = NeuralNetwork(64,HIDDEN_NODES,10,LEARNING_RATE,ITERATIONS)

# Train on the training set, test on the test set. The test() function
# will print out the percent correctness on the test set.
errors = NN.train(train_set)
NN.test(test_set)



# Plot the error curve
if VIEW_PLOT:
    plt.plot(errors)
    plt.title("Average Error Per Iteration On Training Set")
    plt.xlabel("Iteration")
    plt.ylabel("Average Error")
    plt.show()
Example #7
import numpy as np
from sklearn.datasets import load_digits

digits = load_digits()

x = np.array(digits.data[:100])
y = np.array([[int(i == digit) for i in range(10)]
              for digit in digits.target[:100]])

validation_x = np.array(digits.data[100:120])
validation_y = np.array([[int(i == digit) for i in range(10)]
                         for digit in digits.target[100:120]])

training_data = {'inputs': x, 'labels': y}
validation_data = {'inputs': validation_x, 'labels': validation_y}

P = Preprocessor.from_data(training_data)
NN = NeuralNetwork.new([64, 10, 10], 'tanh')

training_data = P.transform_data(training_data)
validation_data = P.transform_data(validation_data)

trainer = Trainer(NN, training_data, validation_data, classification=True)

trainer.train({
    'learning_rate': 0.01,
    'epoch_blocks': 10,
    'batch_size': 100
}, {
    'max_epochs': 2000,
    'max_stall_blocks': 10
})
Example #8
            #result = [a - b for a, b in zip(A, B)]
            result = np.subtract(A, B)
        return (result)  #This is the next choice


if __name__ == '__main__':

    test = test()
    print("getting dataset")
    test.getDataset(1)
    print(np.asarray(test.y).reshape((9, -1)))
    print(np.shape(test.X))
    print(np.shape(test.y))

    nn = NeuralNetwork([9, 18, 18, 9])
    nn.train(X=np.asarray(test.X).reshape((9, -1)),
             y=np.asarray(test.y).reshape((9, -1)),
             batch_size=9,
             epochs=2,
             learning_rate=0.4,
             print_every=10,
             validation_split=0.2,
             tqdm_=False,
             plot_every=20000)

    #X is the current gamestate and y is the next move to make
    #X = np.random.random((1,9))
    #print(X)

    #network = Network()
class bird():
    def __init__(self, args=None):
        if args is None:
            #random.seed(1)
            self.nn = NeuralNetwork(5, 4, 1)
        else:
            self.nn = args['nn']

    def initialize(self, args=None):

        if args is None:
            #random.seed(1)
            self.nn = NeuralNetwork(5, 5, 1)
        else:
            self.nn = args['nn']

        self.crashed = False
        # movementInfo = showWelcomeAnimation()
        # select random player sprites
        randPlayer = random.randint(0, len(settings.PLAYERS_LIST) - 1)

        #print(type(settings.IMAGES['player'][0][0]))

        self.movementInfo = {
            'playery':
            int((settings.SCREENHEIGHT -
                 settings.IMAGES['player'][0][0].get_height()) / 2),
            'basex':
            -10,
            'playerIndexGen':
            cycle([0, 1, 2, 1]),
        }
        self.basex = self.movementInfo['basex']

        self.score = self.playerIndex = self.loopIter = 0
        self.playerIndexGen = self.movementInfo['playerIndexGen']
        self.playerx, self.playery = int(settings.SCREENWIDTH *
                                         0.2), self.movementInfo['playery']

        # player velocity, max velocity, downward acceleration, acceleration on flap
        self.playerVelY = -9  # player's velocity along Y, default same as playerFlapped
        self.playerMaxVelY = 10  # max vel along Y, max descend speed
        self.playerMinVelY = -8  # min vel along Y, max ascend speed
        self.playerAccY = 1  # player's downward acceleration
        self.playerRot = 45  # player's rotation
        self.playerVelRot = 3  # angular speed
        self.playerRotThr = 20  # rotation threshold
        self.playerFlapAcc = -9  # players speed on flapping
        self.playerFlapped = False  # True when player flaps
        # hitmask for player

    def getHitmask(self, image):
        """returns a hitmask using an image's alpha."""
        mask = []
        for x in range(image.get_width()):
            mask.append([])
            for y in range(image.get_height()):
                mask[x].append(bool(image.get_at((x, y))[3]))
        return mask

    def checkcrash(self, upperpipes=None, lowerpipes=None):
        return self.checkCrashhelper(
            {
                'x': self.playerx,
                'y': self.playery,
                'index': self.playerIndex
            }, upperpipes, lowerpipes)

    def checkCrashhelper(self, player, upperPipes, lowerPipes):
        """returns True if player collders with base or pipes."""
        pi = player['index']

        player['w'] = settings.IMAGES['player'][0][0].get_width()
        player['h'] = settings.IMAGES['player'][0][0].get_height()

        # if player crashes into ground
        if player['y'] + player['h'] >= settings.BASEY - 1:
            return [True, True]
        # if player['y'] + player['h'] <= SCREENHEIGHT - 1:
        #     return [True, True]
        else:

            playerRect = pygame.Rect(player['x'], player['y'], player['w'],
                                     player['h'])

            for uPipe, lPipe in zip(upperPipes, lowerPipes):
                # upper and lower pipe rects
                uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], settings.pipeW,
                                        settings.pipeH)
                lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], settings.pipeW,
                                        settings.pipeH)

                # player and upper/lower pipe hitmasks
                pHitMask = settings.HITMASKS['player'][pi][0]
                uHitmask = settings.HITMASKS['pipe'][0]
                lHitmask = settings.HITMASKS['pipe'][1]

                # if bird collided with upipe or lpipe
                uCollide = self.pixelCollision(playerRect, uPipeRect, pHitMask,
                                               uHitmask)
                lCollide = self.pixelCollision(playerRect, lPipeRect, pHitMask,
                                               lHitmask)

                if uCollide or lCollide:
                    return [True, False]

        return [False, False]

    def pixelCollision(self, rect1, rect2, hitmask1, hitmask2):
        """Checks if two objects collide and not just their rects"""
        rect = rect1.clip(rect2)

        if rect.width == 0 or rect.height == 0:
            return False

        x1, y1 = rect.x - rect1.x, rect.y - rect1.y
        x2, y2 = rect.x - rect2.x, rect.y - rect2.y

        for x in range(rect.width):
            for y in range(rect.height):
                if hitmask1[x1 + x][y1 + y] and hitmask2[x2 + x][y2 + y]:
                    return True
        return False

    def decide_to_flap(self, args=None):
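        # inputs fed to the network: bird y position, horizontal distance to
        # the closest pipe, upper and lower pipe y positions, and the bird's
        # vertical velocity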
        inputs = [
            args['y'], args['pipex'], args['upipey'], args['lpipey'],
            args['vely']
        ]
        hidden_state, output = self.nn.think(np.array(inputs))
        #print(output)
        if output[0] > 0.5:
            #print("*True")
            return True
        else:
            #print("*False")
            return False

    def think(self, upperPipes, lowerPipes):
        closestpipe = None
        closestpipeindex = 0
        closestDistance = math.inf
        for index, pipe in enumerate(upperPipes):
            pipeMidPos = pipe['x'] + settings.IMAGES['pipe'][0][0].get_width(
            ) / 2
            dist = pipeMidPos - self.playerMidPos
            if dist > 0 and closestDistance > dist:
                closestpipe = pipe
                closestDistance = dist
                closestpipeindex = index

        if len(upperPipes) > 0:
            pipex = closestpipe['x'] + settings.IMAGES['pipe'][0][0].get_width(
            ) / 2 - self.playerMidPos
            upipey = closestpipe['y'] + settings.IMAGES['pipe'][0][
                0].get_height()
            lpipey = lowerPipes[closestpipeindex]['y']
            # print("x : {} y : {} pipex : {} upipey: {} lpipey: {}".format(playerMidPos, playerMidPosy , pipex,upipey,lpipey))

        if self.playery > -2 * settings.IMAGES['player'][0][0].get_height():
            args = {}
            args['y'], args['pipex'], args['lpipey'], args[
                'upipey'] = self.playerMidPosy / settings.SCREENHEIGHT, pipex / settings.SCREENWIDTH, lpipey / settings.SCREENHEIGHT, upipey / settings.SCREENHEIGHT
            args['y'], args['pipex'], args['lpipey'], args[
                'upipey'] = self.playerMidPosy, pipex, lpipey, upipey
            args['vely'] = self.playerVelY
            # print(args)
            self.playerFlapped = self.decide_to_flap(args=args)
            if self.playerFlapped:
                self.playerVelY = self.playerFlapAcc

        # playerIndex basex change
        if (self.loopIter + 1) % 3 == 0:
            self.playerIndex = next(self.playerIndexGen)
        self.loopIter = (self.loopIter + 1) % 30
        self.basex = -((-self.basex + 100) % settings.baseShift)

        # rotate the player
        if self.playerRot > -90:
            self.playerRot -= self.playerVelRot

        # player's movement
        if self.playerVelY < self.playerMaxVelY and not self.playerFlapped:
            self.playerVelY += self.playerAccY
        if self.playerFlapped:
            self.playerFlapped = False
            # more rotation to cover the threshold (calculated in visible rotation)
            self.playerRot = 45

        self.playerHeight = settings.IMAGES['player'][
            self.playerIndex][0].get_height()
        self.playery += min(self.playerVelY,
                            settings.BASEY - self.playery - self.playerHeight)

    def update_score(self, upperPipes):
        # check for score
        self.playerMidPos = self.playerx + settings.IMAGES['player'][0][
            0].get_width() / 2
        self.playerMidPosy = self.playery
        indexiter = -1
        for pipe in upperPipes:
            pipeMidPos = pipe['x'] + settings.IMAGES['pipe'][0][0].get_width(
            ) / 2
            indexiter += 1
            if pipeMidPos <= self.playerMidPos < pipeMidPos + 4:
                # print("Check 1")
                self.score += 1

    def update_surface(self):
        # Player rotation has a threshold
        self.visibleRot = self.playerRotThr
        if self.playerRot <= self.playerRotThr:
            self.visibleRot = self.playerRot
        self.playerSurface = pygame.transform.rotate(
            settings.IMAGES['player'][self.playerIndex][0], self.visibleRot)
Example #10
import random

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split

from NeuralNet import NeuralNetwork

digits = load_digits()

X = digits.data
Y = digits.target

Y_classes = np.zeros((X.shape[0], 10))

for i in range(Y.shape[0]):
    Y_classes[i, Y[i]] = 1

Y = Y_classes

X_train, X_test, y_train, y_test = train_test_split(X, Y)
nn = NeuralNetwork(X_train, y_train, X_train.shape[1], 0.01, 0.1, 1000, 100,
                   100, y_train.shape[1])
# nn.train_neural_network()

# Save theta values.
# nn.save_theta()

# This is executed once we have trained the neural network.
index = random.randrange(0, X_test.shape[0])

nn.load_theta('theta0.csv', 'theta1.csv', 'theta2.csv')

prediction = np.argmax(nn.predict(X_test[index, :].reshape((-1, 1))))
label = np.argmax(y_test[index])

plt.gray()
plt.matshow(X_test[index, :].reshape((8, 8)))
Example #12
def main():
    l1 = NeuronLayer((28, 28), True, False)
    l2 = NeuronLayer((10, 10))
    l3 = NeuronLayer((10,), False, True)
    
    network = NeuralNetwork()
    
    network.add_layer(l1)
    network.add_layer(l2)
    network.add_layer(l3)
    network.connect_layers()
    
    pr = cProfile.Profile()
    pr.enable()
    
    training_images = os.path.abspath(os.path.join(MAIN_MODULE_PATH, "..", "data", "train-images.idx3-ubyte"))
    training_labels = os.path.abspath(os.path.join(MAIN_MODULE_PATH, "..", "data", "train-labels.idx1-ubyte"))
    
    network.load_data(training_images, training_labels)
    
    test_images = os.path.join(MAIN_MODULE_PATH, "..", "data", "t10k-images.idx3-ubyte")
    test_labels = os.path.join(MAIN_MODULE_PATH, "..", "data", "t10k-labels.idx1-ubyte")
    
    network.load_test_data(test_images, test_labels)
    
    network.SGD(0.1, 0.1, 30, 10)
    
    pr.disable()
    pr.print_stats(sort="cumtime")
Example #13
def run_neural_nets(url_feature="", attention_url="", url_weight="sp", encoder_length=24, encoder_size=15, decoder_length=8, decoder_size=9, is_test=False, restore=False, model="NN", pre_train=False):
    if model == "NN":
        model = NeuralNetwork(encoder_length=encoder_length, encoder_vector_size=encoder_size, decoder_length=decoder_length, decoder_vector_size=decoder_size)
    elif model == "SAE":
        model = StackAutoEncoder(encoder_length=encoder_length, encoder_vector_size=encoder_size, decoder_length=decoder_length, pre_train=pre_train)
    else:
        model = Adain(encoder_length=encoder_length, encoder_vector_size=encoder_size, decoder_length=decoder_length)
    print('==> initializing models')
    with tf.device('/%s' % p.device):
        model.init_model()
        init = tf.global_variables_initializer()
        saver = tf.train.Saver()
    utils.assert_url(url_feature)

    tconfig = get_gpu_options()
    sum_dir = 'summaries'
    if not utils.check_file(sum_dir):
        os.makedirs(sum_dir)

    train_writer = None
    with tf.Session(config=tconfig) as session:
        if not restore:
            session.run(init)
        else:
            print("==> Reload pre-trained weights")
            saver.restore(session, url_weight)
            url_weight = url_weight.split("/")[-1]
            url_weight = url_weight.rstrip(".weights")
        
        if not is_test:
            suf = time.strftime("%Y.%m.%d_%H.%M")
            train_writer = tf.summary.FileWriter(sum_dir + "/" + url_weight + "_train", session.graph, filename_suffix=suf)
            valid_writer = tf.summary.FileWriter(sum_dir + "/" + url_weight + "_valid", session.graph, filename_suffix=suf)

        print("==> Loading dataset")
        dataset = utils.load_file(url_feature)
        if dataset:
            dataset = np.asarray(dataset, dtype=np.float32)
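            # keep only the second half of the dataset for this run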
            lt = len(dataset)
            st = int(lt/2)
            lt = lt - st
            dataset = dataset[st:,:,:]
            train, valid = utils.process_data_grid(lt, p.batch_size, encoder_length, decoder_length, is_test)
            if attention_url:
                attention_data = utils.load_file(attention_url)
            else:
                attention_data = None
            model.set_data(dataset, train, valid, attention_data, session)
            if not is_test:
                best_val_epoch = 0
                best_val_loss = float('inf')
                # best_overall_val_loss = float('inf')
                print('==> starting training')
                for epoch in xrange(p.total_iteration):
                    print('Epoch {}'.format(epoch))
                    start = time.time()
                    train_loss, _ = model.run_epoch(session, train, epoch, train_writer, train_op=model.train_op, train=True)
                    print('Training loss: {}'.format(train_loss))

                    valid_loss, _ = model.run_epoch(session, valid, epoch, valid_writer)
                    print('Validation loss: {}'.format(valid_loss))

                    if valid_loss < best_val_loss:
                        best_val_loss = valid_loss
                        best_val_epoch = epoch
                        print('Saving weights')
                        saver.save(session, 'weights/%s.weights' % url_weight)

                    if (epoch - best_val_epoch) > p.early_stopping:
                        break
                    print('Total time: {}'.format(time.time() - start))
            else:
                # saver.restore(session, url_weight)
                print('==> running model')
                _, preds = model.run_epoch(session, model.train, shuffle=False)
                pt = re.compile("weights/([A-Za-z0-9_.]*).weights")
                name = pt.match(url_weight)
                if name:
                    name_s = name.group(1)
                else:
                    name_s = url_weight
                utils.save_file("test_sp/%s" % name_s, preds)
Example #14
def run_neural_nets(dataset,
                    url_weight="sp",
                    encoder_length=24,
                    encoder_size=15,
                    decoder_length=8,
                    decoder_size=9,
                    is_test=False,
                    restore=False,
                    model="NN",
                    pre_train=False,
                    forecast_factor=0):
    tf.reset_default_graph()
    print("training %s with decoder_length = %i" % (model, decoder_length))
    if model == "NN":
        model = NeuralNetwork(encoder_length=encoder_length,
                              encoder_vector_size=encoder_size,
                              decoder_length=decoder_length,
                              decoder_vector_size=decoder_size)
    elif model == "SAE":
        model = StackAutoEncoder(encoder_length=encoder_length,
                                 encoder_vector_size=encoder_size,
                                 decoder_length=decoder_length,
                                 pre_train=pre_train,
                                 forecast_factor=forecast_factor)
    else:
        model = Adain(encoder_length=encoder_length,
                      encoder_vector_size=encoder_size,
                      decoder_length=decoder_length,
                      forecast_factor=forecast_factor)
    print('==> initializing models')
    with tf.device('/%s' % p.device):
        model.init_model()
        init = tf.global_variables_initializer()
        saver = tf.train.Saver()
    tconfig = get_gpu_options()
    with tf.Session(config=tconfig) as session:
        if not restore:
            session.run(init)
        else:
            print("==> Reload pre-trained weights")
            saver.restore(
                session,
                "weights/%s_%ih.weights" % (url_weight, decoder_length))

        print("==> Loading dataset")

        train, valid = utils.process_data_grid(len(dataset), p.batch_size,
                                               encoder_length, decoder_length,
                                               is_test)
        model.set_data(dataset, train, valid, None, session)
        if not is_test:
            best_val_epoch = 0
            best_val_loss = float('inf')
            print('==> starting training')
            for epoch in xrange(p.total_iteration):
                print('Epoch {}'.format(epoch))
                start = time.time()
                train_loss, _ = model.run_epoch(session,
                                                train,
                                                epoch,
                                                None,
                                                train_op=model.train_op,
                                                train=True)
                print('Training loss: {}'.format(train_loss))

                valid_loss, _ = model.run_epoch(session, valid, epoch, None)
                print('Validation loss: {}'.format(valid_loss))

                if valid_loss < best_val_loss:
                    best_val_loss = valid_loss
                    best_val_epoch = epoch
                    print('Saving weights')
                    saver.save(
                        session, 'weights/%s_%ih.weights' %
                        (url_weight, decoder_length))

                if (epoch - best_val_epoch) > p.early_stopping:
                    break
                print('Total time: {}'.format(time.time() - start))
        else:
            # saver.restore(session, url_weight)
            print('==> running model')
            _, preds = model.run_epoch(session,
                                       model.train,
                                       shuffle=False,
                                       stride=2)
            return preds
    return None
Example #15
    plt.grid(1)
    plt.xlabel('epochs')
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.plot(range(history['epochs'])[:n],
             history['train_acc'][:n],
             label='train_acc')
    plt.plot(range(history['epochs'])[:n],
             history['test_acc'][:n],
             label='test_acc')
    plt.title('train & test accuracy')
    plt.grid(1)
    plt.xlabel('epochs')
    plt.legend()


#LINEAR PROBLEM
data = datasets.make_blobs(n_samples=1000, centers=2, random_state=2)
X = data[0].T
y = np.expand_dims(data[1], 1).T

neural_net = NeuralNetwork([2, 4, 4, 1], seed=0)
history = neural_net.train(X=X,
                           y=y,
                           batch_size=16,
                           epochs=100,
                           learning_rate=0.4,
                           validation_split=0.2)

plot_history(history)
Example #16
import os.path
import numpy as np
from sklearn import datasets
from matplotlib import pyplot as plt
from NeuralNet import NeuralNetwork


def generate_halfmoon_dataset(n_samples=200, shuffle=True, noise=0):
    np.random.seed(0)
    X, y = datasets.make_moons(n_samples, shuffle=shuffle, noise=noise)
    return X, y


X_train, y_train = generate_halfmoon_dataset(noise=0.1)
X_test, y_test = generate_halfmoon_dataset(noise=0.1)

nn = NeuralNetwork([2, 4, 2, 1], 0.03)
if (not os.path.isfile("nn_halfmoon_noise_0.1_tanh.npy")):
    train = [X_train, y_train]
    nn.train_network(train, n_epochs=0, threshold=0.001)
    np.save("nn_halfmoon_noise_0.1_tanh", nn.get_network())
else:
    W = np.load("nn_halfmoon_noise_0.1_tanh.npy")
    print("loaded weight matrix W = %s\n" % (W))
    nn.load_network(W)

y_test_test = []
for i in range(len(y_test)):
    y_test_test.append(np.around(np.squeeze(nn.predict(X_test[i]))))

y_train_test = []
for j in range(len(y_test)):
Example #17
import numpy as np
from sklearn.datasets import load_iris
from NeuralNet import NeuralNetwork
from NeuralNet import calc_accuracy
from sklearn.model_selection import train_test_split

iris = load_iris()

X = iris.data
Y = iris.target

# Transform Y into the required format.
Y_iris = np.zeros((Y.shape[0], 3))

for i in range(Y_iris.shape[0]):
    Y_iris[i, Y[i]] = 1

# Split the data into training and testing data.
X_train, X_test, y_train, y_test = train_test_split(X, Y_iris, random_state=76)

nn = NeuralNetwork(X_train, y_train, X_train.shape[1], 0.01, 0.1, 1000, 32,
                   y_train.shape[1])
nn.train_neural_network()

print("Training accuracy: " + str(calc_accuracy(nn, X_train, y_train)))
print("Testing accuracy: " + str(calc_accuracy(nn, X_test, y_test)))

nn.save_theta()
Example #18
from NeuralNet import NeuralNetwork
from NeuronLayer import NeuronLayer
import cProfile
import os

if __name__ == "__main__":
    l1 = NeuronLayer((28, 28), True, False)
    l2 = NeuronLayer((100, ))
    l3 = NeuronLayer((10, ), False, True)
    network = NeuralNetwork()
    network.add_layer(l1)
    network.add_layer(l2)
    network.add_layer(l3)
    network.connect_layers()
    pr = cProfile.Profile()
    pr.enable()
    network.load_data(os.path.abspath("data/train-images.idx3-ubyte"),
                      os.path.abspath("data/train-labels.idx1-ubyte"))
    network.load_test_data(os.path.abspath("data/t10k-images.idx3-ubyte"),
                           os.path.abspath("data/t10k-labels.idx1-ubyte"))
    network.SGD(0.1, 0.1, 30, 10)
    pr.disable()
    pr.print_stats(sort="cumtime")
Example #19
	# Preset Parameters
	"n_inputs" 				:  image_length, 		# Number of input signals
	"n_outputs"				:  1, 					# Number of output signals from the network
	"n_hidden_layers"		:  1,					# Number of hidden layers in the network (0 or 1 for now)
	"n_hiddens"				:  100,   				# Number of nodes per hidden layer
	"activation_functions"	:  [ LReLU_function, sigmoid_function ],		# Activation functions by layer

	# Optional parameters

	"weights_low"			: -0.1,		# Lower bound on initial weight range
	"weights_high"			: 0.1,  	# Upper bound on initial weight range
	"save_trained_network"  : False,	# Save trained weights or not.

	"batch_size"			: 1, 		# 1 for stochastic gradient descent, 0 for gradient descent
}

# Initialization
network = NeuralNetwork( settings )


# Train
network.train( 				fem_images, fem_scores, 	# Trainingset
							ERROR_LIMIT = 1e-3,			# Acceptable error bounds
							learning_rate	= 1e-5,		# Learning Rate
						)

# Alter image

network.alter_image(		fem_images[0], 				# Image to alter
							fem_scores[0]				# Label for initial backprop
						)