Example 1
def heatmap(eta_vals, lmbd_vals, X_test_sc, X_train_sc, Y_train_onehot, y_test,
            epochs, batch_size, n_hidden_neurons, n_categories):

    accuracy_array = np.zeros((len(eta_vals), len(lmbd_vals)), dtype=object)
    # grid search
    for i, eta in enumerate(eta_vals):
        for j, lmbd in enumerate(lmbd_vals):
            dnn = NN(X_train_sc,
                     Y_train_onehot,
                     eta=eta,
                     lmbd=lmbd,
                     epochs=epochs,
                     batch_size=batch_size,
                     n_hidden_neurons=n_hidden_neurons,
                     n_categories=n_categories)
            dnn.train()

            test_predict = dnn.predict(X_test_sc)
            accuracy_array[i][j] = accuracy_score(y_test, test_predict)

            print("Learning rate             = ", eta)
            print("Lambda                    = ", lmbd)
            print("Accuracy score on test set: ",
                  accuracy_score(y_test, test_predict))
            print()

    np.save('acc_score', accuracy_array)
    np.save('eta_values', eta_vals)
    np.save('lambda_values', lmbd_vals)

    P.map()
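The function above only saves the grid-search results to .npy files and then calls P.map(), presumably a plotting helper from a module that is not shown. A minimal sketch of how the saved accuracy grid could be rendered, assuming matplotlib and seaborn are available (neither is confirmed by the snippet):

# illustrative plotting sketch; P.map() presumably does something similar
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

acc = np.load('acc_score.npy', allow_pickle=True).astype(float)
etas = np.load('eta_values.npy')
lmbds = np.load('lambda_values.npy')

fig, ax = plt.subplots()
sns.heatmap(acc, annot=True, fmt='.3f', ax=ax,
            xticklabels=[f'{l:.0e}' for l in lmbds],
            yticklabels=[f'{e:.0e}' for e in etas])
ax.set_xlabel('lambda')
ax.set_ylabel('learning rate (eta)')
plt.show()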
Example 2
    def __init__(self, map, x, y, sim, model=None, train=False):
        Bot.__init__(self, map, x, y, sim)

        if model:
            self.model = model
        else:
            self.model = NN(33, [10, 8])

        # assign the bot its type
        self.type = "B"  # => normal bot

        self.train = train

        if train:  # if the bot is in training mode
            # give it extra energy so it has a bit more time to train
            self.incr_energy(100)
            # give it the training type
            self.type = "T"

        self.cellNum = 0

        self.e_fruit = 4  # energy gained by absorbing a fruit
        self.e_meat = 20  # energy gained by absorbing meat
        self.c_move = 1  # cost of moving
        self.c_rien = 1  # cost of doing nothing
Example 3
class Gene:
    def __init__(self, input_len, hidden_layers, output_len):
        self.input_len = input_len
        self.hidden_layers = hidden_layers
        self.output_len = output_len
        self.nn = NN(input_len, hidden_layers, output_len)

    def breed(self, gene):  # weight-wise? # layer-wise? # average everything?
        # print(gene)

        # technically not the best way of doing this because it will first init the weights and thus wastes time...
        new_gene = Gene(self.input_len, self.hidden_layers, self.output_len)

        for i in np.arange(len(self.nn.weights)):
            new_gene.nn.weights[i] = (self.nn.weights[i] +
                                      gene.nn.weights[i]) / 2

        for i in np.arange(len(self.nn.biases)):
            new_gene.nn.biases[i] = (self.nn.biases[i] + gene.nn.biases[i]) / 2

        return new_gene

    def mutate(self):  # add epsilon? # pick a weight and re-init?
        pass

    def action(self, state):
        return self.nn.predict(state)

    def save(self, file_name):
        self.nn.save(file_name)

    def load(self, file_name):
        self.nn.load(file_name)
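The mutate() method above is left as a stub, with two options noted in its comment. A minimal sketch of the first option (perturbing weights with a small amount of Gaussian noise); the rate and scale values are illustrative assumptions, not taken from the original project:

    def mutate(self, rate=0.1, scale=0.05):
        # hypothetical: with probability `rate`, add small Gaussian noise
        # to each weight and bias array of the underlying network
        for i in np.arange(len(self.nn.weights)):
            if np.random.rand() < rate:
                self.nn.weights[i] += np.random.normal(
                    0.0, scale, size=self.nn.weights[i].shape)
        for i in np.arange(len(self.nn.biases)):
            if np.random.rand() < rate:
                self.nn.biases[i] += np.random.normal(
                    0.0, scale, size=self.nn.biases[i].shape)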
Example 4
def train(config):
    '''The config object required for training is in train_config.py'''

    config['hyperparameters']['seed'] = np.random.randint(1e5)

    if config['dataset_name'] == 'mnist':
        dataset = load_mnist(config['mnist_sample_shape'])

    elif config['dataset_name'] == 'cifar10':
        dataset = load_cifar10(flatten_input=(config['nn_type'] == 'mlp'))

    else:
        raise ValueError(f"unsupported dataset_name: {config['dataset_name']}")

    nn = NN(data=dataset, **config['hyperparameters'])

    start = timeit.default_timer()

    train_logs = nn.train_loop(eval_each_epoch=config['eval_while_training'])

    elapsed = round(timeit.default_timer() - start, 4)
    print(f'training runtime: {elapsed} seconds')

    exp = utils.ExperimentResults()
    exp.save(train_logs, 'train_logs')
    exp.save(config, 'config')
    exp.save(nn, 'neural_network')
    exp.save(elapsed, 'training_runtime')

    test_results = nn.evaluate()
    exp.save(test_results, 'test_results')
Example 5
    def __init__(self, z_dim, g_dim, x_dim, h_dim, lr=0.01, dropout=0.0):
        # architecture parameters
        self.z_dim = z_dim
        self.g_dim = g_dim
        self.x_dim = x_dim
        self.h_dim = h_dim

        # learning parameters
        assert (lr > 0)
        self.lr = float(lr)
        self.dropout = min(max(dropout, 0.0), 1.0)
        ## the generator and the discriminator each need their own optimizer
        self.generator_optimizer = Adam(alpha=self.lr)
        #self.discriminator_optimizer = Adam(alpha=self.lr)

        # network parameters
        self.generator = NN(input_dim=z_dim,
                            hidden_dim=g_dim,
                            output_dim=x_dim,
                            lr=self.lr * 0.5,
                            dropout=self.dropout)

        self.generator.mode = 'binary'

        self.discriminator = NN(input_dim=x_dim,
                                hidden_dim=h_dim,
                                output_dim=1,
                                lr=self.lr * 0.1,
                                dropout=self.dropout)
Example 6
def mnist_net():
    mndata = MNIST("mnist", return_type="numpy")
    print("Loading images...")
    images, labels = mndata.load_training()
    features = images.T / 255
    z = np.zeros((60000, 10))
    z[np.arange(60000), labels] = 1
    Y = z.T
    nn = NN([784, 100, 30, 10])
    nn.set_hyperparameters(learning_rate=0.5)
    t = time()
    nn.initialize_parameters()
    print("Start Training...")
    nn.minimize({"features": features, "labels": Y}, 20)
    print("Finish Training.")
    print("Training time: {0} seconds".format(round(time() - t, 2)))
    print("Start Testing...")
    t = time()
    test_images, test_labels = mndata.load_testing()
    test_features = test_images.T / 255
    z = np.zeros((10000, 10))
    z[np.arange(10000), test_labels] = 1
    test_Y = z.T
    print("Testing accuracy: {}".format(
        round(nn.evaluate({
            "features": test_features,
            "labels": test_Y
        }), 4)))
    print("Testing time: {0} seconds".format(round(time() - t, 2)))
Example 7
    def __init__(self):
        info_df = pd.read_csv('hdb-carpark-information-with-lat-lng.csv')
        self.nn = NN()
        df = self.nn.getCurrentAvailability()
        self.info_df = pd.merge(df,
                                info_df,
                                left_on=['carpark_number'],
                                right_on=['carpark_number'])
        self.parkings = self.info_df[[
            'carpark_number', 'lat', 'lng', 'night_parking', 'free_parking',
            'car_park_type', 'type_of_parking_system'
        ]]
        self.db = firestore.client()
Example 8
    def __init__(self, x_dim, h_dim, z_dim, g_dim, lr=0.01, dropout=0.0):
        # architecture parameters
        self.z_dim = z_dim
        self.h_dim = h_dim
        self.g_dim = g_dim
        self.x_dim = x_dim

        # learning parameters
        assert (lr > 0)
        self.lr = float(lr)
        self.dropout = min(max(dropout, 0.0), 1.0)

        # network parameters
        self.encoder = SimpleNN(input_dim=x_dim,
                                hidden_dim=h_dim,
                                output_dim=2 * z_dim,
                                lr=self.lr,
                                dropout=self.dropout)

        self.encoder_optimizer = Adam(alpha=self.lr)

        self.decoder = NN(input_dim=z_dim,
                          hidden_dim=g_dim,
                          output_dim=x_dim,
                          lr=self.lr,
                          dropout=self.dropout)

        self.decoder.mode = 'binary'
Example 9
    def __init__(self, map, x, y, sim, model=None):
        self.map = map
        self.x = x
        self.y = y
        self.incr_energy(10)
        self.nb_steps = 0
        self.sim = sim
        self.model = NN(13, [20, 8])
        self.type = "A"
Example 10
    def getFilteredParkings(self, lat, lon, night_parking, free_parking,
                            car_park_type, type_of_parking_system):
        nn = NN()
        df = nn.getCurrentAvailability()
        df = pd.merge(df,
                      self.parkings,
                      left_on=['carpark_number'],
                      right_on=['carpark_number'])

        id = self.DistCalc(lat, lon)

        # filtparkings=df[(df['night_parking']==night_parking) & (df['free_parking']==free_parking) &(df['car_park_type'].isin(car_park_type)) & (df['type_of_parking_system']==type_of_parking_system)]
        # filteredparkings=filtparkings[filtparkings['carpark_number'].isin(id)]
        # print(filteredparkings)
        # res=filteredparkings[['carpark_number','lat','lng','lots_available']].set_index('carpark_number').T.to_json()

        filtparkings = df
        if night_parking is not None:
            filtparkings = filtparkings[(
                filtparkings['night_parking'] == night_parking)]
        if free_parking is not None:
            filtparkings = filtparkings[(
                filtparkings['free_parking'] == free_parking)]
        if car_park_type is not None:
            filtparkings = filtparkings[(
                filtparkings['car_park_type'].isin(car_park_type))]
        if type_of_parking_system is not None:
            filtparkings = filtparkings[(filtparkings['type_of_parking_system']
                                         == type_of_parking_system)]
        filteredparkings = filtparkings[filtparkings['carpark_number'].isin(
            id)]
        #filtparkings=df[(df['night_parking']==req.get("night_parking")) & (df['free_parking']==req.get("free_parking")) & (df['car_park_type'].isin(req.get("car_park_type"))) & (df['type_of_parking_system']==req.get("type_of_parking_system"))]
        print(filteredparkings)
        res = filteredparkings[[
            'carpark_number', 'lat', 'lng', 'lots_available'
        ]].set_index('carpark_number').T.to_json()

        return res
Example 11
def train(hyperparameters, sample_shape, nn_type):

    hyperparameters['seed'] = np.random.randint(1e5)

    cifar10 = load_cifar10(flatten_input=(
        nn_type == 'mlp'))  # because mlp only processes 1D input
    nn = NN(data=cifar10, **hyperparameters)

    perform_evaluation = False
    if nn_type == 'mlp':
        perform_evaluation = True
    elif hyperparameters['n_epochs'] == 1:
        perform_evaluation = True

    train_logs = nn.train_loop(eval_each_epoch=perform_evaluation)

    exp = utils.ExperimentResults()
    exp.save(train_logs, 'train_logs')
    exp.save(hyperparameters, 'hyperparams')
    exp.save(nn, 'neural_network')

    test_results = nn.evaluate()
    exp.save(test_results, 'test_results')
Example 12
def xor_net():
    a = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]])
    features = a[:, 0:2].T
    labels = a[:, 2].reshape((1, 4))
    z = np.zeros((4, 2))
    z[np.arange(4), labels] = 1
    labels = z.T
    nn = NN([2, 4, 3, 2])
    nn.set_hyperparameters(batch_size=4, learning_rate=0.75)
    t = time()
    nn.initialize_parameters()
    print("Start Training...")
    nn.minimize({"features": features, "labels": labels}, 10000)
    print("Finish Training.")
    print("Training time: {0} seconds".format(round(time() - t, 2)))
    print("Start Testing...")
    t = time()
    print("Testing accuracy: {}".format(
        round(nn.evaluate({
            "features": features,
            "labels": labels
        }), 4)))
    print("Testing time: {0} seconds".format(round(time() - t, 2)))
Example 13
def gen_NN(genes=[]):
    # # Inputs
    # input = Input(shape=(7,))

    # x = Dense(10, activation='tanh')(input)
    # x = Dense(10, activation='tanh')(x)

    # predictions = Dense(2, activation='tanh')(x)

    # model = Model(inputs=input, outputs=predictions)

    # if len(genes) > 0:
    #     model.set_weights(genes)

    # model._make_predict_function()

    model = NN()

    return model
Example 14
    def __init__(self,
                 z_dim,
                 g_dim,
                 x_dim,
                 h_dim,
                 n_classes,
                 embedding_dim=8,
                 lr=0.01,
                 dropout=0.0):

        self.n_classes = n_classes
        new_z_dim = z_dim + embedding_dim
        super(AcGAN, self).__init__(new_z_dim, g_dim, x_dim, h_dim, lr,
                                    dropout)

        self.classifier = NN(input_dim=x_dim,
                             hidden_dim=h_dim,
                             output_dim=n_classes,
                             lr=self.lr,
                             dropout=self.dropout)
        # embed the conditional variable into a vector space and keep it constant
        self.c_vectors = normal_sample(dim=embedding_dim, n=n_classes)
        self.embedding_dim = embedding_dim
Example 15
    def __init__(self):
        # appropriately initialize your neural network
        self.model = NN([2, 1])
Example 16
#!/bin/python


import numpy as np
from neural_network import NN

xor_states = np.array([[0, 0], [0, 1], [1, 1], [1, 0]])
xor_outputs = np.array([0, 1, 0, 1])


net = NN()

net.load()
net.useThresholds = 1
net.useSigmoid = 1


for inpt, out in zip(xor_states, xor_outputs):
    print(net.run(inpt))
    print(out)
    print("\n\n")
Example 17
#from __future__ import print_function as print
from neural_network import NeuralNetwork as NN
import torch
from logic_gates import AND, OR, NOT, XOR

if __name__ == "__main__":

    nn = NN([2, 1])
    and_gate = AND()
    or_gate = OR()
    not_gate = NOT()
    xor_gate = XOR()

    print(and_gate(False, False))
    print(and_gate(False, True))
    print(and_gate(True, False))
    print(and_gate(True, True))
    print(" ")

    print(or_gate(False, False))
    print(or_gate(False, True))
    print(or_gate(True, False))
    print(or_gate(True, True))
    print("")

    print(not_gate(True))
    print(not_gate(False))
    print("")

    print(xor_gate(False, False))
    print(xor_gate(False, True))
Example 18
    def __init__(self, input_len, hidden_layers, output_len):
        self.input_len = input_len
        self.hidden_layers = hidden_layers
        self.output_len = output_len
        self.nn = NN(input_len, hidden_layers, output_len)
Example 19
# Iris Multi-Class Classifier
"""
NN class takes in a model, input training data and target data
as arguments.
The model argument is a Python list that defines the layers in a
neural network. The length of the defines how 'deep' the neural_network is,
Each element of the list defines the number of neurons at the given layer.

ex: model = [4, 5, 7, 3] |
This model is a neural network with 4 layers
it has 4 input neurons or features and 3 output neurons(classes)
The 1st hidden layer has 5 neurons; the 2nd hidden layer has 7 neurons
"""

NN1 = NN(model_1, X_train_1, Y_train_1)
NN1.activation_function('tanh')
NN1.output_function('softmax')
NN1.loss_function('cross_entropy')

# Breast Cancer Logistic Regression
NN2 = NN(model_2, X_train_2, Y_train_2)
NN2.activation_function('tanh')
NN2.output_function('sigmoid')
NN2.loss_function('log_loss')

# Train neural networks
NN1.train()
NN1.predict()
NN2.train()
NN2.predict()
Example 20
class Parking:
    def __init__(self):
        info_df = pd.read_csv('hdb-carpark-information-with-lat-lng.csv')
        self.nn = NN()
        df = self.nn.getCurrentAvailability()
        self.info_df = pd.merge(df,
                                info_df,
                                left_on=['carpark_number'],
                                right_on=['carpark_number'])
        self.parkings = self.info_df[[
            'carpark_number', 'lat', 'lng', 'night_parking', 'free_parking',
            'car_park_type', 'type_of_parking_system'
        ]]
        self.db = firestore.client()

    def initializeLocations(self):

        # serialise the whole dataframe (keyed by carpark_number) and store it
        # in the parkingsinfo collection under the 'parkings' document
        parking = self.info_df.set_index('carpark_number').T.to_json()
        parking = json.loads(parking)
        # send the data to firestore
        data = {u'parking': parking}
        self.db.collection(u'parkingsinfo').document('parkings').set(data)

        #return true after you're done

        return (True, )

    def updateCurrentAvailability(self):
        next_call = time.time()
        while True:
            print("getting current availability")
            parking = self.nn.getCurrentAvailability()
            # serialise the freshly fetched availability, not the cached info_df
            parking = parking.set_index('carpark_number')[[
                'lots_available'
            ]].T.to_json()
            parking = json.loads(parking)
            data = {u'current_availability': parking}
            self.db.collection(u'parking_info').document('parkings').set(data)
            print("pushed to db")
            next_call += 60
            print("sleeping")
            time.sleep(next_call - time.time())

    def DistCalc(self, latitude, longtitude):
        R = 6373.0
        location = []
        lattemp = latitude
        lontemp = longtitude
        j = 0
        for i in range(len(self.parkings.lat)):
            lat1 = radians(self.parkings.lat[i])
            lon1 = radians(self.parkings.lng[i])
            lat2 = radians(lattemp)
            lon2 = radians(lontemp)
            dlon = lon2 - lon1
            dlat = lat2 - lat1
            a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
            c = 2 * atan2(sqrt(a), sqrt(1 - a))
            distance = R * c
            if (distance < 1.0000):
                location.append(self.parkings.carpark_number[i])
                print(location[j])
                j += 1
        return location

    def getFilteredParkings(self, lat, lon, night_parking, free_parking,
                            car_park_type, type_of_parking_system):
        nn = NN()
        df = nn.getCurrentAvailability()
        df = pd.merge(df,
                      self.parkings,
                      left_on=['carpark_number'],
                      right_on=['carpark_number'])

        id = self.DistCalc(lat, lon)

        # filtparkings=df[(df['night_parking']==night_parking) & (df['free_parking']==free_parking) &(df['car_park_type'].isin(car_park_type)) & (df['type_of_parking_system']==type_of_parking_system)]
        # filteredparkings=filtparkings[filtparkings['carpark_number'].isin(id)]
        # print(filteredparkings)
        # res=filteredparkings[['carpark_number','lat','lng','lots_available']].set_index('carpark_number').T.to_json()

        filtparkings = df
        if night_parking is not None:
            filtparkings = filtparkings[(
                filtparkings['night_parking'] == night_parking)]
        if free_parking is not None:
            filtparkings = filtparkings[(
                filtparkings['free_parking'] == free_parking)]
        if car_park_type is not None:
            filtparkings = filtparkings[(
                filtparkings['car_park_type'].isin(car_park_type))]
        if type_of_parking_system is not None:
            filtparkings = filtparkings[(filtparkings['type_of_parking_system']
                                         == type_of_parking_system)]
        filteredparkings = filtparkings[filtparkings['carpark_number'].isin(
            id)]
        #filtparkings=df[(df['night_parking']==req.get("night_parking")) & (df['free_parking']==req.get("free_parking")) & (df['car_park_type'].isin(req.get("car_park_type"))) & (df['type_of_parking_system']==req.get("type_of_parking_system"))]
        print(filteredparkings)
        res = filteredparkings[[
            'carpark_number', 'lat', 'lng', 'lots_available'
        ]].set_index('carpark_number').T.to_json()

        return res

    def generateSequencePrediction(self):
        next_call = time.time()
        while True:
            df_list = self.nn.getSequenceFromCurrentTime()
            self.df_list_for_nn = self.nn.modifyDataframeListForNN(df_list)
            predictions = {}
            #generate prediction
            for df in self.df_list_for_nn:
                model = self.models[df.carpark_number.iloc[0]]
                prediction = self.nn.generatePrediction(model, df)
                prediction = prediction[0].tolist()
                predictions[df.carpark_number.iloc[0]] = prediction
                print("[", df.carpark_number.iloc[0], "] = ", prediction)

            #push to firebase
            data = {u'predictions': predictions}
            self.db.collection(u'parking_predictions').document(
                'predictions').set(data)
            next_call += 60 * 15  # every 15 minutes
            time.sleep(next_call - time.time())

    def loadModels(self):
        # read all the trained models from the folder
        self.models = {}
        directory = r"./trained_models/"
        for filename in os.listdir(directory):
            #load model
            model = load_model(directory + filename,
                               custom_objects=None,
                               compile=True,
                               options=None)
            #get carpark id from filename
            filename_split = filename.split('_')
            carpark_id_split = filename_split[-1].split('.')
            carpark_id = carpark_id_split[0]

            #add model to dict with car_park id as key
            self.models[carpark_id] = model
            print("[" + carpark_id + "]=\t")
            print(self.models[carpark_id])
Example 21
# plot a single digit from the training data
def show_example(data, x):
    img_array = np.asfarray(data[x].split(',')[1:]).reshape((28,28))
    plt.imshow(img_array, cmap='Greys', interpolation='None')
    plt.show()

# Number of input, hidden, and output nodes
input_nodes = 784
hidden_nodes = 100
output_nodes = 10

# Learning rate
learning_rate = 0.1

# Initialise Neural network
N = NN(input_nodes, hidden_nodes, output_nodes, learning_rate)

# TRAINING
epochs = 2

for epoch in range(epochs):
    for sample in training_data:
        sample_values = sample.split(',')
        # scale and shift input
        inputs = (np.asfarray(sample_values[1:]) / 255.0 * 0.99) + 0.01
        # create target values (0.01 -> 0.99)
        targets = np.zeros(output_nodes) + 0.01
        # assign desired target values
        targets[int(sample_values[0])] = 0.99
        N.train(inputs, targets)
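The snippet stops after training. A possible way to score the network on held-out data, assuming test_data is loaded the same way as training_data and that the class exposes a query() (forward-pass) method; neither is shown in the snippet above:

# hypothetical evaluation loop; N.query() and test_data are assumptions
scorecard = []
for sample in test_data:
    sample_values = sample.split(',')
    correct_label = int(sample_values[0])
    inputs = (np.asfarray(sample_values[1:]) / 255.0 * 0.99) + 0.01
    outputs = N.query(inputs)
    scorecard.append(1 if np.argmax(outputs) == correct_label else 0)

print("accuracy =", sum(scorecard) / len(scorecard))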
Example 22
import tensorflow as tf
from neural_network import NN
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

y_train = tf.one_hot(y_train, depth=10).numpy()
y_test = tf.one_hot(y_test, depth=10).numpy()

x_train = x_train.reshape(x_train.shape[0], -1).T
y_train = y_train.reshape(y_train.shape[0], -1).T
dev_X = x_test.reshape(x_test.shape[0], -1).T
dev_Y = y_test.reshape(y_test.shape[0], -1).T
nn = NN([784, 10, 40, 20, 10])
x = tf.Variable(x_train, dtype='float32')
y = tf.Variable(y_train, dtype='float32')
nn.model(x, y, dev_X, dev_Y, num_epochs=100, learning_rate=0.001, minibatch_size=512, beta_1=0.9, lamda=0,
         softmax=True)
Example 23
        n_categories = 2
        eta_vals = np.logspace(-7, -4, 7)
        lmbd_vals = np.logspace(-7, -1, 7)

        # Make heatmap of the accuracy score with eta_vals and lmbd_vals
        # Commented out to save time
        #func.heatmap(eta_vals, lmbd_vals, X_test_sc, X_train_sc, Y_train_onehot, y_test, epochs, batch_size, n_hidden_neurons, n_categories)

        # Use best values from heatmap
        eta_final = 1e-4
        lmbd_final = 1e-2

        dnn_f = NN(X_train_sc,
                   Y_train_onehot,
                   eta=eta_final,
                   lmbd=lmbd_final,
                   epochs=epochs,
                   batch_size=batch_size,
                   n_hidden_neurons=n_hidden_neurons,
                   n_categories=n_categories)
        dnn_f.train()

        y_predict = dnn_f.predict(X_test_sc)
        model = y_predict

        # Make Cumulative gain plot
        P.Cumulative_gain_plot(y_test, model)

        # Create a confusion matrix and wrap it in a pandas DataFrame
        CM = func.Create_ConfusionMatrix(model, y_test, plot=True)
        CM_DataFrame = func.ConfusionMatrix_DataFrame(
            CM, labels=['pay', 'default'])
Example 24
class NN_bot(Bot):
    def __init__(self, map, x, y, sim, model=None, train=False):
        Bot.__init__(self, map, x, y, sim)

        if model:
            self.model = model
        else:
            self.model = NN(33, [10, 8])

        # assign the bot its type
        self.type = "B"  # => normal bot

        self.train = train

        if train:  # if the bot is in training mode
            # give it extra energy so it has a bit more time to train
            self.incr_energy(100)
            # give it the training type
            self.type = "T"

        self.cellNum = 0

        self.e_fruit = 4  # energy gained by absorbing a fruit
        self.e_meat = 20  # energy gained by absorbing meat
        self.c_move = 1  # cost of moving
        self.c_rien = 1  # cost of doing nothing

        # TODO: add and manage the variables for mitosis

    def g_inputs(self):
        # TODO: put the energy values on a log scale
        inputs = np.array([])
        inputs = np.append(inputs, np.cbrt(self.g_energy()))
        inputs = np.append(inputs, np.cbrt(self.g_nb_fruit_on_pos()))

        # inputs that just provide looping variables, to give the bot a bit of
        # variation in its behaviour and allow it to take somewhat cyclic
        # actions
        inputs = np.append(inputs, np.cbrt(self.sim.current_nb_step % 2))
        inputs = np.append(inputs, np.cbrt(self.sim.current_nb_step % 10))
        inputs = np.append(inputs, np.cbrt(self.sim.current_nb_step % 50))

        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 6, [0, 1])))
        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 6, [0, -1])))
        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 6, [1, 0])))
        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 6, [-1, 0])))

        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 5, [0, 1])))
        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 5, [0, -1])))
        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 5, [1, 0])))
        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 5, [-1, 0])))

        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 4, [0, 1])))
        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 4, [0, -1])))
        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 4, [1, 0])))
        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 4, [-1, 0])))

        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 3, [0, 1])))
        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 3, [0, -1])))
        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 3, [1, 0])))
        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 3, [-1, 0])))

        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 2, [0, 1])))
        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 2, [0, -1])))
        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 2, [1, 0])))
        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 2, [-1, 0])))

        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 1, [0, 1])))
        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 1, [0, -1])))
        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 1, [1, 0])))
        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 1, [-1, 0])))

        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 0, [0, 1])))
        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 0, [0, -1])))
        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 0, [1, 0])))
        inputs = np.append(inputs, np.cbrt(self.g_info_sum_on_dir(21, 0, [-1, 0])))

        # inputs = np.append(inputs, self.g_nb_fruit_on_dir([0, 1], 3))
        # inputs = np.append(inputs, self.g_nb_fruit_on_dir([0, -1], 3))
        # inputs = np.append(inputs, self.g_nb_fruit_on_dir([1, 0], 3))
        # inputs = np.append(inputs, self.g_nb_fruit_on_dir([-1, 0], 3))

        # inputs = np.append(inputs, self.g_bot_on_dir([1, 0]))
        # inputs = np.append(inputs, self.g_bot_on_dir([-1, 0]))
        # inputs = np.append(inputs, self.g_bot_on_dir([0, 1]))
        # inputs = np.append(inputs, self.g_bot_on_dir([0, -1]))

        # inputs = np.append(inputs, self.g_bot_on_dir([1, 0], 3))
        # inputs = np.append(inputs, self.g_bot_on_dir([-1, 0], 3))
        # inputs = np.append(inputs, self.g_bot_on_dir([0, 1]), 3)
        # inputs = np.append(inputs, self.g_bot_on_dir([0, -1], 3))

        return inputs

    def predict(self):
        inputs = self.g_inputs()
        prediction = self.model.predict(inputs)

        action = np.argmax(prediction)

        if self.train:
            albot_actions = self.albot_predict()
            self.model.fit_on_one(self.g_inputs(), albot_actions, 0.001)
            action = albot_actions

        return action

    def albot_predict(self):
        actions = [0, 0, 0, 0, 0, 0, 0, 0]

        if self.g_energy() > 300:
            pass
            # actions[6] = 1000
        if self.g_nb_fruit_on_pos() > 0:
            actions[7] = 10000

        actions[1] = self.g_nb_fruit_on_dir([0, -1], 3) - (
            self.g_bot_on_dir([0, -1]) * 0000
        )
        actions[2] = self.g_nb_fruit_on_dir([0, 1], 3) - (
            self.g_bot_on_dir([0, 1]) * 0000
        )
        actions[3] = self.g_nb_fruit_on_dir([-1, 0], 3) - (
            self.g_bot_on_dir([-1, 0]) * 0000
        )
        actions[4] = self.g_nb_fruit_on_dir([1, 0], 3) - (
            self.g_bot_on_dir([1, 0]) * 0000
        )

        return actions

    def mitose(self):
        if self.g_cd_repro() == 0:

            self.incr_cd_repro(20)
            self.incr_energy(-5)  # energy lost to make the child
            # energy that will be transferred to the child
            energy_to_child = 5

            x = -1
            y = -1

            # TODO: write a helper function to make this cleaner
            if self.y - 2 > 0:
                if self.map.cellLibre(self.x, self.y-1) == 0:
                    x = self.x
                    y = self.y - 1

            elif self.y + 2 < self.map.height - 1:
                if self.map.cellLibre(self.x, self.y+1) == 0:
                    x = self.x
                    y = self.y + 1

            elif self.x - 2 > 0:
                if self.map.cellLibre(self.x-1, self.y) == 0:
                    x = self.x - 1
                    y = self.y

            elif self.x + 2 < self.map.height - 1:
                if self.map.cellLibre(self.x+1, self.y) == 0:
                    x = self.x + 1
                    y = self.y
            else:
                # no place to put the child
                self.incr_energy(energy_to_child)
                self.incr_energy(-1)

            if x == -1:
                pass
            else:
                new_model = genetic.mutate(self.model.weights, 1, 1)
                new_bot = NN_bot(self.map, x, y, self.sim, new_model)
                new_bot.s_energy(energy_to_child)
                self.sim.add_bots([new_bot])