import numpy as np
from brain.network import Network
from dataInterface import DataInterface
from errorGraphs import ErrorGraphs


data_interface = DataInterface('Mnist_debug')

param1, param2, data = data_interface.load_old('2017-11-23-143508_error_during_learning.csv')
print("param1", param1)
print("param2", param2)

training_size = param1[2]

testing_size = param1[3]

learning_iterations = param1[0]
test_period = param1[1]

activation_funs = np.array(param2[1])

# net = Network(param['network_layers'], activation_funs, error_fun)

eta = param1[4]
error_graphs = ErrorGraphs('Mnist_debug_graphes', learning_iterations, eta,
                           param2[0], test_period)

print(type(test_period))

error_graphs.save(data)
import numpy as np
from brain.network import Network
import dataLoader
from dataInterface import DataInterface
from ganGame import GanGame, WGanGame
from ganPlot import GanPlot
# import matplotlib.pyplot as plt
# import os

# Read the parameters from config.ini
data_interface = DataInterface()

param_liste = data_interface.read_conf(
    'config.ini', 'GanMnist')  # Read the config file, section [GanMnist]
param_desc_disc_liste = data_interface.read_conf(
    'config_algo_descente.ini', 'Param de desc du disc')  # discriminator descent parameters
param_desc_gen_liste = data_interface.read_conf(
    'config_algo_descente.ini', 'Param de desc du gen')  # generator descent parameters

# Initialize the data for learning
training_images, training_labels, _, _ = dataLoader.load_data(
    param_liste['file'][0], param_liste['dataset'][0])

number_exp = param_liste['number_exp'][0]
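# A hypothetical shape of the [GanMnist] section read above (key names taken
# from the accesses in this snippet; values are placeholders):
#
#     [GanMnist]
#     file = <path to the data file>
#     dataset = <dataset name>
#     number_exp = 1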

for exp in range(number_exp):

    print("Lancement de l'experience n°", exp)

    param = data_interface.extract_param(param_liste, exp)

# The following method begins mid-class in this snippet: the original class
# statement and the import providing `Function` are missing, so a wrapper
# class name is assumed here to keep the code syntactically valid.
class NeuronLayer:
    def __init__(self,
                 activation_function=Function(),
                 input_size=1,
                 output_size=1,
                 noise_size=0,
                 learning_batch_size=1,
                 param_desc='Parametres de descente',
                 nb_exp=0):
        """
        Creates a fully connected neuron layer

        :param activation_function:
        :param input_size:
        :param output_size:
        :param noise_size:
        :param learning_batch_size:
        :param param_desc:
        :param nb_exp:
        """
        self._input_size = input_size
        self._output_size = output_size
        self._learning_batch_size = learning_batch_size
        self._noise_size = noise_size
        # self._weights = np.transpose(np.random.randn(input_size, output_size))
        self._weights = np.random.randn(output_size, input_size + noise_size)
        self._bias = np.zeros((output_size, 1))  # Column vector
        # The bias can stay a column vector rather than a matrix repeating the
        # same column learning_batch_size times. Whenever the bias is needed in
        # the computations there is, mathematically speaking, a dimension
        # mismatch (vector + matrix), but numpy handles it by adding the bias
        # vector to each column of the matrix (broadcasting).
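        # A minimal sketch of that broadcast (illustrative shapes only):
        #   >>> b = np.arange(3).reshape(3, 1)   # bias, shape (3, 1)
        #   >>> m = np.ones((3, 4))              # batch, shape (3, 4)
        #   >>> (m + b).shape                    # b is added to every column
        #   (3, 4)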
        self.input = np.zeros((input_size, learning_batch_size))
        self._activation_function = activation_function
        self._activation_function.vectorize()
        # Each column corresponds to one entry of the batch
        self.activation_levels = np.zeros((output_size, learning_batch_size))
        self.output = np.zeros((output_size, learning_batch_size))

        self.update_weights_value = np.zeros(
            (output_size, input_size + noise_size))
        self.update_bias_value = np.zeros((output_size, 1))

        self.noise_input = np.zeros((noise_size, learning_batch_size))

        # self.update_weights_value = np.zeros((output_size, input_size))

        self.weights_gradients_sum = np.zeros(
            (output_size, input_size + noise_size))
        # self.weights_gradients_sum = np.zeros((output_size, input_size))
        self.bias_gradients_sum = np.zeros((output_size, 1))
        self.weights_moment = np.zeros((output_size, input_size + noise_size))
        # self.weights_moment = np.zeros((output_size, input_size))
        self.bias_moment = np.zeros((output_size, 1))
        self.weights_eta = np.zeros((output_size, input_size + noise_size))
        # self.weights_eta = np.zeros((output_size, input_size))  # needs a better name
        self.bias_eta = np.zeros((output_size, 1))  # needs a better name

        data_interface = DataInterface()
        param_liste = data_interface.read_conf('config_algo_descente.ini',
                                               param_desc)  # Read the config file
        param_liste = data_interface.extract_param(param_liste, nb_exp)
        self.algo_utilise = param_liste['algo_utilise']
        self.eta = param_liste['eta']
        self.momentum = param_liste['momentum']
        self.epsilon = param_liste['epsilon']
        self.gamma = param_liste['gamma']
        self.moment = param_liste['moment']
        self.gamma_1 = param_liste['gamma_1']
        self.gamma_2 = param_liste['gamma_2']
        self.instant = 0
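
# A hypothetical usage sketch (assumes the NeuronLayer wrapper above and a
# 'Parametres de descente' section present in config_algo_descente.ini):
#
#     layer = NeuronLayer(input_size=784, output_size=10,
#                         learning_batch_size=32)
#     layer.output.shape   # -> (10, 32)
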
import numpy as np
from brain.network import Network
from dataInterface import DataInterface
from errorGraphs import ErrorGraphs

data_interface = DataInterface('Mnist_debug')

param, data = data_interface.load(
    "2017-11-23-162838_error_during_learning.csv")
print(param)
training_size = param['training_size']

testing_size = param['testing_size']

learning_iterations = param['learning_iterations']
test_period = param['test_period']
randomize_learning_set = param['randomize_learning_set']

activation_funs = np.array(param['activation_funs'])
error_fun = param['error_fun']

# net = Network(param['network_layers'], activation_funs, error_fun)

eta = param['eta']
error_graphs = ErrorGraphs('Mnist_debug_graphes', learning_iterations, eta,
                           param['network_layers'], test_period)

print(type(test_period))

error_graphs.save(data)
import numpy as np
from brain.network import Network
import dataLoader
from engine import Engine
from dataInterface import DataInterface
from errorGraphs import ErrorGraphs

data_interface = DataInterface('Mnist_debug')

param = data_interface.read_conf()
data_interface.rename(param['nom_dossier_resultats'])
param_algo_descente = data_interface.read_conf('config_algo_descente.ini',
                                               'Parametres de descente')

# Load the data for learning
training_images, training_labels, testing_images, testing_labels = \
    dataLoader.load_data(param['file'], param['dataset'])

# Training images configuration
training_size = param['training_size']

# Test images configuration
testing_size = param['testing_size']

# Load the learning-management parameters
nb_exp = param['nb_exp']
test_period = param['test_period']
randomize_learning_set = param['randomize_learning_set']

# Load the functions used
error_fun = param['error_fun']
import numpy as np
from brain.network import Network
from dataInterface import DataInterface
from errorGraphs import ErrorGraphs
import matplotlib.pyplot as plt
from ganPlot import GanPlot

data_interface = DataInterface(
    'C://Users//Froux//Documents//Projet_Long//Data//GanMnist//Courbes')
param_real, data_real = data_interface.load(
    "2018-01-31-204909_discriminator_real_score.csv")
param_fake, data_fake = data_interface.load(
    "2018-01-31-204909_discriminator_fake_score.csv")

numbers_to_draw = param_fake['numbers_to_draw']

save_folder = param_fake['save_folder']

gan_plot = GanPlot(save_folder, numbers_to_draw)

gan_plot.save_courbes(param_fake, data_real, data_fake)