    def GraficarLearningCurve(self, id_experiment, id_weigths_1_de_4,
                              id_weigths_2_de_4, id_weigths_3_de_4,
                              id_weigths_4_de_4):
        # Plot the learning curve: test vs. training error for four weight
        # snapshots, each trained on a progressively larger training set.

        wr = WeigthsRepo(database_name=self.data_base,
                         id_experiment=id_experiment)

        # In a weights row, index 8 holds the training error and index 13 the
        # test error (per this repo's column layout).
        data_weigths_1_de_4 = wr.GetWeithsInfoById(id_weigths_1_de_4)
        test_error_1_de_4 = data_weigths_1_de_4[13]
        training_error_1_de_4 = data_weigths_1_de_4[8]

        data_weigths_2_de_4 = wr.GetWeithsInfoById(id_weigths_2_de_4)
        test_error_2_de_4 = data_weigths_2_de_4[13]
        training_error_2_de_4 = data_weigths_2_de_4[8]

        data_weigths_3_de_4 = wr.GetWeithsInfoById(id_weigths_3_de_4)
        test_error_3_de_4 = data_weigths_3_de_4[13]
        training_error_3_de_4 = data_weigths_3_de_4[8]

        data_weigths_4_de_4 = wr.GetWeithsInfoById(id_weigths_4_de_4)
        test_error_4_de_4 = data_weigths_4_de_4[13]
        training_error_4_de_4 = data_weigths_4_de_4[8]

        # Cumulative training-set sizes for the four weight snapshots.
        test_x_size = np.asarray([175500, 351000, 526500, 684000], dtype=int)
        test_y_error = np.asarray(
            [test_error_1_de_4, test_error_2_de_4, test_error_3_de_4,
             test_error_4_de_4], dtype=np.float64)

        train_x_size = np.asarray([175500, 351000, 526500, 684000], dtype=int)
        train_y_error = np.asarray(
            [training_error_1_de_4, training_error_2_de_4,
             training_error_3_de_4, training_error_4_de_4], dtype=np.float64)

        # test_x_size and train_x_size are identical, so one xticks call suffices.
        my_xticks = [175500, 351000, 526500, 684000]
        plt.xticks(test_x_size, my_xticks)

        plt.plot(test_x_size, test_y_error, 'r--', label='Test')
        plt.plot(train_x_size, train_y_error, 'b--', label='Training')
        plt.legend()
        plt.ylabel('Error')
        plt.xlabel('Training set size')

        plt.title('Learning curve (experiment ' + str(id_experiment) + ')')
        plt.show()
        return
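
    # Example call (hypothetical instance and ids): assuming four weight
    # snapshots were saved for experiment 10,
    #   analyzer.GraficarLearningCurve(10, 261, 262, 263, 264)
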
    def BuildWeigthsErrorAndCost_TestSet(self, noRows_Testset, skipuntilidw,
                                         justOne):
        # Compute the total cost and error over all the data, once per set of generated weights.
        # experiment_repo = Experiments.ExperimentsRepo.ExperimentsRepo(bd, id_experiment)
        # weigths_repo = WeigthsRepo.WeigthsRepo(bd, weigths_path)
        database_relative_path = self.data_base
        id_experiment = self.id_experiment
        wr = WeigthsRepo(database_name=database_relative_path,
                         id_experiment=id_experiment)
        wm = WeightsManager(database_name=database_relative_path,
                            weights_repo=wr)

        weigthsOfExperiment = wm.GetListOfWeightsByIdExperiment(id_experiment)

        print(
            '--------------------------- Test SET -------------------------------------------------'
        )

        print("Calculando Errores en TestSet y costos en Test set")

        for w in weigthsOfExperiment:
            idW = w[0]
            if idW < skipuntilidw:
                continue
            # Now load the weights

            iws = wm.LoadWeightsXId(idW)
            random_droput = np.random.RandomState(12345)
            rng_droput = T.shared_randomstreams.RandomStreams(
                random_droput.randint(999899))

            # TODO: finish instantiating this
            cnn = CNNGCresc(
                layers_metaData=layers_metaData,
                initWeights=iws,
                srng=rng_droput,
                no_channels_imageInput=1,
                isTraining=0,
                pDropOut=0.7  # previously 0.60
            )

            # experiment 9 used pDropout=0.65
            logger = LoggerRepo(id_experiment=id_experiment,
                                database_name=database_relative_path)
            experimentsManager = ExperimentsManager(database_relative_path)

            self.experiment = experimentsManager.LoadExpermentById(
                self.id_experiment)

            self.data_set_repo = DataSetRepo(
                list_PKL_files=self.experiment.pkl_test_referenceList,
                batch_size=self.experiment.batch_size,
                superbatch_Size=self.experiment.super_batch_size,
                no_rows=noRows_Testset)
            self.data_set_manager = DataSetManager(
                self.experiment.batch_size, self.experiment.super_batch_size,
                self.data_set_repo)

            validator = Validator_ValTest(
                data_set_manager=self.data_set_manager,
                logger=logger,
                cnn=cnn,
                weightsRepo=wr)

            averageError = validator.CalculateError()
            wm.UpdateTestErrorWeigth(idW, averageError)
            print("--------[Test Set] El error promedio es: " +
                  str(averageError))

            averageCost = validator.CalculateCost()
            wm.UpdateTestCostWeigth(idW, averageCost)
            print("--------[Test Set] El costo promedio es: " +
                  str(averageCost))

            if justOne:
                break
        print("End Test :)")
    def BuildResults_ValSet(self, noRows_valset, skipuntilidw, justOne):
        # Compute the total cost and error over all the data, once per set of generated weights.
        #experiment_repo = Experiments.ExperimentsRepo.ExperimentsRepo(bd, id_experiment)
        #weigths_repo = WeigthsRepo.WeigthsRepo(bd, weigths_path)
        database_relative_path = self.data_base
        id_experiment = self.id_experiment
        wr = WeigthsRepo(database_name=database_relative_path,
                         id_experiment=id_experiment)
        wm = WeightsManager(database_name=database_relative_path,
                            weights_repo=wr)

        weigthsOfExperiment = wm.GetListOfWeightsByIdExperiment(id_experiment)

        print(
            '--------------------------- Validation SET -------------------------------------------------'
        )

        print("Calculando Errores en validationSet y costos en validation set")
        csvfile = open(
            r'D:\Gyo\Dev\Thesis\dist2\analisys\DataSet_analisys_resultts.csv',
            'w',
            newline='')
        csvwriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)

        for w in weigthsOfExperiment:
            idW = w[0]
            if idW < skipuntilidw:
                continue
            # Now load the weights

            iws = wm.LoadWeightsXId(idW)
            random_droput = np.random.RandomState(12345)
            rng_droput = T.shared_randomstreams.RandomStreams(
                random_droput.randint(999899))

            # TODO: finish instantiating this
            cnn = CNNGCresc(
                layers_metaData=layers_metaData,
                initWeights=iws,
                srng=rng_droput,
                no_channels_imageInput=1,
                isTraining=0,
                pDropOut=0.7  # previously 0.60
            )

            # experiment 9 used pDropout=0.65
            logger = LoggerRepo(id_experiment=id_experiment,
                                database_name=database_relative_path)
            experimentsManager = ExperimentsManager(database_relative_path)

            self.experiment = experimentsManager.LoadExpermentById(
                self.id_experiment)

            self.data_set_repo = DataSetRepo(
                list_PKL_files=self.experiment.pkl_validation_referenceList,
                batch_size=self.experiment.batch_size,
                superbatch_Size=self.experiment.super_batch_size,
                no_rows=noRows_valset)
            self.data_set_manager = DataSetManager(
                self.experiment.batch_size, self.experiment.super_batch_size,
                self.data_set_repo)

            validator = Validator_ValTest(
                data_set_manager=self.data_set_manager,
                logger=logger,
                cnn=cnn,
                weightsRepo=wr,
                removeRandom=True)

            result = validator.CalculateResults()
            for row in zip(result[0], result[1], result[2]):
                csvwriter.writerow(row)

            if justOne:
                break

        print("End Validation :)")
from Core.ExperimentsManager import ExperimentsManager
from Core.Predictor import Predictor
from Core.Trainer import Trainer
from Core.WeightsManager import WeightsManager
from IGenericServices.ExperimentsRepo import ExperimentsRepo
from IGenericServices.LoggerRepo import LoggerRepo
from IGenericServices.WeightsRepo import WeigthsRepo
from PIL import Image
import numpy as np
import theano.tensor as T
import theano.tensor.shared_randomstreams  # makes T.shared_randomstreams resolvable

# CNNGCresc and layers_metaData are used below but defined elsewhere in this
# project; their module is not shown in this file.

database_relative_path = "../BD/FR2.0.db"

idExperiment = 10
idW = 265

# Now load the weights
wr = WeigthsRepo(database_name=database_relative_path,
                 id_experiment=idExperiment)
wm = WeightsManager(database_name=database_relative_path, weights_repo=wr)

iws = wm.LoadWeightsXId(idW)
random_droput = np.random.RandomState(12345)
rng_droput = T.shared_randomstreams.RandomStreams(
    random_droput.randint(999899))

# TODO: finish instantiating this
cnn = CNNGCresc(layers_metaData=layers_metaData,
                initWeights=iws,
                srng=rng_droput,
                no_channels_imageInput=1,
                isTraining=1,
                pDropOut=0.7)
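
# A minimal sketch of how inference might continue from here. Predictor's
# constructor and method names are assumptions (this script only imports the
# class), and "probe.png" is a hypothetical input image:
#
#   predictor = Predictor(cnn=cnn)
#   img = Image.open("probe.png").convert("L")  # single-channel input, as above
#   prediction = predictor.Predict(np.asarray(img, dtype=np.float64))
#   print(prediction)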