    def train(self):
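        """Fit the SEAIRD model for ``self.country``: run differential
        evolution on the ODE loss, optionally estimate under-notification
        factors, export the forecast dataframe and return the parameters."""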

        self.death = self.load_dead()
        self.recovered = self.load_recovered()
        self.data = self.load_confirmed() - self.recovered - self.death

        size = len(self.data)
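        # search bounds for the 10 model parameters
        # (beta0, beta01, startT, beta2, sigma, a, b, c, d, mu)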
        bounds = [(1e-20, .2), (1e-20, .2), (5, size - 5), (1e-20, .2),
                  (1 / 120, 1.), (1e-20, .4), (1e-20, .4), (1e-20, .4),
                  (1e-20, .4), (1e-20, .4)]

        maxiterations = 5500
        f = self.create_lossOdeint()
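        # differential evolution (Yabox DE): iterate maxiterations generations,
        # keeping the best member of the current population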
        de = DE(f, bounds, maxiters=maxiterations)  #,popsize=100)
        with tqdm(total=maxiterations) as pbar:
            for step in de.geniterator():
                idx = step.best_idx
                norm_vector = step.population[idx]
                best_params = de.denormalize([norm_vector])
                pbar.update(1)  # one tick per DE generation
        p = best_params[0]

        if self.under:
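            # estimate under-notification factors for cases, recovered and
            # deaths with basin-hopping (L-BFGS-B local minimization)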
            f = self.create_lossSub(p)
            bnds = ((.1, 4), (.1, 4), (.1, 4))
            x0 = [0.9, 0.9, 0.9]
            minimizer_kwargs = {"method": "L-BFGS-B", "bounds": bnds}
            optimal = basinhopping(f,
                                   x0,
                                   minimizer_kwargs=minimizer_kwargs,
                                   disp=True,
                                   niter=100)
            p2 = optimal.x
        else:
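            # no correction: under-notification factors default to 1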
            p2 = [1, 1, 1]

        p = np.concatenate((p, p2))
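        # named view of the full parameter vector (bounds order plus the
        # three under-notification factors)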
        beta0, beta01, startT, beta2, sigma, a, b, c, d, mu, sub, subRec, subDth = p
        print("country {}".format(self.country))
        print("under notifications cases {:.2f}".format(p2[0]))
        print("under notifications recovered {:.2f}".format(p2[1]))
        print("under notifications deaths {:.2f}".format(p2[2]))

        today = datetime.today()
        endDate = today + timedelta(days=self.deltaDay)
        self.end_date = datetime.strftime(endDate, '%-m/%-d/%y')
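        # reload the observed series before running the forecast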
        self.death = self.load_dead()
        self.recovered = self.load_recovered()
        self.data = self.load_confirmed() - self.recovered - self.death

        new_index, extended_actual, extended_death, extended_recovered, y0, y1, y2, y3, y4, y5 \
                = self.predict(p)

        #prepare dataframe to export
        df = pd.DataFrame(
            {
                'Susceptible': y0,
                'Exposed': y1,
                'Asymptomatic': y2,
                'Infected data': extended_actual,
                'Infected': y3,
                'Recovered': extended_recovered,
                'Predicted Recovered': y4,
                'Death data': extended_death,
                'Predicted Deaths': y5
            },
            index=new_index)

        if self.savedata:
            # save simulation results for comparison and reuse in other codes/routines
            df.to_pickle('./data/SEAIRDv5_Yabox_' + self.country + '.pkl')
            df.to_csv('./results/data/SEAIRDv5_Yabox_' + self.country + '.csv',
                      sep=",")

        del idx, norm_vector, best_params, df,\
            new_index, extended_actual, extended_death, extended_recovered, y0, y1, y2, y3, y4, y5

        return p
    def train(self):
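        """Fit the SEAIRD model for ``self.country`` with a 15-parameter
        search; optionally refine the under-notification factors with
        basin-hopping, export the forecast dataframe and return the
        parameters."""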

        self.death = self.load_dead(self.country)
        self.recovered = self.load_recovered(self.country)
        self.data = self.load_confirmed(
            self.country) - self.recovered - self.death

        size = len(self.data)
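        # search bounds for all 15 parameters; the last three
        # (under-notification factors) are pinned at 1 for this stage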
        bounds = [(1e-12, .2), (1e-12, .2), (5, size - 5), (1e-12, .2),
                  (1 / 160, 0.4), (1 / 160, .4), (1 / 160, .4), (1e-12, .4),
                  (1e-12, .4), (1e-12, .4), (1e-12, .4), (1e-12, .4), (1., 1.),
                  (1., 1.), (1., 1.)]

        maxiterations = 8500
        f = self.create_lossOdeint(
            self.data, self.death, self.recovered,
            self.s_0, self.e_0, self.a_0, self.i_0, self.r_0, self.d_0,
            self.startNCases, self.weigthCases, self.weigthRecov)
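        # differential evolution (Yabox DE): iterate maxiterations generations,
        # keeping the best member of the current population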
        de = DE(f, bounds, maxiters=maxiterations)  #,popsize=100)
        with tqdm(total=maxiterations) as pbar:
            for step in de.geniterator():
                idx = step.best_idx
                norm_vector = step.population[idx]
                best_params = de.denormalize([norm_vector])
                pbar.update(1)  # one tick per DE generation
        p = best_params[0]

        if self.under:
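            # refine with basin-hopping: the epidemic parameters are pinned to
            # the DE solution, only the three under-notification factors vary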

            f = self.create_lossOdeint(
                self.data, self.death, self.recovered,
                self.s_0, self.e_0, self.a_0, self.i_0, self.r_0, self.d_0,
                self.startNCases, self.weigthCases, self.weigthRecov)

            bnds = ((p[0], p[0]), (p[1], p[1]), (p[2], p[2]), (p[3], p[3]),
                    (p[4], p[4]), (p[5], p[5]), (p[6], p[6]), (p[7], p[7]),
                    (p[8], p[8]), (p[9], p[9]), (p[10], p[10]), (p[11], p[11]),
                    (.5, 1.05), (.5, 1.05), (.5, 1.05))

            x0 = [
                p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9],
                p[10], p[11], 0.9, 0.9, 0.9
            ]
            minimizer_kwargs = {"method": "L-BFGS-B", "bounds": bnds}
            optimal = basinhopping(f,
                                   x0,
                                   minimizer_kwargs=minimizer_kwargs,
                                   disp=True)

            # unpack the optimized parameter vector
            beta0, beta01, startT, beta2, sigma, sigma2, sigma3, gamma, b, gamma2, d, mu, sub, subRec, subDth = optimal.x

            print("country {}".format(self.country))
            print("under notifications cases {:.2f}".format(sub))
            print("under notifications recovered {:.2f}".format(subRec))
            print("under notifications deaths {:.2f}".format(subDth))
            p = optimal.x

        else:
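            # keep the DE solution; the under-notification factors stay at 1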
            beta0, beta01, startT, beta2, sigma, sigma2, sigma3, gamma, b, gamma2, d, mu, sub, subRec, subDth = p


        (new_index, extended_actual, extended_death, extended_recovered,
         y0, y1, y2, y3, y4, y5) = self.predict(
             beta0, beta01, startT, beta2, sigma, sigma2, sigma3, gamma, b,
             gamma2, d, mu, sub, subRec, subDth, self.data, self.death,
             self.recovered, self.country, self.s_0, self.e_0, self.a_0,
             self.i_0, self.r_0, self.d_0, self.predict_range)

        #prepare dataframe to export
        df = pd.DataFrame(
            {
                'Susceptible': y0,
                'Exposed': y1,
                'Asymptomatic': y2,
                'Infected data': extended_actual,
                'Infected': y3,
                'Recovered': extended_recovered,
                'Predicted Recovered': y4,
                'Death data': extended_death,
                'Predicted Deaths': y5
            },
            index=new_index)

        if self.savedata:
            # save simulation results for comparison and reuse in other codes/routines
            df.to_pickle('./data/SEAIRDv5_Yabox_' + self.country + '.pkl')
            df.to_csv('./results/data/SEAIRDv5_Yabox_' + self.country + '.csv',
                      sep=",")

        del idx, norm_vector, best_params, df,\
            new_index, extended_actual, extended_death, extended_recovered, y0, y1, y2, y3, y4, y5

        return p