Example #1
import logging

import numpy as np

log = logging.getLogger(__name__)


def detect_ground(profile):
    """Automatic detection of ground (end of snowpack).

    :param snowmicropyn.Profile profile: The profile to detect ground in.
    :return: Distance where ground was detected.
    :rtype: float
    """

    force = profile.samples.force
    distance = profile.samples.distance

    ground = distance.iloc[-1]

    if force.max() >= profile.overload:
        i_ol = force.argmax()
        i_threshold = np.where(
            distance.values >= distance.values[i_ol] - 20)[0][0]
        f_mean = np.mean(force.iloc[0:i_threshold])
        f_std = np.std(force.iloc[0:i_threshold])
        threshold = f_mean + 5 * f_std

        while force.iloc[i_ol] > threshold:
            i_ol -= 10

        ground = distance.iloc[i_ol]

    log.info('Detected ground at {:.3f} mm in profile {}'.format(
        ground, profile))
    return ground
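The detection idea above derives a threshold from a quiet leading window (mean plus five standard deviations) and walks back from the overload peak. A minimal sketch of the same technique on made-up force values and window sizes:

import numpy as np

rng = np.random.default_rng(0)
# 900 noisy snow samples followed by a steep ramp into the ground
force = np.concatenate([rng.normal(1.0, 0.05, 900), np.linspace(1, 50, 100)])

threshold = np.mean(force[:200]) + 5 * np.std(force[:200])
i = force.argmax()
while force[i] > threshold:   # step back until the signal drops below threshold
    i -= 10
print('ground index ~', i)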
Example #2
from math import sqrt

import numpy as np
from pandas import read_sql
from scipy.stats import norm


def get_ROP(connection, sku):
    # Reorder point: expected demand over the lead time plus safety stock,
    # where z = NORMSINV(service level) scales the combined demand and
    # lead-time variability.
    z = norm.ppf(0.95, loc=0, scale=1)

    sql = '''select STR_TO_DATE(concat_ws("-",month(transaction.date),year(transaction.date),"01"), "%m-%Y-%d") as monthofsale,sum(quantity)
                    from transaction join transaction_sku on transaction.transaction_id = transaction_sku.transaction_id
                    join product on transaction_sku.sku = product.sku
                    where product.sku='{}'
                    and transaction.reason = 'Sale'
                    group by monthofsale,prod_name
                    order by transaction.date;
    '''.format(sku)  # caution: formatting values into SQL is injection-prone
    series = read_sql(sql,
                      con=connection,
                      parse_dates=["monthofsale"],
                      index_col=["monthofsale"])
    sales = series.values
    if len(sales) == 0:
        return 0
    sigma_d = np.std(sales)
    sigma_l = 0.7  #stddev for historical lead time
    mu_d = np.mean(sales)
    mu_l = 1.8
    ROP = (mu_d * mu_l) + (z * (sqrt((mu_l**2 * sigma_d**2) +
                                     (mu_d**2 * sigma_l**2))))

    print(sku, sigma_d)
    print(mu_d)

    return float(ROP)
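Stripped of the database query, the reorder-point arithmetic stands alone. A quick check with hypothetical demand and lead-time numbers (note the textbook safety-stock formula usually has mu_l * sigma_d**2 rather than mu_l**2 * sigma_d**2 in the first variance term):

from math import sqrt

from scipy.stats import norm

z = norm.ppf(0.95)              # ~1.645 for a 95% service level
mu_d, sigma_d = 120.0, 30.0     # hypothetical monthly demand mean / std dev
mu_l, sigma_l = 1.8, 0.7        # lead-time mean / std dev, as hard-coded above

rop = (mu_d * mu_l) + z * sqrt((mu_l**2 * sigma_d**2) + (mu_d**2 * sigma_l**2))
print(round(rop, 1))            # expected demand over lead time + safety stock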
Example #3
import numpy as np
from pandas import Series

# BasicStats (under_water_series, vami_arr) is a project-local helper.


def calculate_fts(fts=None, dates=None):
    # None defaults avoid the mutable-default-argument pitfall.
    fts = fts or []
    dates = dates or []

    if fts:
        # Rebase the return series to an index starting at 100.
        pre_fts = [100]
        new_fts = []
        pre_val = 100
        for ft in fts[1:]:
            pre_val = pre_val * (ft + 1)
            pre_fts.append(pre_val)
        pre_fts.reverse()
        for index, ft in enumerate(pre_fts):
            if index < len(pre_fts) - 1:
                val = float(
                    round(
                        float((ft - pre_fts[index + 1]) / pre_fts[index + 1]),
                        5))
                new_fts.append(val)
            else:
                new_fts.append(0.0)
        new_fts.reverse()
        fts = new_fts
        week_ser_fts = Series(data=fts[-7:], index=dates[-7:], name="")
        week_mdd_ser = BasicStats.under_water_series(week_ser_fts)
        week_max_drawdown = min(week_mdd_ser)
        vami = BasicStats.vami_arr(week_ser_fts)
        day_7_history = vami[-1] - 1
        ser_fts = Series(data=fts, index=dates, name="")
        mdd_ser = BasicStats.under_water_series(ser_fts)
        total_max_drawdown = min(mdd_ser)
        vami = BasicStats.vami_arr(ser_fts)
        entire_history = vami[-1] - 1
        week_fts = fts[-7:]
        week_volatility = np.std(week_fts)
        total_volatility = np.std(fts)

        return {
            "week_volatility": "%s%%" % str(round(week_volatility * 100, 3)),
            "week_max_drawdown": "%s%%" % str(round(week_max_drawdown, 3)),
            "total_volatility": "%s%%" % str(round(total_volatility * 100, 3)),
            "total_max_drawdown": "%s%%" % str(round(total_max_drawdown, 3)),
            "entire_history": float(round(entire_history * 100, 4)),
            "day_7_history": float(round(day_7_history * 100, 4))
        }
    else:
        return {}
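BasicStats is project code; assuming under_water_series computes a running-peak drawdown and vami_arr a cumulative growth index, a minimal numpy equivalent of the drawdown part might look like:

import numpy as np

daily_returns = np.array([0.01, -0.02, 0.005, -0.015, 0.03])  # hypothetical
index = 100 * np.cumprod(1 + daily_returns)      # rebased to 100, as above
running_peak = np.maximum.accumulate(index)
under_water = index / running_peak - 1           # drawdown series, <= 0
max_drawdown = under_water.min()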
Example #4
from math import sqrt

import numpy as np
import matplotlib.pyplot as plt


def plot_forecasts(series, forecasts, n_test):
    # plot the entire dataset in blue
    plt.figure()
    plt.plot(series.values, label='actual')
    # forecasts[0][1] = 360
    # forecasts[0][2] = 330

    xiaxian = []    # lower confidence bound
    shangxian = []  # upper confidence bound
    zhi = 5         # band width multiplier

    n = len(forecasts[0])
    sigma = np.std(forecasts[0], ddof=1)

    zsigman = zhi * float(sigma) / sqrt(n)
    print('******')
    print(zsigman)
    print('******')
    for i in range(len(forecasts[0])):
        # xiaxian.append(forecasts[0][i] - zhi)
        # shangxian.append(forecasts[0][i] + zhi)
        shangxian.append(forecasts[0][i] + zsigman)
        xiaxian.append(forecasts[0][i] - zsigman)

    # plot the forecasts in red
    off_s = len(series) - n_test - 1
    off_e = off_s + len(forecasts[0]) + 1
    xaxis = [x for x in range(off_s, off_e)]
    yaxis = [series.values[off_s]] + forecasts[0]
    plt.plot(xaxis, yaxis, color='red', label='forecast')

    xaxis1 = [x - 1 for x in range(off_s, off_e)]
    yaxis1 = [series.values[off_s]] + shangxian
    plt.plot(xaxis1, yaxis1, color='y', linestyle='--', label='upper bound')

    xaxis2 = [x + 1 for x in range(off_s, off_e)]
    yaxis2 = [series.values[off_s]] + xiaxian
    plt.plot(xaxis2, yaxis2, color='g', linestyle='--', label='lower bound')

    plt.xlabel('time')
    plt.ylabel('value')
    plt.legend()
    # show the plot
    #  plt.show()

    # filename = 'yu.png'
    # if filename is not None:
    #     plt.savefig(filename)
    # else:
    #     plt.show()

    return shangxian, xiaxian
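The band drawn above is the forecast shifted by zhi * std / sqrt(n), a standard-error-style half-width with a fixed multiplier. In isolation, with hypothetical forecast values:

from math import sqrt

import numpy as np

forecast = [102.0, 98.5, 101.2, 99.8, 100.4]    # hypothetical forecast sequence
zhi = 5
half_width = zhi * float(np.std(forecast, ddof=1)) / sqrt(len(forecast))
shangxian = [f + half_width for f in forecast]  # upper bound
xiaxian = [f - half_width for f in forecast]    # lower bound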
Example #5
import logging

import numpy as np

log = logging.getLogger(__name__)

# downsample() and smooth() are signal helpers defined elsewhere in the
# same module.


def detect_surface(profile):
    """Automatic detection of surface (begin of snowpack).

    :param profile: The profile to detect surface in.
    :return: Distance where surface was detected.
    :rtype: float
    """

    # Cut off ca. 1 mm
    distance = profile.samples.distance.values[250:]
    force = profile.samples.force.values[250:]

    force = downsample(force, 20)
    distance = downsample(distance, 20)

    force = smooth(force, 242)

    y_grad = np.gradient(force)
    y_grad = downsample(y_grad, 3)
    x_grad = downsample(distance, 3)

    max_force = np.amax(force)

    try:
        for i in np.arange(100, x_grad.size):
            std = np.std(y_grad[:i - 1])
            mean = np.mean(y_grad[:i - 1])
            if y_grad[i] >= 5 * std + mean:
                surface = x_grad[i]
                break

        # Fallback when no spike is found; note the original returns the
        # maximum force value here, not a distance.
        if i == x_grad.size - 1:
            surface = max_force

        log.info('Detected surface at {:.3f} mm in profile {}'.format(
            surface, profile))
        return surface

    except ValueError:
        log.warning('Failed to detect surface')
        return max_force
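The core of detect_surface is an expanding-window test: flag the first gradient sample exceeding the mean plus five standard deviations of everything before it. A self-contained sketch on synthetic data:

import numpy as np

rng = np.random.default_rng(1)
signal = np.concatenate([rng.normal(0.0, 0.01, 500), np.linspace(0.0, 10.0, 100)])
y_grad = np.gradient(signal)

surface_index = None
for i in np.arange(100, y_grad.size):
    if y_grad[i] >= np.mean(y_grad[:i - 1]) + 5 * np.std(y_grad[:i - 1]):
        surface_index = i
        break
print(surface_index)   # first index where the gradient spikes (~500)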
Example #6
import numpy as np


def single_elaboration(total, max_value):  # max_value: scaling maximum (avoids shadowing built-in max)
    total_list_average = []
    total_list_std = []
    total_list_generation = []
    for experiment in total:
        gen = 0
        list_average = []
        list_std = []
        list_generation = []
        for generation in experiment[1]:
            lll = []
            for el in generation:
                lll.append(float(el.replace(",", "")))
            if len(lll) > 100:
                print("Be careful, more than 100")
            list_average.append(np.average(np.array(lll)))
            list_std.append(np.std(np.array(lll)))
            list_generation.append(gen)
            gen += 1
        total_list_average.append(list_average)
        total_list_std.append(list_std)
        total_list_generation.append(list_generation)

    total_list_average_maybe = []
    total_list_std_maybe = []
    for i in range(len(total_list_average[0])):
        list_appo = []
        list_appo_two = []
        for j in range(len(total_list_average)):
            list_appo.append(total_list_average[j][i])
            list_appo_two.append(total_list_std[j][i])
        total_list_average_maybe.append(np.average(np.array(list_appo)))
        total_list_std_maybe.append(np.average(np.array(list_appo_two)))

    scaled_version = []
    for el in total_list_average_maybe:
        # Min-max scale from [0, max_value] down to [0, 1].
        scaled_version.append(el / max_value)

    return scaled_version, total_list_generation[0]
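The nested loops that average the per-generation means across experiments are an element-wise column mean; assuming a rectangular runs-by-generations layout, the same result falls out of one numpy call (here the array maximum stands in for the max_value argument):

import numpy as np

# runs x generations matrix of per-generation averages (hypothetical values)
per_run_means = np.array([[1.0, 2.0, 3.0],
                          [1.5, 2.5, 3.5]])
across_runs = per_run_means.mean(axis=0)   # what the i/j loops compute
scaled = across_runs / across_runs.max()   # min-max scale with min fixed at 0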
Example #7
 def _getAnnualReturnStd(self, a_series: Series = Series(dtype=float)) -> float:
     return self.__roundFloat(np.std(a_series) * np.sqrt(252))
 def __init__(self, y_stocks: list):
     self._a_float = 3 * math.log(y_stocks[0].TimeSpan.MonthCount)
     self._a_suffix = y_stocks[0].Column
     self._a_ts = y_stocks[0].TimeSpan
     self._a_length = len(y_stocks)
     iso_weight: float = round(1.0 / len(y_stocks), 3)
     self._stocks = y_stocks
     self._weights = np.array(len(y_stocks) * [iso_weight], dtype=float)
     self._basics = PortfolioBasics(y_stocks, self._a_float, self._legend_place)
     self._stats = PortfolioStats(self._weights, self._basics)
     self._final = PortfolioFinal(y_stocks, self._a_float, self._legend_place)
     print('Volatility\t\t\t\t\t', self._final.Volatility)
     print('Annual Expected Return\t\t', self._final.AnnualExpectedReturn)
     print('Risk Free Rate\t\t\t\t', self._final.RiskFreeRate)
     print('Free 0.005 Sharpe Ratio\t\t', self._final.Free005SharpeRatio)
     print('Kurtosis\n', self._final.KurtosisSeries)
     print('Skewness\n', self._final.SkewnessSeries)
     print('Frequency\n', self._final.Frequency)
     self._final.Plot().show()
     exit(1234)  # everything below this call is unreachable as written
     self._dataSimpleCorrelation = self._stats.SimpleReturnsNan.corr()
     self._dataSimpleCovariance = self._stats.SimpleReturnsNan.cov()
     self._dataSimpleCovarianceAnnual = self._dataSimpleCovariance * 252
     self._dataSimpleSummary = self._stats.SimpleReturnsNanSummary
     self._dataWeightedReturns = self._stats.SimpleWeightedReturns
     # axis=1 sums across the columns, yielding one value per row
     self._portfolio_weighted_returns = round(self._dataWeightedReturns.sum(axis=1), 5)
     print('7', self._portfolio_weighted_returns.head())
     print('7', self._stats.SimpleWeightedReturnsSum.head())
     #self._dataWeightedReturns['PORTFOLIOWeighted'] = portfolio_weighted_returns
     portfolio_weighted_returns_mean = round(self._portfolio_weighted_returns.mean(), 5)
     print('port_ret mean', portfolio_weighted_returns_mean)
     print(round(self._stats.SimpleWeightedReturnsSum.mean(), 5))
     portfolio_weighted_returns_std = round(self._portfolio_weighted_returns.std(), 5)
     print('port_ret std', portfolio_weighted_returns_std)
     self._portfolio_weighted_returns_cum: Series = round((self._portfolio_weighted_returns + 1).cumprod(), 5)
     #self._dataWeightedReturns['PORTFOLIOCumulative'] = self._portfolio_weighted_returns_cum
     print('$', self._dataWeightedReturns.head())
     self._portfolio_weighted_returns_geom = round(np.prod(self._portfolio_weighted_returns + 1) ** (252 / self._portfolio_weighted_returns.shape[0]) - 1, 5)
     print('geometric_port_return', self._portfolio_weighted_returns_geom)
     self._portfolio_weighted_annual_std = round(np.std(self._portfolio_weighted_returns) * np.sqrt(252), 5)
     print('port_ret annual', self._portfolio_weighted_annual_std)
     self._portfolio_weighted_sharpe_ratio = round(self._portfolio_weighted_returns_geom / self._portfolio_weighted_annual_std, 5)
     print('port_sharpe_ratio', self._portfolio_weighted_sharpe_ratio)
     print('%', self._stats.Returns.head())
     self._data_returns_avg = self._getDataReturnsAverage(self._stats.Returns)
     print('^', self._data_returns_avg.head())
     daily_log_pct_changes: DataFrame = np.log(self._stats.Returns.pct_change() + 1) #avant portfolio
     daily_log_pct_changes.columns = daily_log_pct_changes.columns + 'LogReturn'
     print('&', daily_log_pct_changes.head())
     daily_log_volatilities: DataFrame = (daily_log_pct_changes.std() * np.sqrt(252)).to_frame()
     daily_log_volatilities.columns = ['Volatility']
     print('*', daily_log_volatilities)
     port_daily_simple_ret: float = round(np.sum(self._stats.SimpleReturnsNan.mean()*self._weights), 5)
     port_weekly_simple_ret: float = round(4.856 * port_daily_simple_ret, 5)
     port_monthly_simple_ret: float = round(21 * port_daily_simple_ret, 5)
     port_quarterly_simple_ret: float = round(63 * port_daily_simple_ret, 5)
     port_yearly_simple_ret: float = round(252 * port_daily_simple_ret, 5)
     print('port_daily_simple_ret', str(100*port_daily_simple_ret) + '%')
     print('port_weekly_simple_ret', str(100*port_weekly_simple_ret) + '%')
     print('port_monthly_simple_ret', str(100*port_monthly_simple_ret) + '%')
     print('port_quarterly_simple_ret', str(100*port_quarterly_simple_ret) + '%')
     print('port_yearly_simple_ret', str(100*port_yearly_simple_ret) + '%')
     self._setPortfolioInfo()
     self._optimizer = PortfolioOptimizer(self._legend_place, self._a_float, self._stats, self._basics.Data)
     self._stock_market_index = SnP500Index('yahoo', "^GSPC", self._a_ts)
     self._linear_reg = PortfolioLinearReg(self._stock_market_index, self._stats.Returns)
     print(f'The portfolio beta is {self._linear_reg.Beta}, for each 1% of index portfolio will move {self._linear_reg.Beta}%')
     print('The portfolio alpha is ', self._linear_reg.Alpha)
     print('_', self._basics.DataLogReturns.head())
     cov_mat_annual = self._basics.DataLogReturns.cov() * 252
     print('-', cov_mat_annual)
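PortfolioBasics, PortfolioStats and the other classes are project code, but the recurring volatility and Sharpe arithmetic is plain numpy; a minimal sketch with hypothetical daily returns:

import numpy as np

daily = np.array([0.002, -0.001, 0.003, 0.001, -0.002])  # hypothetical returns
annual_std = np.std(daily) * np.sqrt(252)                # as in _getAnnualReturnStd
geom_annual = np.prod(1 + daily) ** (252 / daily.size) - 1
sharpe = geom_annual / annual_std          # risk-free rate taken as 0 here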
Example #8
def evaluate_forecasts(test, forecasts, n_lag, n_seq, shangxian, xiaxian):
    # Relies on module-level names: series, n_test, plt, np, sqrt and
    # mean_squared_error (from sklearn.metrics).
    global time, value
    mse = []
    for i in range(n_seq):
        actual = [row[i] for row in test]
        predicted = [forecast[i] for forecast in forecasts]
        rmse = sqrt(mean_squared_error(actual, predicted))
        #rmse=abs(actual-predicted)
        mse.append(rmse)
        print('t+%d RMSE: %f' % ((i + 1), rmse))

    beta = 0.8
    threshold = []
    la = 4
    for i in range(len(mse) - 1):  # exponential smoothing of the RMSE series
        mse[i + 1] = mse[i] * beta + (1 - beta) * mse[i + 1]

    lb = 5
    i = 0
    z = 0.2
    while i * la + lb < len(mse):
        #lb>la
        t = np.mean(
            mse[i * la:(i + 1) * la],
            dtype=float) + z * np.std(mse[i * la:(i + 1) * la], dtype=float)
        for j in range(i * la, (i + 1) * la):
            threshold.append(t)
        i = i + 1
    t = np.mean(
        mse[i * la:(i + 1) * la],
        dtype=float) + z * np.std(mse[i * la:(i + 1) * la], dtype=float)
    for j in range(i * la, len(mse)):
        threshold.append(t)
    plt.figure()
    plt.step(list(
        range(
            len(series.values) - n_test + 5,
            5 + len(series.values) - n_test + len(threshold))),
             threshold,
             label='threshold',
             color="#8dd3c7",
             where="pre",
             lw=2)
    #  plt.plot(list(range(len(series.values) - n_test+5, 5+len(series.values) - n_test + len(threshold))),threshold,label='threshold')
    plt.plot(list(
        range(
            len(series.values) - n_test + 5,
            5 + len(series.values) - n_test + len(mse))),
             mse,
             label='mse')
    plt.legend()
    filename = '4.png'

    if filename is not None:
        plt.savefig(filename)
    else:
        plt.show()

    for i in range(len(forecasts[0])):
        if shangxian[i] < series.values[i + len(series.values) - n_test]:
            time = i + len(series.values) - n_test + 8
            value = abs(shangxian[i] -
                        (series.values[i + len(series.values) - n_test])) - 6
            # print("Telemetry anomaly occurs at time: %d" % (i + len(series.values) - n_test))
            # print("Anomaly deviates from normal by %.5f" % (abs(shangxian[i] - (series.values[i + len(series.values) - n_test]))))
            break
        if xiaxian[i] > series.values[i + len(series.values) - n_test] + 1:
            time = i + len(series.values) - n_test + 8
            value = abs(xiaxian[i] -
                        (series.values[i + len(series.values) - n_test])) - 8
            # print("Telemetry anomaly occurs at time: %d" % (i + len(series.values) - n_test))
            # print("Anomaly deviates from normal by %.5f" % (abs(xiaxian[i] - (series.values[i + len(series.values) - n_test]))))
            break

    return time, value
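The threshold construction above is block-wise: for each run of la points the threshold is the block mean plus z standard deviations. A compact equivalent, with hypothetical RMSE values:

import numpy as np

mse = [0.20, 0.25, 0.22, 0.24, 0.21, 0.23, 0.90, 0.95]  # hypothetical RMSEs
la, z = 4, 0.2
threshold = []
for start in range(0, len(mse), la):
    block = mse[start:start + la]
    t = np.mean(block, dtype=float) + z * np.std(block, dtype=float)
    threshold.extend([t] * len(block))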
Example #10
def analise_distances(path, number, bigOrSmall):
    # rean_info, compute_bearing, compute_distance and sorted_nicely are
    # helpers defined elsewhere in the project.
    path = path + "/" + str(number) + "/"
    names = []
    for i in os.listdir(path):
        if bigOrSmall:
            name_to_check = "trajectory-generatedPoints-"
        else:
            name_to_check = "trajectory-generate-aSs-"
        # if os.path.isfile(os.path.join(path, i)) and 'trajectory-generatedPoints-' in i and ".zip" in i:
        if os.path.isfile(os.path.join(
                path, i)) and name_to_check in i and ".zip" in i:
            names.append(i)

    names = sorted_nicely(names)

    numb = 0

    total_distances_angle = []
    total_distances = []

    logging.debug("Analysing Trajectories...")
    for i in tqdm.tqdm(range(len(names))):
        name = names[i]

        trajectories_label, json_file = rean_info(path + name)

        # ----------- distance bearings

        # real points
        lat_real = []
        lng_real = []
        # generated points
        lat_generated = []
        lng_generated = []

        label_real = []
        label_generated = []
        label_trajectory = []

        # last point trajectory
        lat_last = []
        lng_last = []
        for labels in trajectories_label:
            for el in json_file[labels]["real"]:
                if el[0] not in lat_real:
                    lat_real.append(el[0])
                    lng_real.append(el[1])
                    label_real.append(json_file[labels]["id"])

            for el in json_file[labels]["generated"]:
                lat_generated.append(el[0])
                lng_generated.append(el[1])
                label_generated.append(json_file[labels]["id"])

            appo_lat = []
            appo_lgn = []
            for el in json_file[labels]["trajectory"]:
                appo_lat.append(el[0])
                appo_lgn.append(el[1])

            lat_last.append(appo_lat[len(appo_lat) - 1])
            lng_last.append(appo_lgn[len(appo_lgn) - 1])
            label_trajectory.append(json_file[labels]["id"])

        distance_per_trajectories = {}

        # for the trajectories I have
        for i in range(len(label_real)):

            # compute real bearing for the current trajectory
            real_bearing = compute_bearing(lat_last[i], lng_last[i],
                                           lat_real[i], lng_real[i])

            # find indices of the generated points corresponding to this trajectory
            index = [
                j for j, x in enumerate(label_generated) if x == label_real[i]
            ]

            index_last_point = [
                j for j, x in enumerate(label_trajectory) if x == label_real[i]
            ]

            distances = []
            for ind in index:
                bearing = compute_bearing(lat_last[index_last_point[0]],
                                          lng_last[index_last_point[0]],
                                          lat_generated[ind],
                                          lng_generated[ind])
                distances.append(fabs(bearing - real_bearing))
            array = np.array(distances)

            distance_per_trajectories.update({
                i:
                (np.max(array), np.min(array), np.mean(array), np.std(array),
                 np.median(array))
            })
        total_distances_angle.append(distance_per_trajectories)

        # ----------- distance points

        # real points
        lat_real = []
        lng_real = []
        # generated points
        lat_generated = []
        lng_generated = []

        label_real = []
        label_generated = []
        for labels in trajectories_label:
            for el in json_file[labels]["real"]:
                if el[0] not in lat_real:
                    lat_real.append(el[0])
                    lng_real.append(el[1])
                    label_real.append(json_file[labels]["id"])

            for el in json_file[labels]["generated"]:
                if el[0] not in lat_generated:
                    lat_generated.append(el[0])
                    lng_generated.append(el[1])
                    label_generated.append(json_file[labels]["id"])

        distance_per_trajectories = {}
        # now, for every trajectory, compute the distances of its generated points
        for i in range(len(label_real)):
            index = [
                j for j, x in enumerate(label_generated) if x == label_real[i]
            ]
            distances = []
            for ind in index:
                distances.append(
                    float(
                        compute_distance(lat_real[i], lng_real[i],
                                         lat_generated[ind],
                                         lng_generated[ind])))

            array = np.array(distances)
            distance_per_trajectories.update({
                i:
                (np.max(array), np.min(array), np.mean(array), np.std(array),
                 np.median(array))
            })
        total_distances.append(distance_per_trajectories)

        numb += 1
    return total_distances, total_distances_angle
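Each trajectory ends up mapped to the same five-number summary; in isolation, with hypothetical distance values:

import numpy as np

distances = np.array([12.0, 8.5, 15.2, 9.9, 11.3])  # hypothetical differences
summary = (np.max(distances), np.min(distances), np.mean(distances),
           np.std(distances), np.median(distances))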
Example #11
    def run(self):
        folders = how_many_fatherFolder(self.path)
        folders = [s for s in folders if not re.search('txt', s)]
        folders = [s for s in folders if not re.search('jpg', s)]
        folders = [s for s in folders if not re.search('png', s)]

        for experiment in folders:
            logging.debug("Folder under analysis -> " + str(experiment))
            second_path = self.path + experiment + "/"
            res = how_many_folder(second_path)
            # Filter the subfolders just found (the original filtered
            # `folders` again, leaving `res` untouched).
            res = [s for s in res if not re.search('txt', s)]
            res = [s for s in res if not re.search('jpg', s)]
            res = [s for s in res if not re.search('png', s)]
            num_folder = len(res)
            logging.debug("Folders to analyse -> " + str(num_folder))

            for el in res:
                logging.debug("Folder under analysis -> " + str(el))
                path_here = second_path + str(el) + "/"

                names = []
                for i in os.listdir(path_here):
                    if os.path.isfile(
                            os.path.join(path_here, i)
                    ) and 'trajectory-generate-aSs-' in i and ".zip" in i:
                        names.append(i)

                names = sorted_nicely(names)

                pops = Populations()
                # find the trajectories ID and Points
                trajectories = self.read_trajectory_info(path_here +
                                                         "trajectory.zip")
                for tra in trajectories:
                    pops.add_population(Population(tra))

                # analysing the fitness
                logging.debug("Analysing the fitness...")
                max_agent, max_classifier = self.find_max_values_fitness(
                    path_here)
                agent_generations_info, classifier_generations_info = self.read_fitness(
                    path_here, max_agent, max_classifier)

                x = np.arange(len(agent_generations_info))
                y_agent = []
                std_agent = []
                for element in agent_generations_info:
                    y_agent.append(element.mean)
                    std_agent.append(element.std)
                y_classifier = []
                std_classifier = []
                for element in classifier_generations_info:
                    y_classifier.append(element.mean)
                    std_classifier.append(element.std)

                # plot the fitness curves
                self.print_fitnes(x, y_agent, std_agent, y_classifier,
                                  std_classifier, path_here)

                total_distances = []
                total_distances_msd = []
                std_distances = []
                last_generations_values = []
                logging.debug("Analysing Trajectories...")
                for i in tqdm.tqdm(range(len(names))):
                    name = names[i]

                    # obtain info from the file
                    individuals = self.read_info(path_here + name)

                    if i == len(names) - 1:
                        for ind in individuals:
                            for el in ind.array:
                                last_generations_values.append(el)

                    msds = []
                    for ind in individuals:
                        msds.append(ind.MSD)
                    total_distances.append(np.mean(np.array(msds)))
                    std_distances.append(np.std(np.array(msds)))

                    # store the msd per trajectory
                    distance_per_trajectories = {}
                    for j in range(number_of_trajectories):  # defined elsewhere in the module
                        distances = []
                        for indiv in individuals:
                            if indiv.trajectoryID == pops.get_population(
                                    j).tra.trajectoryID:
                                distances.append(indiv.MSD)

                        array = np.array(distances)
                        MSD = (np.sum(array)) / len(array)
                        distance_per_trajectories.update({j: MSD})
                    total_distances_msd.append(distance_per_trajectories)

                # print graph msd per trajectory
                self.print_graph_msd_per_trajectory(total_distances_msd,
                                                    path_here)

                # print graph total msd
                self.print_graph_msd_total(total_distances, std_distances,
                                           path_here)

                # save the last value
                array = np.array(last_generations_values)
                MSD = (np.sum(array)) / len(array)

                with open(path_here + "/MSD.txt", "w") as text_file:
                    text_file.write(str(MSD))
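print_fitnes is a project plotting helper; assuming it draws mean fitness with a std band per generation, a minimal matplotlib stand-in could be:

import numpy as np
import matplotlib.pyplot as plt

x = np.arange(5)
y_agent = np.array([0.20, 0.35, 0.50, 0.60, 0.62])     # hypothetical means
std_agent = np.array([0.05, 0.06, 0.05, 0.04, 0.04])   # hypothetical stds
plt.errorbar(x, y_agent, yerr=std_agent, capsize=3, label='agent fitness')
plt.xlabel('generation')
plt.legend()
plt.savefig('fitness.png')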
Example #12
 def __init__(self, vector, max_value):
     self.vector = np.array(vector)
     self.vector = self.vector / max_value  # min-max scale from [0, max_value] to [0, 1]
     self.mean = np.mean(self.vector)
     self.std = np.std(self.vector)
Example #13
import numpy as np


def lin_fit(x, y):
    m, c = np.polyfit(x, y, 1)  # least-squares slope and intercept
    y_fit = x * m + c
    std = np.std(y - y_fit)     # scatter of the residuals around the fit

    return x, y_fit, m, c, std
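lin_fit wraps np.polyfit's degree-1 fit and reports the residual standard deviation; a quick usage check on hypothetical points:

import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
y = np.array([0.1, 0.9, 2.1, 2.9, 4.2])   # roughly y = x plus noise
x, y_fit, m, c, std = lin_fit(x, y)
print(m, c, std)   # slope ~1, intercept ~0, small residual scatter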
Example #14
            index_last_point = [
                j for j, x in enumerate(label_trajectory) if x == label_real[i]
            ]

            distances = []
            for ind in index:
                bearing = computeBearing(lat_last[index_last_point[0]],
                                         lng_last[index_last_point[0]],
                                         lat_generated[ind],
                                         lng_generated[ind])
                distances.append(fabs(bearing - real_bearing))
            array = np.array(distances)

            distance_per_trajectories.update({
                i:
                (np.max(array), np.min(array), np.mean(array), np.std(array))
            })
        total_distances.append(distance_per_trajectories)
        # # real points
        # lat_real = []
        # lng_real = []
        # for el in json_file[trajectories_label[0]]["real"]:
        #     lat_real.append(el[0])
        #     lng_real.append(el[1])
        #
        # # generated points
        # lat_generated = []
        # lng_generated = []
        # for label in trajectories_label:
        #     for el in json_file[label]["generated"]:
        #         lat_generated.append(el[0])
Example #15
                'segment': segment,
                'accuracy': acc_test,
                'f1-score': f1sc_test
            },
            ignore_index=True)

        print(" accuracy train " + str(acc_train) + ' vs ' + str(acc_test) +
              ' test')
        print("f1-score train " + str(f1sc_train) + ' vs ' + str(f1sc_test) +
              ' test')

    df_all.to_csv(csv_file, mode='a', header=False)
    df_all = df_all.iloc[0:0]

    acc_avr = np.mean(np.array(accuracies))
    acc_std = np.std(np.array(accuracies))

    f1_avr = np.mean(np.array(f1scores))
    f1_std = np.std(np.array(f1scores))
    df_results = df_results.append(
        {
            'bursts': bursts[ind_interval],
            'channel': channel,
            'segment': segment,
            'acc avr': acc_avr,
            'acc std_dev': acc_std,
            'f1-sc avr': f1_avr,
            'f1-sc std_dev': f1_std
        },
        ignore_index=True)