Code Example #1
def cluster_estimation_study(folder_experiment, cluster_dir):
    for dirName, subdirList, fileList in os.walk(folder_experiment):
        biggest_clusters_avg = np.array([])
        n_clusters_avg = np.array([])

        num_robots = "-1"
        rho = -1.0
        alpha = -1.0
        elements = dirName.split("_")
        for e in elements:
            if e.startswith("robots"):
                num_robots = e.split("#")[-1]
            if e.startswith("rho"):
                rho = float(e.split("#")[-1])
            if e.startswith("alpha"):
                alpha = float(e.split("#")[-1])

        #         print(num_robots+' '+str(rho)+' '+str(alpha))
        if num_robots == "-1" or rho == -1.0 or alpha == -1:
            continue
        else:
            print(dirName)
            runs = len([f for f in fileList if f.endswith('position.tsv')])
        #         print(runs)

        [_, df_experiment] = utils.load_pd_positions(dirName, "experiment")
        positions_concatenated = df_experiment.values[:, 1:]  # [robots, times]
        [num_robot, num_times] = positions_concatenated.shape
        positions_concatenated = np.array(
            [x.split(',') for x in positions_concatenated.ravel()],
            dtype=float)
        positions_concatenated = positions_concatenated.reshape(
            num_robot, num_times, 2)
        position_concatenated_split = np.split(positions_concatenated, runs)

        for single_run in position_concatenated_split:
            #         print('single run processing')
            biggest_clusters_per_time, n_clusters = get_connected_components(
                single_run)
            biggest_clusters_avg = np.vstack([
                biggest_clusters_avg, biggest_clusters_per_time
            ]) if biggest_clusters_avg.size else biggest_clusters_per_time
            n_clusters_avg = np.vstack([
                n_clusters_avg, n_clusters
            ]) if n_clusters_avg.size else n_clusters

        biggest_clusters_avg = np.mean(biggest_clusters_avg, axis=0)
        n_clusters_avg = np.mean(n_clusters_avg, axis=0)

        print('Plotting')
        plot_info_clusters(biggest_clusters_avg, n_clusters_avg, str(rho),
                           str(alpha), num_robots, cluster_dir)
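
A minimal usage sketch (hypothetical paths; the experiment folder is assumed to contain one subdirectory per parameter combination, named with robots#<N>, rho#<value> and alpha#<value> tokens separated by underscores, each holding the *position.tsv run files):

# Hypothetical invocation of the function above.
cluster_estimation_study("experiments/2020-02-14_open_space", "results/clusters")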
Code Example #2
def distance_from_origin_distribution(main_folder, folder_experiments,
                                      powerlaw_dir):
    Ncolors = 200
    colormap = plt.cm.viridis  # LinearSegmentedColormap
    #     print("colormap.N", colormap.N)
    Ncolors = min(colormap.N, Ncolors)
    mapcolors = [
        colormap(int(x * colormap.N / Ncolors)) for x in range(Ncolors)
    ]
    #     print(len(mapcolors))

    for dirName, subdirList, fileList in os.walk(main_folder + '/' +
                                                 folder_experiments):

        num_robots = "-1"
        rho = -1.0
        alpha = -1.0
        elements = dirName.split("_")
        for e in elements:
            if e.startswith("robots"):
                num_robots = e.split("#")[-1]
            if e.startswith("rho"):
                rho = float(e.split("#")[-1])
            if e.startswith("alpha"):
                alpha = float(e.split("#")[-1])

        if num_robots == "-1" or rho == -1.0 or alpha == -1:
            continue

        rho_str = str(rho)
        alpha_str = str(alpha)
        #     print("rho", rho_str)
        #     print("alpha", alpha_str)
        #     print(dirName)

        df_experiment = pd.DataFrame()

        [_, df_experiment] = utils.load_pd_positions(dirName, "experiment")

        #     print(number_of_experiments)
        positions_concatenated = df_experiment.values[:, 1:]
        [num_robot, num_times] = positions_concatenated.shape
        positions_concatenated = np.array(
            [x.split(',') for x in positions_concatenated.ravel()],
            dtype=float)
        positions_concatenated = positions_concatenated.reshape(
            num_robot, num_times, 2)
        # print("positions_concatenated.shape", positions_concatenated.shape)

        distances = utils.distance_from_the_origin(positions_concatenated)
        # print("distances.shape", distances.shape)
        fig = plt.figure(figsize=(10, 5), dpi=160)
        plt.xlim((0.001, 100))
        plt.ylim((0.0001, 100))
        for i, d in enumerate(distances):
            fit = powerlaw.Fit(d, xmin=0.00001)
            # print(i)
            fit.plot_pdf(linewidth=2, color=mapcolors[i])
        plt.ylabel('p(x)')
        plt.xlabel('distance from origin')
        plt.title(
            "origin distance distribution with %s robots, alpha=%s, rho=%s" %
            (num_robots, alpha, rho))
        file_name = "powerlaw_%s_rho_%s_alpha_%s.png" % (num_robots, rho,
                                                         alpha)
        plt.savefig(powerlaw_dir + '/' + file_name)
        plt.close(fig)
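
The per-robot fitting step uses the powerlaw package; a self-contained sketch on synthetic heavy-tailed data (not part of the original module) exercises the same Fit/plot_pdf calls in isolation:

import numpy as np
import matplotlib.pyplot as plt
import powerlaw

rng = np.random.default_rng(0)
synthetic_distances = rng.pareto(a=2.5, size=1000) + 0.01  # toy heavy-tailed sample
fit = powerlaw.Fit(synthetic_distances, xmin=0.00001)      # same xmin as above
fit.plot_pdf(linewidth=2)                                  # plot the empirical PDF
print("estimated power-law exponent:", fit.power_law.alpha)
plt.show()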
Code Example #3
def evaluate_history_WMSD_and_time_diffusion(main_folder, folder_experiments, baseline_dir, msd_type, b_edges,
                                             result_time_dir, distance_heatmap_dir):
    print("MSD Evaluation and/or Density Maps Generation")

    for dirName, subdirList, fileList in os.walk(main_folder + '/' + folder_experiments):

        # print(dirName)
        num_robots = "-1"
        rho = -1.0
        alpha = -1.0
        elements = dirName.split("_")
        for e in elements:
            if e.startswith("robots"):
                num_robots = e.split("#")[-1]
            if e.startswith("rho"):
                rho = float(e.split("#")[-1])
            if e.startswith("alpha"):
                alpha = float(e.split("#")[-1])

        #         print(num_robots+' '+str(rho)+' '+str(alpha))
        if num_robots == "-1" or rho == -1.0 or alpha == -1:
            continue

        # print("dirName: ", dirName)
        runs = len([f for f in fileList if
                    (os.path.isfile(os.path.join(dirName, f)) and f.endswith('position.tsv'))])
        # print("runs: ", runs)

        rho_str = str(rho)
        alpha_str = str(alpha)
        # print("rho", rho_str)
        # print("alpha", alpha_str)

        total_experiment_wmsd = []
        baseline_experiment_wmsd = []

        #     print(alpha_str)

        folder_baseline = baseline_dir + "alpha#%s_rho#%s_baseline_1800" % (alpha_str, rho_str)
        # if not os.path.isdir(main_folder + '/' + folder_baseline):
        #     print("folder_baseline is not an existing directory")
        #     exit(-1)

        number_of_experiments = 0
        df_experiment = pd.DataFrame()
        df_baseline = pd.DataFrame()

        #         print("W_size=", window_size)
        [number_of_experiments, df_experiment] = utils.load_pd_positions(dirName, "experiment")
        [_, df_baseline] = utils.load_pd_positions(folder_baseline, "baseline")

        #     print(number_of_experiments)
        positions_concatenated = df_experiment.values[:, 1:]
        [num_robot, num_times] = positions_concatenated.shape
        positions_concatenated = np.array([x.split(',') for x in positions_concatenated.ravel()], dtype=float)
        positions_concatenated = positions_concatenated.reshape(num_robot, num_times, 2)
        #         print(positions_concatenated.shape)

        if config.comparison_plots_flag:
            baseline_concatenated = df_baseline.values[:, 1:]
            [num_robot, num_times] = baseline_concatenated.shape
            baseline_concatenated = np.array([x.split(',') for x in baseline_concatenated.ravel()], dtype=float)
            baseline_concatenated = baseline_concatenated.reshape(num_robot, num_times, 2)

            for window_size in range(1, 10):
                w_displacement_array = np.array([])
                base_w_displacement_array = np.array([])

                if msd_type == 'windowed':
                    base_win_disp = utils.window_displacement(baseline_concatenated, window_size)
                    win_disp = utils.window_displacement(positions_concatenated, window_size)
                else:
                    # win_disp = utils.fixed_window_displacement(positions_concatenated, window_size)
                    # base_win_disp = utils.fixed_window_displacement(baseline_concatenated, window_size)
                    win_disp = utils.time_mean_square_displacement(positions_concatenated)
                    base_win_disp = utils.time_mean_square_displacement(baseline_concatenated)

                w_displacement_array = np.vstack(
                    [w_displacement_array, win_disp]) if w_displacement_array.size else win_disp
                base_w_displacement_array = np.vstack(
                    [base_w_displacement_array, base_win_disp]) if base_w_displacement_array.size else base_win_disp
                # mean_wmsd = win_disp.mean()

                total_experiment_wmsd.append(w_displacement_array)
                baseline_experiment_wmsd.append(base_w_displacement_array)

                if msd_type == 'time_msd':
                    break
            # utils.plot_both_wmsd(windowed, baseline_experiment_wmsd, total_experiment_wmsd, alpha_str, rho_str, num_robots,
            #                     title, result_time_dir)
            utils.plot_both_wmsd(baseline_experiment_wmsd, total_experiment_wmsd, alpha_str, rho_str, num_robots,
                                 result_time_dir, msd_type)

        if config.density_maps_flag:
            distances = utils.distance_from_the_origin(positions_concatenated)
            occurrences = utils.get_occurrences(distances, b_edges, runs)

            if not config.open_space_flag and config.density_maps_flag:
                utils.time_plot_histogram(occurrences.T, b_edges[1:], alpha_str, rho_str, num_robots,
                                          distance_heatmap_dir)
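
The values[:, 1:] -> split(',') -> reshape pattern used throughout these functions turns a frame of "x,y" strings into a (robots, times, 2) array; a toy sketch with made-up data:

import numpy as np
import pandas as pd

# Toy frame: first column is an id, remaining columns hold "x,y" strings per timestep.
df = pd.DataFrame({"robot": [0, 1],
                   "t0": ["0.0,0.0", "1.0,1.0"],
                   "t1": ["0.1,0.2", "1.1,0.9"]})
cells = df.values[:, 1:]                       # shape (num_robot, num_times)
num_robot, num_times = cells.shape
xy = np.array([c.split(',') for c in cells.ravel()], dtype=float)
xy = xy.reshape(num_robot, num_times, 2)       # (robots, times, (x, y))
print(xy.shape)                                # (2, 2, 2)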
Code Example #4
def avg_connection_degree_heatmap(folder_experiment,
                                  avg_connection_degree_dir):
    for dirName, subdirList, fileList in os.walk(folder_experiment):

        num_robots = "-1"
        rho = -1.0
        alpha = -1.0
        elements = dirName.split("_")
        for e in elements:
            if e.startswith("robots"):
                num_robots = e.split("#")[-1]
            if e.startswith("rho"):
                rho = float(e.split("#")[-1])
            if e.startswith("alpha"):
                alpha = float(e.split("#")[-1])

        #         print(num_robots+' '+str(rho)+' '+str(alpha))
        if num_robots == "-1" or rho == -1.0 or alpha == -1:
            continue
        else:
            # print(dirName)
            runs = len([f for f in fileList if f.endswith('position.tsv')])
        #         print(runs)

        [_, df_experiment] = utils.load_pd_positions(dirName, "experiment")

        positions_concatenated = df_experiment.values[:, 1:]
        [num_robot, num_times] = positions_concatenated.shape
        positions_concatenated = np.array(
            [x.split(',') for x in positions_concatenated.ravel()],
            dtype=float)
        positions_concatenated = positions_concatenated.reshape(
            num_robot, num_times, 2)
        position_concatenated_split = np.split(positions_concatenated, runs)
        # print("positions_concatenated.shape: ", positions_concatenated.shape)

        # CONNECTIONS
        connection_number_history = np.array([])
        for single_run in position_concatenated_split:
            connection_number_history = np.vstack(
                (connection_number_history, get_connections(single_run)
                 )) if connection_number_history.size else get_connections(
                     single_run)

        # print(connection_number_history.shape)

        # ORIGIN DISTANCE
        origin_distance = utils.distance_from_the_origin(
            positions_concatenated).T

        connection_in_time = np.ones(
            (config.bin_edges.size - 1, origin_distance.shape[1])) * -1

        for idx, distance_t in enumerate(origin_distance.T):
            #     print(distance_t)
            for edge_idx in range(config.bin_edges.size - 1):
                #         print(bin_edges[edge_idx],bin_edges[edge_idx+1])
                #         print("\t",edge_idx)

                if edge_idx < config.bin_edges.size - 1 or not config.open_space_flag:
                    where_index = np.where(
                        np.logical_and(
                            distance_t >= config.bin_edges[edge_idx],
                            distance_t < config.bin_edges[edge_idx + 1]))
                    connection_in_time[edge_idx, idx] = np.mean(
                        connection_number_history[where_index])

                else:
                    where_index = np.where(
                        distance_t >= config.bin_edges[edge_idx])
                    connection_in_time[edge_idx, idx] = np.mean(
                        connection_number_history[where_index])

        connection_heatmap(config.bin_edges, connection_in_time, str(alpha),
                           str(rho), num_robots, avg_connection_degree_dir)
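
The inner loop above averages connection degrees per radial distance band; the masking it relies on can be illustrated with made-up numbers (hypothetical edges and values):

import numpy as np

bin_edges = np.array([0.0, 1.0, 2.0, 3.0])    # hypothetical radial bin edges
distance_t = np.array([0.2, 1.5, 2.7, 0.9])   # distances of four robots at one timestep
degree_t = np.array([3.0, 1.0, 0.0, 2.0])     # their connection degrees

for edge_idx in range(bin_edges.size - 1):
    in_band = np.logical_and(distance_t >= bin_edges[edge_idx],
                             distance_t < bin_edges[edge_idx + 1])
    mean_degree = np.mean(degree_t[in_band]) if in_band.any() else np.nan
    print(edge_idx, mean_degree)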
Code Example #5
def avg_connection_plot_different_population_sizes(folder_experiment,
                                                   avg_connection_degree_dir):
    # Colour map (one colour per population-size curve); built here as in
    # distance_from_origin_distribution above so the snippet is self-contained.
    Ncolors = 200
    colormap = plt.cm.viridis
    Ncolors = min(colormap.N, Ncolors)
    mapcolors = [
        colormap(int(x * colormap.N / Ncolors)) for x in range(Ncolors)
    ]

    for a in config.alpha_array:
        for r in config.rho_array:
            #         print("a",a,"r",r)
            robot_arr = []
            rho_str, alpha_str = str(r), str(a)  # used in the title/filename even when no directory matches
            fig = plt.figure(figsize=(20, 10), dpi=80)

            #         for dirName, subdirList, fileList in os.walk(folder_experiment):
            for dirName in natsorted(os.listdir(folder_experiment)):
                dirPath = os.path.join(folder_experiment, dirName)

                num_robots = "-1"
                rho = -1.0
                alpha = -1.0
                elements = dirName.split("_")
                for e in elements:
                    if e.startswith("robots"):
                        num_robots = e.split("#")[-1]
                    if e.startswith("rho"):
                        rho = float(e.split("#")[-1])
                    if e.startswith("alpha"):
                        alpha = float(e.split("#")[-1])

                #         print(num_robots+' '+str(rho)+' '+str(alpha))
                #             print(alpha, rho)
                if num_robots == "-1" or rho != r or alpha != a:
                    continue
                else:
                    #                 print(dirName)

                    robot_arr += [int(num_robots)]
                    #                 print(num_robots, int(num_robots))
                    runs = len([
                        f for f in os.listdir(dirPath)
                        if (os.path.isfile(os.path.join(dirPath, f))
                            and f.endswith('position.tsv'))
                    ])
                #         print(runs)

                rho_str = str(rho)
                alpha_str = str(alpha)

                [_, df_experiment] = utils.load_pd_positions(dirPath, "experiment")

                positions_concatenated = df_experiment.values[:, 1:]
                [num_robot, num_times] = positions_concatenated.shape
                positions_concatenated = np.array(
                    [x.split(',') for x in positions_concatenated.ravel()],
                    dtype=float)
                positions_concatenated = positions_concatenated.reshape(
                    num_robot, num_times, 2)

                print("runs", runs)
                print("positions_concatenated.shape:",
                      positions_concatenated.shape)
                position_concatenated_split = np.split(positions_concatenated,
                                                       runs)

                connection_number_history = np.array([])
                for single_run in position_concatenated_split:
                    connection_number_history = np.vstack(
                        (connection_number_history,
                         get_connections(single_run))
                    ) if connection_number_history.size else get_connections(
                        single_run)

                connection_number_history_mean = np.mean(
                    connection_number_history, axis=0)
                #             print(num_times)
                #             print(connection_number_history_mean.shape)
                #             print(len(robot_arr))
                plt.plot(np.arange(num_times),
                         connection_number_history_mean,
                         linewidth=2,
                         label=num_robots,
                         color=mapcolors[len(robot_arr)])

            plt.title("Average connection with " + r" $\bf{\rho}:$" + rho_str +
                      " and " + r"$\bf{\alpha}:$" + alpha_str)
            plt.ylabel('mean connection link')
            plt.xlabel('time')
            #     plt.legend(loc='lower right')
            if not config.open_space_flag:
                plt.yticks(np.arange(0, 9, 0.5))
            else:
                plt.yticks(np.arange(0, 20, 0.5))
            #     plt.grid(which='minor')
            plt.grid()
            plt.legend(loc=2)  # , bbox_to_anchor=(0.95, 0.5))
            file_name = "average_connection_plot_rho_%s_alpha_%s.png" % (
                rho_str, alpha_str)
            plt.savefig(avg_connection_degree_dir + '/' + file_name)
            plt.close(fig)
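
The np.vstack(...) if arr.size else ... accumulation used for connection_number_history (and in several functions above) stacks one row per run; a standalone toy example:

import numpy as np

history = np.array([])                                  # empty until the first run is added
for run_values in ([1.0, 2.0, 3.0], [4.0, 5.0, 6.0]):   # toy per-run time series
    row = np.asarray(run_values)
    history = np.vstack((history, row)) if history.size else row

print(history.shape)         # (2, 3): one row per run
print(history.mean(axis=0))  # per-timestep mean across runs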
Code Example #6
def evaluate_WMSD_heatmap(main_folder, folder_experiments, baseline_dir,
                          msd_type, heatmap_dir):
    for window_size in range(1, 10):

        total_dict = dict()
        number_dict = dict()

        for dirName, subdirList, fileList in os.walk(main_folder + '/' +
                                                     folder_experiments):

            num_robots = "-1"
            rho = -1.0
            alpha = -1.0
            elements = dirName.split("_")
            for e in elements:
                if e.startswith("robots"):
                    num_robots = e.split("#")[-1]
                    if num_robots not in total_dict:
                        total_dict[num_robots] = dict()
                        number_dict[num_robots] = dict()

                if e.startswith("rho"):
                    rho = float(e.split("#")[-1])
                if e.startswith("alpha"):
                    alpha = float(e.split("#")[-1])

            #     print(str(count) + " : " + dirName)
            if num_robots == "-1" or rho == -1.0 or alpha == -1:
                continue

            rho_str = str(rho)
            alpha_str = str(alpha)
            #     print("rho", rho_str)
            #     print("alpha", alpha_str)
            #     print(dirName)
            if rho_str not in total_dict[num_robots]:
                total_dict[num_robots][rho_str] = dict()
                number_dict[num_robots][rho_str] = dict()
            #         print(total_dict)

            total_experiment_wmsd = []
            baseline_experiment_wmsd = []

            # folder_baseline = "baseline_2020-02-14/2020-02-14_robots#1_alpha#%s_rho#%s_baseline_1800" %(alpha_str, rho_str)
            folder_baseline = baseline_dir + "alpha#%s_rho#%s_baseline_1800" % (
                alpha_str, rho_str)

            number_of_experiments = 0
            df_experiment = pd.DataFrame()
            df_baseline = pd.DataFrame()

            #         print("W_size=", window_size)
            [number_of_experiments,
             df_experiment] = utils.load_pd_positions(dirName, "experiment")
            [_, df_baseline] = utils.load_pd_positions(folder_baseline,
                                                       "baseline")

            #     print(number_of_experiments)
            positions_concatenated = df_experiment.values[:, 1:]
            [num_robot, num_times] = positions_concatenated.shape
            positions_concatenated = np.array(
                [x.split(',') for x in positions_concatenated.ravel()],
                dtype=float)
            positions_concatenated = positions_concatenated.reshape(
                num_robot, num_times, 2)

            baseline_concatenated = df_baseline.values[:, 1:]
            [num_robot, num_times] = baseline_concatenated.shape
            baseline_concatenated = np.array(
                [x.split(',') for x in baseline_concatenated.ravel()],
                dtype=float)
            baseline_concatenated = baseline_concatenated.reshape(
                num_robot, num_times, 2)

            w_displacement_array = np.array([])
            base_w_displacement_array = np.array([])

            if msd_type == 'windowed':
                base_msd = utils.window_displacement(baseline_concatenated,
                                                     window_size)
                msd = utils.window_displacement(positions_concatenated,
                                                window_size)
            elif msd_type == 'fixed':
                msd = utils.fixed_window_displacement(positions_concatenated,
                                                      window_size)
                base_msd = utils.fixed_window_displacement(
                    baseline_concatenated, window_size)
            elif msd_type == 'time_msd':
                msd = utils.time_mean_square_displacement(
                    positions_concatenated)
                base_msd = utils.time_mean_square_displacement(
                    baseline_concatenated)

            w_displacement_array = np.vstack([
                w_displacement_array, msd
            ]) if w_displacement_array.size else msd
            base_w_displacement_array = np.vstack([
                base_w_displacement_array, base_msd
            ]) if base_w_displacement_array.size else base_msd
            mean_wmsd = msd.mean()

            total_dict[num_robots][rho_str][alpha_str] = mean_wmsd
            number_dict[num_robots][rho_str][alpha_str] = number_of_experiments
            total_experiment_wmsd.append(w_displacement_array)
            baseline_experiment_wmsd.append(base_w_displacement_array)

            #         print(heatmap_dir)
            #             print(total_dict)
            total_dict = utils.sort_nested_dict(total_dict)
            utils.plot_heatmap(total_dict, msd_type, window_size, heatmap_dir)

        # For the time-averaged MSD the window size is irrelevant, so a single
        # pass over the experiments is enough.
        if msd_type == 'time_msd':
            break
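
The heatmap input built above is a three-level nested dict keyed by population size, then rho, then alpha; a sketch of the structure handed to utils.plot_heatmap (all values made up):

# Hypothetical contents, mirroring total_dict[num_robots][rho_str][alpha_str] = mean_wmsd
total_dict = {
    "10": {"0.0": {"1.2": 0.031, "1.6": 0.045},
           "0.9": {"1.2": 0.027, "1.6": 0.039}},
    "50": {"0.0": {"1.2": 0.024, "1.6": 0.041},
           "0.9": {"1.2": 0.022, "1.6": 0.036}},
}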