def evaluate(environment, num_robots, num_runs, is_simulation, comm_model_path,
             granularity, mission_duration, plot_comm_map, fixed_robot,
             test_set_size, log_folder):
    """Create pickle files containing data to be plotted.

    It processes the dat files containing the collected data from the robots.
    It outputs a pickle file, containing errors, variances, and GP comm_map,
    according to run, environment, robot.

    Args:
        environment (str): name of environment used for saving data.
        num_robots (int): number of robots.
        num_runs (int): number of repeated experiments.
        is_simulation (bool): True, if data generated from simulation.
        comm_model_path (str): path to the XML file containing communication
            model parameters.
        granularity (int): seconds for each epoch to plot data.
        mission_duration (int): seconds for total mission.
        plot_comm_map (bool): If True, plot communication map.
        fixed_robot (tuple of float): x,y of robot in meters.
        test_set_size (int): number of samples in the test set.
        log_folder (str): folder where logs are saved.
    """
    log_folder = os.path.expanduser(log_folder)

    # Reading of the communication parameters necessary to produce the correct test set.
    comm_model = CommModel(comm_model_path)

    # Reading environment image.
    environment_yaml_path = os.getcwd() + '/envs/' + environment + '.yaml'
    sel_pol = gflags.FLAGS.point_selection_policy
    im_array, resolution = read_environment(environment_yaml_path)
    im_array = specular(im_array)

    test_type = gflags.FLAGS.test_type

    # Generation of test set.
    if is_simulation:
        if test_type != "discarded_set":
            random.seed(0)
            np.random.seed(0)
            dimX, dimY, XTest, YTest = create_test_set(
                im_array, comm_model, test_set_size,
                False if test_type == "random" else True, resolution)

    runs = range(num_runs)

    errors = {}
    variances_all = {}
    times_all = {}

    for set in sets:
        #if set == "complete": continue
        print "Set: ", set
        # If True, consider only information found by the Pairing TSP algorithm.
        filter = True if set == "filtered" else False
        errors[set] = {}
        variances_all[set] = {}
        times_all[set] = {}

        for run in runs:
            if run != 0 and plot_comm_map:
                break

            parsed = []
            if test_type == "discarded_set" and set == "filtered":
                parsed_test_set = []

            for robot in range(num_robots):
                dataset_filename = log_folder + str(run) + '_' + environment + \
                    '_' + str(robot) + '_' + str(num_robots) + \
                    '_' + str(int(comm_model.COMM_RANGE)) + \
                    '_' + sel_pol + '.dat'
                parsed += parse_dataset(
                    dataset_filename, filter,
                    True if test_type == "normalized" else False)
                if test_type == "discarded_set" and set == "filtered":
                    parsed_test_set += parse_dataset(dataset_filename, False, False)

            all_signal_data = create_dataset(parsed, set)

            if test_type == "discarded_set":
                dimX, dimY, XTest, YTest = create_test(
                    im_array,
                    parsed if set != "filtered" else parsed_test_set,
                    resolution)

            print "Set length: " + str(len(all_signal_data))
            if not plot_comm_map and len(all_signal_data) >= 10000:
                # With 5 runs and 10k samples per run, 64GB of RAM/swap memory are not enough.
                print "Too many samples: the GP training would be too heavy. Discarding this set."
                break

            errors[set][run] = []
            variances_all[set][run] = []
            times_all[set][run] = []

            all_secs = range(granularity, mission_duration + 1, granularity)
            for secs in all_secs:
                cur_signal_data = []
                for datum in all_signal_data:
                    if datum.timestep <= secs:
                        cur_signal_data.append(datum)

                print "Run: " + str(run) + " - number of data: ", len(cur_signal_data)
                start = time.time()
                comm_map = GPmodel(dimX, dimY, comm_model.COMM_RANGE, False)
                comm_map.update_model(cur_signal_data)
                end = time.time()

                predictions_all = comm_map.predict(XTest)
                predictions = map(lambda x: x[0], predictions_all)
                variances = map(lambda x: x[1], predictions_all)
                std_devs = map(lambda x: math.sqrt(x), variances)
                conf_95 = map(lambda x: 1.96 * x, std_devs)

                errors[set][run].append(
                    (mean_squared_error(YTest, predictions),
                     math.sqrt(mean_squared_error(YTest, predictions))))
                variances_all[set][run].append(
                    (np.mean(variances), np.std(variances), np.mean(std_devs),
                     np.std(std_devs), np.mean(conf_95), np.std(conf_95)))
                times_all[set][run].append(end - start)

                if plot_comm_map:
                    print "Drawing the Communication map..."
                    if set == "complete":
                        extension = '.png'
                    elif set == "pre_processing":
                        extension = '_P.png'
                    else:
                        extension = '_C.png'
                    communication_figures = plot_prediction_from_xy_center_3d(
                        im_array, fixed_robot, comm_map, dimX, dimY, comm_model,
                        resolution, True, cur_signal_data)
                    communication_map_figure_filename = os.getcwd() + '/figs/COMM_MAP_' + str(num_robots) + \
                        '_' + environment + '_' + str(int(comm_model.COMM_RANGE)) + \
                        '_' + str(run) + '_' + str(secs) + '_' + sel_pol + extension
                    communication_figures[0].savefig(
                        communication_map_figure_filename, bbox_inches='tight')
                    print "Done."

                    if len(communication_figures) > 1:
                        print "Drawing the Variance map..."
                        communication_map_figure_filename = os.getcwd() + '/figs/COMM_MAP_' + str(num_robots) + \
                            '_' + environment + '_' + str(int(comm_model.COMM_RANGE)) + \
                            '_' + str(run) + '_' + str(secs) + '_' + sel_pol + \
                            '_' + 'VAR' + extension
                        communication_figures[1].savefig(
                            communication_map_figure_filename, bbox_inches='tight')
                        print "Done."

    print '----------------------------------------------------------------------------'
    print errors
    print '----------------------------------------------------------------------------'
    print variances_all
    print '----------------------------------------------------------------------------'
    print times_all
    print '----------------------------------------------------------------------------'

    f = open(
        log_folder + str(num_robots) + '_' + environment + '_' +
        str(int(comm_model.COMM_RANGE)) + '_' +
        gflags.FLAGS.point_selection_policy + '.dat', "wb")
    pickle.dump((errors, variances_all, times_all), f)
    f.close()
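
# Illustrative sketch (not part of the original pipeline): given the per-point GP
# predictions and variances produced inside the loop above, this shows how the
# tuples appended to errors[set][run] and variances_all[set][run] are derived.
# The helper name and its inputs are hypothetical; it assumes plain lists of floats.
def _metrics_from_predictions(y_true, predictions, variances):
    import math
    import numpy as np
    from sklearn.metrics import mean_squared_error
    mse = mean_squared_error(y_true, predictions)
    std_devs = [math.sqrt(v) for v in variances]
    conf_95 = [1.96 * s for s in std_devs]  # half-width of the 95% confidence interval
    error_entry = (mse, math.sqrt(mse))  # (MSE, RMSE)
    variance_entry = (np.mean(variances), np.std(variances),
                      np.mean(std_devs), np.std(std_devs),
                      np.mean(conf_95), np.std(conf_95))
    return error_entry, variance_entry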
    if k < 0 or k >= I:
        return False
    for w in xrange(jj - border, jj + border + 1):
        if w < 0 or w >= J:
            return False
        if (im_array[k][w] == 0):
            return False
    return True


if __name__ == '__main__':
    os.chdir("/home/andrea/catkin_ws/src/strategy/")  # for gflags
    argv = gflags.FLAGS(sys.argv)

    communication_model_path = gflags.FLAGS.communication_model_path
    comm_model = CommModel(communication_model_path)

    env_name = (os.path.splitext(gflags.FLAGS.phys_graph)[0]).split("_")[0]
    environment_yaml_path = os.getcwd() + '/envs/' + env_name + '.yaml'
    im_array = read_environment(environment_yaml_path)
    image_array = im_array

    resize_factor = 0.1
    dimX = np.size(im_array, 1) * resize_factor
    dimY = np.size(im_array, 0) * resize_factor
    I = np.size(im_array, 0)
    J = np.size(im_array, 1)

    os.chdir("/home/andrea/Desktop/hbss/scriptGraph/scripts")

    # PARSE file with points of the graph
    POINTS = []
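
# Illustrative sketch of the window check the fragment above appears to implement:
# a cell (ii, jj) is considered free only if every pixel in a square window of
# radius `border` around it lies inside the image and is non-zero (non-obstacle).
# The function name and the enclosing loop over rows are assumptions, not the
# original code.
def _window_is_free(im_array, ii, jj, border, I, J):
    for k in xrange(ii - border, ii + border + 1):
        if k < 0 or k >= I:
            return False
        for w in xrange(jj - border, jj + border + 1):
            if w < 0 or w >= J:
                return False
            if im_array[k][w] == 0:
                return False
    return True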
def plot(environment, num_robots, comm_model_path, granularity, mission_duration):
    """Plot graphs of MSE, RMSE, prediction variance, confidence width, and GP training time.

    Args:
        environment (str): name of environment used for saving data.
        num_robots (int): number of robots.
        comm_model_path (str): path to the XML file containing communication
            model parameters.
        granularity (int): seconds for each epoch to plot data.
        mission_duration (int): seconds for total mission.
    """
    comm_model = CommModel(comm_model_path)
    sel_pol = gflags.FLAGS.point_selection_policy

    f = open(gflags.FLAGS.log_folder + str(num_robots) + '_' + environment + \
        '_' + str(int(comm_model.COMM_RANGE)) + '_' + sel_pol + '.dat', "rb")
    errors, variances_all, times_all = pickle.load(f)
    f.close()

    x = range(granularity, mission_duration + 1, granularity)
    x = map(lambda x: x / 60.0, x)

    no_complete = False

    mse_avg = {}
    rmse_avg = {}
    mse_yerr = {}
    rmse_yerr = {}
    var_avg = {}
    rvar_avg = {}
    var_yerr = {}
    rvar_yerr = {}
    conf_avg = {}
    conf_yerr = {}
    times_avg = {}
    times_yerr = {}

    for set in sets:
        #if set == "complete": continue
        if set == "complete" and len(errors[set]) == 0:  # if "complete" is empty
            no_complete = True
            continue
        #print "Set: " + set
        mse_avg[set] = []
        rmse_avg[set] = []
        mse_yerr[set] = []
        rmse_yerr[set] = []
        var_avg[set] = []
        rvar_avg[set] = []
        var_yerr[set] = []
        rvar_yerr[set] = []
        conf_avg[set] = []
        conf_yerr[set] = []
        times_avg[set] = []
        times_yerr[set] = []

        for stamp in range(len(x)):
            cur_mse_avg = 0.0
            cur_rmse_avg = 0.0
            cur_mse_values = []
            cur_rmse_values = []
            cur_var_avg = 0.0
            cur_rvar_avg = 0.0
            cur_conf_avg = 0.0
            cur_var_values = []
            cur_rvar_values = []
            cur_conf_values = []
            cur_times_avg = 0.0
            cur_times_values = []

            for run in errors[set].keys():
                cur_mse_avg += errors[set][run][stamp][0]
                cur_rmse_avg += errors[set][run][stamp][1]
                cur_mse_values.append(errors[set][run][stamp][0])
                cur_rmse_values.append(errors[set][run][stamp][1])
                cur_var_avg += variances_all[set][run][stamp][0]
                cur_rvar_avg += variances_all[set][run][stamp][2]
                cur_conf_avg += variances_all[set][run][stamp][4]
                cur_var_values.append(variances_all[set][run][stamp][0])
                cur_rvar_values.append(variances_all[set][run][stamp][2])
                cur_conf_values.append(variances_all[set][run][stamp][4])
                cur_times_avg += times_all[set][run][stamp]
                cur_times_values.append(times_all[set][run][stamp])

            cur_mse_avg = cur_mse_avg / len(errors[set].keys())
            cur_rmse_avg = cur_rmse_avg / len(errors[set].keys())
            cur_var_avg = cur_var_avg / len(errors[set].keys())
            cur_conf_avg = cur_conf_avg / len(errors[set].keys())
            cur_rvar_avg = cur_rvar_avg / len(errors[set].keys())
            cur_times_avg = cur_times_avg / len(errors[set].keys())

            mse_avg[set].append(cur_mse_avg)
            rmse_avg[set].append(cur_rmse_avg)
            mse_yerr[set].append(np.std(cur_mse_values))
            rmse_yerr[set].append(np.std(cur_rmse_values))
            var_avg[set].append(cur_var_avg)
            rvar_avg[set].append(cur_rvar_avg)
            conf_avg[set].append(cur_conf_avg)
            var_yerr[set].append(np.std(cur_var_values))
            rvar_yerr[set].append(np.std(cur_rvar_values))
            conf_yerr[set].append(np.std(cur_conf_values))
            times_avg[set].append(cur_times_avg)
            times_yerr[set].append(np.std(cur_times_values))

    plot_values(
        x, rmse_avg, rmse_yerr, "RMSE", no_complete,
        os.getcwd() + '/figs/RMSE_' + str(num_robots) + '_' + environment +
        '_' + str(int(comm_model.COMM_RANGE)) + '_' + sel_pol + '.pdf')
    plot_values(
        x, mse_avg, mse_yerr, "MSE", no_complete,
        os.getcwd() + '/figs/MSE_' + str(num_robots) + '_' + environment +
        '_' + str(int(comm_model.COMM_RANGE)) + '_' + sel_pol + '.pdf')
    plot_values(
        x, conf_avg, conf_yerr, "95% Confidence Width", no_complete,
        os.getcwd() + '/figs/95CONF_' + str(num_robots) + '_' + environment +
        '_' + str(int(comm_model.COMM_RANGE)) + '_' + sel_pol + '.pdf')
    plot_values(
        x, var_avg, var_yerr, "Pred. Variance", no_complete,
        os.getcwd() + '/figs/VAR_' + str(num_robots) + '_' + environment +
        '_' + str(int(comm_model.COMM_RANGE)) + '_' + sel_pol + '.pdf')
    plot_values(
        x, rvar_avg, rvar_yerr, "Pred. Std. Dev.", no_complete,
        os.getcwd() + '/figs/STDEV_' + str(num_robots) + '_' + environment +
        '_' + str(int(comm_model.COMM_RANGE)) + '_' + sel_pol + '.pdf')
    plot_values(
        x, times_avg, times_yerr, "GP Training Time", no_complete,
        os.getcwd() + '/figs/TIME_' + str(num_robots) + '_' + environment +
        '_' + str(int(comm_model.COMM_RANGE)) + '_' + sel_pol + '.pdf')
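
# Minimal sketch of an errorbar plot like the ones produced by plot_values above.
# This is NOT the repository's plot_values implementation; the signature is
# hypothetical and only handles a single curve.
def _plot_errorbar_sketch(x, y_avg, y_err, ylabel, filename):
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.errorbar(x, y_avg, yerr=y_err, fmt='-o', capsize=3)
    ax.set_xlabel("Mission time (min)")
    ax.set_ylabel(ylabel)
    fig.savefig(filename, bbox_inches='tight')
    plt.close(fig)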
def evaluate(pr, d_list, tot_proc, environment, num_robots, num_runs,
             is_simulation, comm_model_path, granularity, mission_duration,
             plot_comm_map, fixed_robot, test_set_size, log_folder):
    """Create pickle files containing data to be plotted.

    It processes the dat files containing the collected data from the robots.
    It outputs a pickle file, containing errors, variances, and GP comm_map,
    according to run, environment, robot.

    Args:
        pr (int): identification number of the process.
        d_list (list of dict): shared list between processes holding errors,
            variances, and times.
        tot_proc (int): total number of created processes.
        environment (str): name of environment used for saving data.
        num_robots (int): number of robots.
        num_runs (int): number of repeated experiments.
        is_simulation (bool): True, if data generated from simulation.
        comm_model_path (str): path to the XML file containing communication
            model parameters.
        granularity (int): seconds for each epoch to plot data.
        mission_duration (int): seconds for total mission.
        plot_comm_map (bool): If True, plot communication map.
        fixed_robot (tuple of float): x,y of robot in meters.
        test_set_size (int): number of samples in the test set.
        log_folder (str): folder where logs are saved.
    """
    log_folder = os.path.expanduser(log_folder)

    # Reading of the communication parameters necessary to produce the correct test set.
    comm_model = CommModel(comm_model_path)

    # Reading environment image.
    environment_yaml_path = os.getcwd() + '/envs/' + environment + '.yaml'
    im_array, resolution = read_environment(environment_yaml_path)
    im_array = specular(im_array)

    # Generation of test set.
    if is_simulation:
        random.seed(0)
        np.random.seed(0)
        dimX, dimY, XTest, YTest = create_test_set(im_array, comm_model,
                                                   test_set_size, resolution)

    runs = range(num_runs)
    for proc in range(tot_proc):
        if proc == pr:
            # Splitting the runs list in order to parallelize its analysis.
            runs = np.array_split(runs, tot_proc)[proc]
    print "proc: " + str(pr) + ", runs: " + str(runs)
    time.sleep(pr)

    errors = {}
    variances_all = {}
    times_all = {}

    for run in runs:
        all_signal_data = []
        for robot in range(num_robots):
            dataset_filename = log_folder + str(run) + '_' + environment + \
                '_' + str(robot) + '_' + str(num_robots) + \
                '_' + str(int(comm_model.COMM_RANGE)) + '.dat'
            all_signal_data += parse_dataset(dataset_filename)

        errors[run] = []
        variances_all[run] = []
        times_all[run] = []

        all_secs = range(granularity, mission_duration + 1, granularity)
        for secs in all_secs:
            cur_signal_data = []
            for datum in all_signal_data:
                if datum.timestep <= secs:
                    cur_signal_data.append(datum)

            print "Run: " + str(run) + " - number of data: ", len(cur_signal_data)
            start = time.time()
            comm_map = GPmodel(dimX, dimY, comm_model.COMM_RANGE, False)
            comm_map.update_model(cur_signal_data)
            end = time.time()

            predictions_all = comm_map.predict(XTest)
            predictions = map(lambda x: x[0], predictions_all)
            variances = map(lambda x: x[1], predictions_all)
            std_devs = map(lambda x: math.sqrt(x), variances)
            conf_95 = map(lambda x: 1.96 * x, std_devs)

            errors[run].append(
                (mean_squared_error(YTest, predictions),
                 math.sqrt(mean_squared_error(YTest, predictions))))
            variances_all[run].append(
                (np.mean(variances), np.std(variances), np.mean(std_devs),
                 np.std(std_devs), np.mean(conf_95), np.std(conf_95)))
            times_all[run].append(end - start)

            if plot_comm_map:
                # NOTE: `filter` is not set in this function; unless it is defined at
                # module level (e.g. from gflags.FLAGS.filter_dat as in the serial
                # script), the Python builtin is used here, which is always truthy,
                # so the '_C.png' suffix is always selected.
                if filter:
                    extension = '_C.png'
                else:
                    extension = '.png'
                print "Drawing the CM..."
                communication_figures = plot_prediction_from_xy_center_3d(
                    im_array, fixed_robot, comm_map, dimX, dimY, comm_model,
                    resolution, True, cur_signal_data)
                communication_map_figure_filename = os.getcwd() + '/figs/COMM_MAP' + \
                    str(num_robots) + '_' + environment + '_' + \
                    str(int(comm_model.COMM_RANGE)) + '_' + str(run) + \
                    '_' + str(secs) + extension
                communication_figures[0].savefig(
                    communication_map_figure_filename, bbox_inches='tight')
                plt.close(communication_figures[0])
                print "Done."

                if len(communication_figures) > 1:
                    print "Drawing the Variance map..."
                    communication_map_figure_filename = os.getcwd() + '/figs/COMM_MAP' + \
                        str(num_robots) + '_' + environment + '_' + \
                        str(int(comm_model.COMM_RANGE)) + '_' + str(run) + \
                        '_' + str(secs) + '_' + 'VAR' + extension
                    communication_figures[1].savefig(
                        communication_map_figure_filename, bbox_inches='tight')
                    plt.close(communication_figures[1])
                    print "Done."

            # Cleaning stuff.
            plt.close('all')
            gc.collect()

    # Merge this process' results into the shared list.
    er = d_list[0].copy()
    er.update(errors)
    d_list[0] = er.copy()
    v = d_list[1].copy()
    v.update(variances_all)
    d_list[1] = v.copy()
    t = d_list[2].copy()
    t.update(times_all)
    d_list[2] = t.copy()
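
# Illustrative sketch of the run partitioning used above: np.array_split divides
# the run indices into tot_proc nearly equal chunks, and process `pr` analyzes
# only its own chunk. Names are local to this example.
def _runs_for_process(num_runs, tot_proc, pr):
    import numpy as np
    return np.array_split(range(num_runs), tot_proc)[pr]
# e.g. with num_runs=5 and tot_proc=2, process 0 gets runs [0, 1, 2] and process 1 gets [3, 4].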
def plot(env, num_robots, comm_model_path, granularity, mission_duration):
    """Plot graphs of MSE, RMSE, prediction variance, confidence width, and GP training time.

    Args:
        env (str): name of environment used for saving data.
        num_robots (int): number of robots.
        comm_model_path (str): path to the XML file containing communication
            model parameters.
        granularity (int): seconds for each epoch to plot data.
        mission_duration (int): seconds for total mission.
    """
    comm_model = CommModel(comm_model_path)

    f = open(
        gflags.FLAGS.log_folder + str(num_robots) + '_' + env + '_' +
        str(int(comm_model.COMM_RANGE)) + '.dat', "rb")
    errors, variances_all, times_all = pickle.load(f)
    f.close()

    x = range(granularity, mission_duration + 1, granularity)
    x = map(lambda x: x / 60.0, x)

    mse_avg = []
    rmse_avg = []
    mse_yerr = []
    rmse_yerr = []
    var_avg = []
    rvar_avg = []
    var_yerr = []
    rvar_yerr = []
    conf_avg = []
    conf_yerr = []
    times_avg = []
    times_yerr = []

    for stamp in range(len(x)):
        cur_mse_avg = 0.0
        cur_rmse_avg = 0.0
        cur_mse_values = []
        cur_rmse_values = []
        cur_var_avg = 0.0
        cur_rvar_avg = 0.0
        cur_conf_avg = 0.0
        cur_var_values = []
        cur_rvar_values = []
        cur_conf_values = []
        cur_times_avg = 0.0
        cur_times_values = []

        for run in errors.keys():
            cur_mse_avg += errors[run][stamp][0]
            cur_rmse_avg += errors[run][stamp][1]
            cur_mse_values.append(errors[run][stamp][0])
            cur_rmse_values.append(errors[run][stamp][1])
            cur_var_avg += variances_all[run][stamp][0]
            cur_rvar_avg += variances_all[run][stamp][2]
            cur_conf_avg += variances_all[run][stamp][4]
            cur_var_values.append(variances_all[run][stamp][0])
            cur_rvar_values.append(variances_all[run][stamp][2])
            cur_conf_values.append(variances_all[run][stamp][4])
            cur_times_avg += times_all[run][stamp]
            cur_times_values.append(times_all[run][stamp])

        cur_mse_avg = cur_mse_avg / len(errors.keys())
        cur_rmse_avg = cur_rmse_avg / len(errors.keys())
        cur_var_avg = cur_var_avg / len(errors.keys())
        cur_conf_avg = cur_conf_avg / len(errors.keys())
        cur_rvar_avg = cur_rvar_avg / len(errors.keys())
        cur_times_avg = cur_times_avg / len(errors.keys())

        mse_avg.append(cur_mse_avg)
        rmse_avg.append(cur_rmse_avg)
        mse_yerr.append(np.std(cur_mse_values))
        rmse_yerr.append(np.std(cur_rmse_values))
        var_avg.append(cur_var_avg)
        rvar_avg.append(cur_rvar_avg)
        conf_avg.append(cur_conf_avg)
        var_yerr.append(np.std(cur_var_values))
        rvar_yerr.append(np.std(cur_rvar_values))
        conf_yerr.append(np.std(cur_conf_values))
        times_avg.append(cur_times_avg)
        times_yerr.append(np.std(cur_times_values))

    filter = gflags.FLAGS.filter_dat
    if filter:
        extension = '_C.pdf'
    else:
        extension = '.pdf'

    plot_values(
        x, rmse_avg, rmse_yerr, "RMSE",
        os.getcwd() + '/figs/RMSE_' + str(num_robots) + '_' + env + '_' +
        str(int(comm_model.COMM_RANGE)) + extension)
    plot_values(
        x, mse_avg, mse_yerr, "MSE",
        os.getcwd() + '/figs/MSE_' + str(num_robots) + '_' + env + '_' +
        str(int(comm_model.COMM_RANGE)) + extension)
    plot_values(
        x, conf_avg, conf_yerr, "95% Confidence Width",
        os.getcwd() + '/figs/95CONF_' + str(num_robots) + '_' + env + '_' +
        str(int(comm_model.COMM_RANGE)) + extension)
    plot_values(
        x, var_avg, var_yerr, "Pred. Variance",
        os.getcwd() + '/figs/VAR_' + str(num_robots) + '_' + env + '_' +
        str(int(comm_model.COMM_RANGE)) + extension)
    plot_values(
        x, rvar_avg, rvar_yerr, "Pred. Std. Dev.",
        os.getcwd() + '/figs/STDEV_' + str(num_robots) + '_' + env + '_' +
        str(int(comm_model.COMM_RANGE)) + extension)
    plot_values(
        x, times_avg, times_yerr, "GP Training Time",
        os.getcwd() + '/figs/TIME' + str(num_robots) + '_' + env + '_' +
        str(int(comm_model.COMM_RANGE)) + extension)
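
# Sketch of an equivalent, more compact aggregation using numpy: stacking the
# per-run MSE curves into a matrix and reducing over the run axis gives the same
# mse_avg / mse_yerr values as the explicit loops above. Variable names are
# illustrative only.
def _aggregate_mse(errors):
    import numpy as np
    # errors[run] is a list of (MSE, RMSE) tuples, one per epoch.
    mse_matrix = np.array([[entry[0] for entry in errors[run]]
                           for run in sorted(errors.keys())])
    return mse_matrix.mean(axis=0), mse_matrix.std(axis=0)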
        for p in procs:
            p.join()

        errors_tot = dict_list[0]
        variances_tot = dict_list[1]
        times_tot = dict_list[2]

        print '----------------------------------------------------------------------------------------------------'
        print errors_tot
        print '----------------------------------------------------------------------------------------------------'
        print variances_tot
        print '----------------------------------------------------------------------------------------------------'
        print times_tot
        print '----------------------------------------------------------------------------------------------------'

        fd = open(
            gflags.FLAGS.log_folder + str(gflags.FLAGS.num_robots) + '_' +
            gflags.FLAGS.environment + '_' +
            str(int(CommModel(gflags.FLAGS.communication_model_path).COMM_RANGE)) +
            '.dat', "wb")
        pickle.dump((errors_tot, variances_tot, times_tot), fd)
        fd.close()

    elif gflags.FLAGS.task == 'plot':
        plot(gflags.FLAGS.environment, gflags.FLAGS.num_robots,
             gflags.FLAGS.communication_model_path, gflags.FLAGS.granularity,
             gflags.FLAGS.mission_duration)
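
# The process setup is not shown in this fragment. Below is a minimal sketch of
# how the shared dict_list and procs could be created with multiprocessing; the
# actual argument list passed to evaluate in the original script may differ.
def _spawn_workers_sketch(tot_proc, evaluate_fn, evaluate_args):
    import multiprocessing
    manager = multiprocessing.Manager()
    dict_list = manager.list([{}, {}, {}])  # shared slots for errors, variances, times
    procs = []
    for pr in range(tot_proc):
        p = multiprocessing.Process(target=evaluate_fn,
                                    args=(pr, dict_list, tot_proc) + tuple(evaluate_args))
        p.start()
        procs.append(p)
    return procs, dict_list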
def evaluate(environment, num_robots, num_runs, is_simulation, comm_model_path,
             granularity, mission_duration, plot_comm_map, fixed_robot,
             test_set_size, log_folder):
    """Create pickle files containing data to be plotted.

    It processes the dat files containing the collected data from the robots.
    It outputs a pickle file, containing errors, variances, and GP comm_map,
    according to run, environment, robot.

    Args:
        environment (str): name of environment used for saving data.
        num_robots (int): number of robots.
        num_runs (int): number of repeated experiments.
        is_simulation (bool): True, if data generated from simulation.
        comm_model_path (str): path to the XML file containing communication
            model parameters.
        granularity (int): seconds for each epoch to plot data.
        mission_duration (int): seconds for total mission.
        plot_comm_map (bool): If True, plot communication map.
        fixed_robot (tuple of float): x,y of robot in meters.
        test_set_size (int): number of samples in the test set.
        log_folder (str): folder where logs are saved.
    """
    log_folder = os.path.expanduser(log_folder)

    # Reading of the communication parameters necessary to produce the correct test set.
    comm_model = CommModel(comm_model_path)

    # Reading environment image.
    environment_yaml_path = os.getcwd() + '/envs/' + environment + '.yaml'
    im_array, resolution = read_environment(environment_yaml_path)
    im_array = specular(im_array)

    # Parsing filter: if True, consider only information found by the Carlo algorithm.
    filter = gflags.FLAGS.filter_dat

    # Generation of test set.
    if is_simulation:
        # In simulation.
        random.seed(0)
        np.random.seed(0)
        dimX, dimY, XTest, YTest = create_test_set(im_array, comm_model,
                                                   test_set_size, resolution)
    else:
        # Only to avoid warnings; this setting is useful only for real robots.
        dimX = []
        dimY = []
        XTest = []
        YTest = []

    runs = range(num_runs)

    errors = {}
    variances_all = {}
    times_all = {}

    for run in runs:
        all_signal_data = []
        for robot in range(num_robots):
            dataset_filename = log_folder + str(run) + '_' + environment + \
                '_' + str(robot) + '_' + str(num_robots) + \
                '_' + str(int(comm_model.COMM_RANGE)) + '.dat'
            all_signal_data += parse_dataset(dataset_filename)

        #print "Length: " + str(len(all_signal_data))

        errors[run] = []
        variances_all[run] = []
        times_all[run] = []

        all_secs = range(granularity, mission_duration + 1, granularity)
        for secs in all_secs:
            cur_signal_data = []
            for datum in all_signal_data:
                if datum.timestep <= secs:
                    cur_signal_data.append(datum)

            print "Run: " + str(run) + " - number of data: ", len(cur_signal_data)
            start = time.time()
            comm_map = GPmodel(dimX, dimY, comm_model.COMM_RANGE, False)
            comm_map.update_model(cur_signal_data)
            end = time.time()

            predictions_all = comm_map.predict(XTest)
            predictions = map(lambda x: x[0], predictions_all)
            variances = map(lambda x: x[1], predictions_all)
            std_devs = map(lambda x: math.sqrt(x), variances)
            conf_95 = map(lambda x: 1.96 * x, std_devs)

            errors[run].append(
                (mean_squared_error(YTest, predictions),
                 math.sqrt(mean_squared_error(YTest, predictions))))
            variances_all[run].append(
                (np.mean(variances), np.std(variances), np.mean(std_devs),
                 np.std(std_devs), np.mean(conf_95), np.std(conf_95)))
            times_all[run].append(end - start)

            if plot_comm_map:
                if filter:
                    extension = '_C.png'
                else:
                    extension = '.png'
                print "Drawing the CM..."
                communication_figures = plot_prediction_from_xy_center_3d(
                    im_array, fixed_robot, comm_map, dimX, dimY, comm_model,
                    resolution, True, cur_signal_data)
                communication_map_figure_filename = os.getcwd() + '/figs/COMM_MAP' + str(num_robots) + \
                    '_' + environment + '_' + str(int(comm_model.COMM_RANGE)) + \
                    '_' + str(run) + '_' + str(secs) + extension
                communication_figures[0].savefig(communication_map_figure_filename,
                                                 bbox_inches='tight')
                #plt.close(communication_figures[0])
                print "Done."

                if len(communication_figures) > 1:
                    print "Drawing the Variance map..."
                    communication_map_figure_filename = os.getcwd() + '/figs/COMM_MAP' + str(num_robots) + \
                        '_' + environment + '_' + str(int(comm_model.COMM_RANGE)) + \
                        '_' + str(run) + '_' + str(secs) + '_' + 'VAR' + extension
                    communication_figures[1].savefig(communication_map_figure_filename,
                                                     bbox_inches='tight')
                    #plt.close(communication_figures[1])
                    print "Done."

            # Cleaning stuff.
            plt.close('all')
            gc.collect()

    print '----------------------------------------------------------------------------'
    print errors
    print '----------------------------------------------------------------------------'
    print variances_all
    print '----------------------------------------------------------------------------'
    print times_all
    print '----------------------------------------------------------------------------'

    f = open(
        log_folder + str(num_robots) + '_' + environment + '_' +
        str(int(comm_model.COMM_RANGE)) + '.dat', "wb")
    pickle.dump((errors, variances_all, times_all), f)
    f.close()
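
# Quick sketch of reading back the pickle written above, e.g. for a sanity check
# before running plot(). The path components mirror the ones used in evaluate();
# the argument values are placeholders.
def _load_results_sketch(log_folder, num_robots, environment, comm_range):
    import pickle
    path = log_folder + str(num_robots) + '_' + environment + '_' + str(int(comm_range)) + '.dat'
    f = open(path, "rb")
    errors, variances_all, times_all = pickle.load(f)
    f.close()
    # errors[run] -> list of (MSE, RMSE) per epoch;
    # variances_all[run] -> (mean var, std var, mean std, std std, mean 95% conf, std 95% conf).
    return errors, variances_all, times_all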