def aggregate() -> str: """Plots task accuracies for all federated tasks in a group Expects FLAGS.group_name to be set Returns: str: Absolut path to saved plot """ group_name = FLAGS.group_name dname = storage.create_output_subdir(group_name) fname = storage.fname_with_default_dir("plot_task_accuracies.png", dname) data = _prepare_aggregation_data(group_name) # Take highest length of values list as xlim_max xlim_max = max([len(values) for _, values, _ in data]) + 1 fpath = plot( data, title="", # TODO ylabel="Accuracy", fname=fname, ylim_max=1.0, xlim_max=xlim_max, ) logger.info("Data plotted and saved in file", filepath=fpath) return fpath
def aggregate() -> str: """Plots IID and Non-IID dataset performance comparision Expects FLAGS.group_name to be set Returns: str: Absolut path to saved plot """ group_name = FLAGS.group_name dname = storage.create_output_subdir(group_name) fname = storage.fname_with_default_dir("plot_final_task_accuracies.png", dname) (data, xticks_args) = _prepare_aggregation_data(group_name) assert len(data) == 2, "Expecting a list of two curves" fpath = plot( data, title="", # TODO xlabel="IID / Non-IID", ylabel="Accuracy", fname=fname, ylim_max=1.0, xlim_max=12, xticks_args=xticks_args, legend_loc="upper right", ) logger.info("Data plotted and saved in file", filepath=fpath) return fpath
def participant_history() -> List[str]:
    """Plot participant selection histories for group name flag.

    For each task result in the group name flag extract the task metrics
    (number of participants, task label, hist metrics), transform them into
    heatmap data as participant indices x training rounds and plot/save them
    as participant selection history.

    Returns:
        ~typing.List[str]: File paths for saved plots.
    """
    group_name: str = FLAGS.group_name
    dir_name: str = create_output_subdir(dname=group_name)
    file_pre_name: str = fname_with_default_dir(
        fname="plot_participant_history_{}.png", dname=dir_name
    )
    file_paths: List[str] = list()

    # Get history metrics data from results.json
    hist_metrics_group: List[Tuple[int, str, List[List[Metrics]]]] = get_hist_metrics(
        group_name=group_name
    )

    # Create heatmap data for each task metric in the group metrics
    matrices: List[Tuple[str, ndarray]] = list(map(heatmap_data, hist_metrics_group))

    for label, matrix in matrices:
        file_path: str = plot_history_data(
            matrix=matrix,
            title="Participant Selection History",
            file_name=file_pre_name.format(label),
            save=True,
            show=False,
        )
        file_paths.append(file_path)

    logger.info("Task data plotted and saved in files", filepath=file_paths)

    return file_paths
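
# Rough, hypothetical sketch of the transformation heatmap_data is expected to
# perform: build a (participants x rounds) selection matrix per task. The real
# Metrics type is richer; this stand-in treats each round simply as a list of
# selected participant indices, which is an assumption for illustration only.
def _heatmap_data_sketch(task):
    num_participants, label, rounds = task
    matrix = np.zeros((num_participants, len(rounds)))
    for round_idx, selected in enumerate(rounds):
        for participant_idx in selected:
            matrix[participant_idx, round_idx] = 1  # mark participant as selected in this round
    return label, matrix

# Example: 4 participants, 3 rounds, 2 participants selected per round
# _heatmap_data_sketch((4, "task_a", [[0, 2], [1, 3], [0, 1]]))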
def aggregate() -> str: """Plots learning rate for federated tasks in a group Expects FLAGS.group_name to be set Returns: str: Absolut path to saved plot """ group_name = FLAGS.group_name dname = storage.create_output_subdir(group_name) fname = storage.fname_with_default_dir("plot_learning_rates.png", dname) data = _prepare_aggregation_data(group_name) ylim_max: float = 0 xlim_max = 0 for _, lrs, ylabel in data: if ylabel is not None: xlim_max = max(ylabel + [xlim_max]) for lr in lrs: ylim_max = max(lr, ylim_max) ylim_max *= 1.1 xlim_max += 1 assert data, "Expecting a list with at least one item" fpath = plot( data, title="Optimizer learning rates for federated training tasks", xlabel="round", ylabel="learning rate", fname=fname, ylim_max=ylim_max, xlim_max=xlim_max, legend_loc="upper right", ) logger.info("Data plotted and saved in file", filepath=fpath) return fpath
def _plot_fashion_mnist_dist():
    """Plots the per-partition example volume for each Fashion-MNIST
    100-partition distribution and saves the figure."""
    dists = fashion_mnist_100p()
    xs = np.arange(100)

    plt.figure()
    legend = []

    for b, dist in dists:
        legend.append(str(b))
        plt.plot(xs, np.array(dist), "o", markersize=1.0)

    plt.legend(legend, loc="upper left")
    plt.xlabel("Partition ID")
    plt.ylabel("Examples")

    dname = storage.create_output_subdir("partition_volume_distributions")
    fname = storage.fname_with_default_dir("plot-part-vol.png", dname)

    plt.savefig(fname=fname, format=FORMAT)

    # FIXME: Matplotlib is currently using agg, which is a non-GUI
    # backend, so cannot show the figure.
    # plt.show()

    return fname
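
# Hedged sketch of the shape this plot assumes for fashion_mnist_100p(): an
# iterable of (concentration parameter b, per-partition example counts) pairs,
# with 100 counts per distribution. The numbers below are invented.
_example_dists = [
    (1.0, [600] * 100),                          # perfectly balanced volumes
    (1.05, list(np.linspace(200, 1000, 100))),   # increasingly skewed volumes
]
for _b, _dist in _example_dists:
    assert len(_dist) == 100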