def main():
    # parse command line
    parser = argparse.ArgumentParser(
        description='Display the load of each processor.'
    )
    parser.add_argument(
        'jobsCSV',
        help='The name of the CSV file which contains jobs information'
    )
    parser.add_argument(
        '--output', '-o',
        help='The output file (format depending on the given extension, pdf '
             'is RECOMMENDED). For example: figure.pdf'
    )
    args = parser.parse_args()

    j = JobSet.from_csv(args.jobsCSV)    # create data structure from input args
    fig = plt.figure()                   # create figure
    ax = fig.gca()                       # extract axes
    plot_load(j, ax, str(args.jobsCSV))  # build visualization

    # show or save figure
    if args.output is not None:
        plt.savefig(args.output)
    else:
        plt.show()
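# A minimal, assumed wiring for running the main() above as a script: only the names
# actually used in it are imported, and plot_load() is taken to be a helper defined
# elsewhere in the same project (it is not part of this snippet).
import argparse

import matplotlib.pyplot as plt
from evalys.jobset import JobSet

if __name__ == '__main__':
    main()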
def main():
    print(" >>> Analyzing the jobs")

    schedulers = [
        'qarnotNodeSched',
        'qarnotNodeSchedAndrei',
        'qarnotNodeSchedReplicate3LeastLoaded',
        'qarnotNodeSchedReplicate10LeastLoaded',
        'qarnotNodeSchedFullReplicate'
    ]
    #schedulers2 = ["standard_scheduler", "locationBased_scheduler", "replicate3LeastLoaded_scheduler", "replicate10LeastLoaded_scheduler"]
    #schedulers = ['qarnotNodeSched', 'qarnotNodeSchedStatic']

    max_time = 200000
    jobsets = {}
    jobsets_time_interval = {}

    current_dir = os.getcwd()
    workload_name = current_dir.split('/')[-1]
    print(" >>> Workload: " + workload_name)

    for scheduler in schedulers:
        max_time = 200000

        # To remove the burn and staging jobs
        prepare_file(scheduler, 'jobs')
        # To keep only the staging jobs
        #prepare_file(scheduler, 'dyn-staging')

        # To plot several gantt charts together
        jobs = JobSet.from_csv(scheduler + "/out_jobs_processed.csv")
        jobsets[scheduler] = jobs

        # To remove the burn and staging jobs and filter by max_time
        #prepare_file_by_interval_time(scheduler, 200000)
        #jobs_time_interval = JobSet.from_csv(scheduler + "/out_jobs_processed_by_time.csv")
        #jobsets_time_interval[scheduler] = jobs_time_interval

        # To plot the Gantt charts
        # To plot the full Gantt Chart (3 graphs)
        #plot_all_charts(scheduler, jobs)

        # It plots the simple GanttChart
        #plot_gantt_chart(scheduler, jobs, workload_name, 0, max_time)
        # It plots the simple GanttChart looking for jobs with finish_time reduced by max_time
        #plot_gantt_chart(scheduler, jobs_time_interval, workload_name, 1, max_time)

        # To compute the waiting_time and slowdown for the real jobs
        prepare_file_metrics(scheduler, 1)
        prepare_file_metrics(scheduler, 2)
        prepare_file_metrics(scheduler, 3)

    # To plot the data sets dependencies
    data_sets_analyzes(workload_name)

    # Bar chart
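# prepare_file_metrics() above is a project-specific helper that is not shown here.
# Purely as an illustration (the helper name below is hypothetical and the column
# names are assumed to be the ones Batsim writes in out_jobs.csv), the same per-job
# metrics can be read straight off an evalys JobSet dataframe:
def print_basic_metrics(jobs):
    df = jobs.df
    waiting_time = df['starting_time'] - df['submission_time']
    # slowdown (a.k.a. stretch): turnaround time divided by execution time
    slowdown = (df['finish_time'] - df['submission_time']) / df['execution_time']
    print("mean waiting time: {:.2f}".format(waiting_time.mean()))
    print("mean slowdown:     {:.2f}".format(slowdown.mean()))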
def compare_jobsets_fragmentation(files):
    width = 10
    height = 10
    fig, axe = plt.subplots(nrows=3, figsize=(width, height))

    frag_serie = pd.Series(dtype=float)
    for f in files:
        js = JobSet.from_csv(f, resource_bounds=(0, 239))
        frag = js.fragmentation()
        label = f.split("/")[-1]
        mean_frag = frag.mean()
        # Series.set_value() was removed from pandas; plain item assignment does the same job
        frag_serie[label] = round(mean_frag, 2)
        label = label + ' (mean frag: {0:.2f})'.format(mean_frag)
        plot_fragmentation(frag, axe, label)

    axe[0].legend()
    return frag_serie
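# A hedged usage sketch for compare_jobsets_fragmentation(): the trace paths are
# placeholders, and the usual imports (pandas as pd, matplotlib.pyplot as plt,
# evalys.jobset.JobSet) plus the plot_fragmentation() helper are assumed to be
# available in the same module.
if __name__ == '__main__':
    traces = ['run_a/out_jobs.csv', 'run_b/out_jobs.csv', 'run_c/out_jobs.csv']
    mean_frags = compare_jobsets_fragmentation(traces)
    print(mean_frags)
    plt.show()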
# coding: utf-8
import matplotlib.pyplot as plt

from evalys.jobset import JobSet

#matplotlib.use('WX')

js = JobSet.from_csv('jobs.csv')
print(js.df.describe())
js.df.hist()

fig, axe = plt.subplots()
js.gantt(axe, "test")
plt.show()
def test_jobset_import_export(self):
    from evalys.jobset import JobSet
    js = JobSet.from_csv("./examples/jobs.csv")
    js.to_csv("/tmp/jobs.csv")
    js0 = JobSet.from_csv("/tmp/jobs.csv")
    assert js.df.equals(js0.df)
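# This round-trip check is written as a test method (note the self parameter), so it
# is meant to be collected by a test runner; with pytest it can be run on its own:
#
#   pytest -k test_jobset_import_export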
from evalys.jobset import JobSet
import matplotlib.pyplot as plt
import argparse

parser = argparse.ArgumentParser(
    description="Visualisation tool for Batsim out_jobs.csv file")
parser.add_argument('file', metavar='file', type=str,
                    help='path to the csv file.')
args = parser.parse_args()

js = JobSet.from_csv(args.file)
js.plot(with_details=True)
plt.show()
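# Example invocation (the script name is a placeholder for wherever this file lives):
#
#   python plot_out_jobs.py expe-out/out_jobs.csv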
def main():
    parser = argparse.ArgumentParser(
        description='visualisation tool for scheduling trace and Batsim output file.')
    parser.add_argument('inputCSV', nargs='+', help='The input CSV file(s)')
    parser.add_argument('--gantt', '-g',
                        action='store_true', default=False,
                        help='Generate Gantt charts')
    parser.add_argument('--series', '-s',
                        nargs='?', default=None, const='all',
                        help='Generate timeseries on cumulative metrics. '
                             'Available metrics are: {}'.format(available_series))
    parser.add_argument('--output', '-o', nargs='?',
                        help='The output Gantt chart file depending on the extension '
                             '(PDF format is RECOMMENDED). For example: figure.pdf')
    parser.add_argument('--gantt_diff', '-d',
                        action='store_true', default=False,
                        help='Generate a gantt diff comparison between inputs '
                             '(no more than 3 recommended)')
    args = parser.parse_args()

    if NO_GRAPHICS and not args.output:
        print("No available display: please provide an output using the -o,--output option")
        exit(1)

    # generate subplots
    nb_subplot = 0
    if args.gantt:
        nb_subplot += len(args.inputCSV)
    if args.gantt_diff:
        nb_subplot += 1
    if args.series:
        nb_subplot += 1

    if not args.gantt and not args.gantt_diff and not args.series:
        print("You must select at least one option (use -h to see available options)")
        exit(1)

    fig, ax_list = plt.subplots(nb_subplot, sharex=True)  # sharey=True

    # manage the single-axes case (plt.subplots does not return an iterable then)
    try:
        iter(ax_list)
    except TypeError:
        ax_list = [ax_list]
    else:
        ax_list = list(ax_list)

    # backup all axes
    all_ax = list(ax_list)

    # reserve the last plot for series
    if args.series:
        ax_series = ax_list[-1]
        ax_list = ax_list[:-1]

    # reserve the last remaining plot for the gantt diff
    if args.gantt_diff:
        ax_shape = ax_list[-1]
        ax_list = ax_list[:-1]

    # generate jobsets from CSV inputs
    jobsets = {}
    index = 0
    for inputCSV in sorted(args.inputCSV):
        js = JobSet.from_csv(inputCSV)
        file_name = os.path.basename(inputCSV)
        file_name = unique_file_name(jobsets, file_name)
        jobsets[file_name] = js
        if args.gantt:
            js.gantt(ax_list[index], file_name)
            index += 1

    if args.gantt_diff:
        plot_gantt_general_shape(jobsets, ax_shape)

    if args.series:
        plot_series(args.series, jobsets, ax_series)

    # set axes and resources
    x_axes_min_value = min({m.df.submission_time.min() for m in jobsets.values()})
    x_axes_max_value = max({m.df.finish_time.max() for m in jobsets.values()})
    y_axes_min_value = min([js.res_bounds[0] for js in jobsets.values()])
    y_axes_max_value = max([js.res_bounds[1] for js in jobsets.values()])
    x_size = x_axes_max_value - x_axes_min_value
    y_size = y_axes_max_value - y_axes_min_value
    print("x = ({},{})".format(x_axes_min_value, x_axes_max_value))
    print("y = ({},{})".format(y_axes_min_value, y_axes_max_value))
    print("x size = {}".format(x_size))
    print("y size = {}".format(y_size))

    for ax in all_ax:
        ax.set_xlim((x_axes_min_value, x_axes_max_value))
        #ax.set_ylim((y_axes_min_value, y_axes_max_value))

    # Layout and cosmetic changes
    #plt.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0)
    fig.set_tight_layout(True)
    y_inches = max(y_size * len(all_ax) * 0.15, 8)
    if args.gantt_diff:
        y_inches += y_size * 0.15
    fig.set_size_inches(y_inches * 1.7, y_inches, forward=True)

    if args.output is not None:
        plt.savefig(args.output)
    else:
        plt.show()
from evalys.jobset import JobSet
import matplotlib.pyplot as plt
import json
import numpy as np

js = JobSet.from_csv("../visualization/output/imt_jobs.csv")
js.plot(with_details=True)

# Loading the storage spaces load log.
with open('../visualization/output/imt_storages_load.json') as f:
    loads = json.load(f)

x_axis = loads["time"]
index = np.arange(len(x_axis))
del loads["time"]
y_axis = loads["qb0_disk"]

fig, subplots = plt.subplots(3, 1, sharex=True)
current_plot = 0
min_size = 0
max_size = 0

# Compute max storage size (shared y scale for all subplots)
for storageSpace, load in loads.items():
    if max(load) > max_size:
        max_size = max(load)

# Generating a graph for each storage space
for storageSpace, load in loads.items():
    subplots[current_plot].set_title(storageSpace)
    subplots[current_plot].set_ylabel("load in bytes")
    subplots[current_plot].set_ylim(min_size, max_size)
    # Assumed completion: the original snippet ends without drawing anything, so
    # plotting the load over time and advancing to the next subplot is a guess.
    subplots[current_plot].plot(x_axis, load)
    current_plot += 1

plt.show()  # assumed: not present in the original snippet
import tkinter as tk

from evalys.jobset import JobSet
from evalys import visu

js = JobSet.from_csv("./expe-out/out_jobs.csv")
visu.gantt.plot_gantt(js)

tk.mainloop()
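# A hedged alternative for headless machines (assumed, not part of the original
# script): render the same Gantt chart but save it to a file instead of entering
# the Tk main loop. The output file name is a placeholder.
import matplotlib.pyplot as plt

from evalys.jobset import JobSet
from evalys import visu

js = JobSet.from_csv("./expe-out/out_jobs.csv")
visu.gantt.plot_gantt(js)
plt.savefig("out_jobs_gantt.pdf")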