def __write_heatmaps(self):
    """
    Creates a json file with the information from the state matrices.

    The file can be used to create heatmap visualizations for the
    individual matrices. The json structure is as follows:

    {matrix_name_1: {
        data: nested list of values
        min_value: minimum value over the lists
        max_value: max value over the lists
        x_label: the column title
        y_label: the row title
        n_rows: the number of rows in the matrix
        n_cols: the number of columns in the matrix
        nfo_type: the information category; either 'jobs', 'tracking',
            'machines'
        }
     matrix_name_1: { ... }
     ...
    }

    :return: None
    """
    f_name = f'{self.log_dir}/heatmaps/{str(self.state.n_steps)}.json'
    # NOTE(review): the original also built operation_graph.to_dict() here
    # but never used it; that dead (and potentially expensive) call was
    # removed. Only the matrices dict is written.
    with open(create_folders(f_name), 'w') as f_matrices:
        dump(self.state.matrices.to_dict(), f_matrices)
def __write_machines(self, action_log):
    """
    Writes a json snapshot of the machine state for the current step,
    annotated with routing/scheduling link information parsed from the
    action log.

    :param action_log: '|'-separated string of the form
        'legal_acts|act|current_mach|mode_object', where mode_object is a
        literal parseable by make_tuple: an int (job number, routing),
        None (scheduling), or an operation tuple (routing at breakdown).
    :return: None
    """
    f_name = f'{self.log_dir}/machines/{str(self.state.n_steps)}.json'
    f_mach_data = self.state.machines.to_dict()
    legal_acts, act, current_mach, mode_object = action_log.split('|')
    mode = make_tuple(mode_object)
    if isinstance(mode, int):  # routing: mode is the job number
        job_nr = mode
        next_ops = self.state.operation_graph.get_next_ops(job_nr)
        self.__fill_routing_data(
            legal_acts, current_mach, next_ops, f_mach_data, act)
    elif mode is None:  # scheduling: annotate a self-link on the machine
        f_mach_data["links"] = {
            "source": int(current_mach),
            "target": int(current_mach),
            "label": f"Scheduled {act}"
            if not self.sim_manager.is_wait_processing(make_tuple(act)[1])
            else f"Decided to wait"
        }
    else:  # routing at breakdown; mode contains the op to be routed
        op_routed = mode
        self.__fill_routing_data(
            legal_acts, current_mach, [op_routed], f_mach_data, act)
    with open(create_folders(f_name), 'w') as f_mach:
        dump(f_mach_data, f_mach)
def __init__(self, filepath, seed, state: State, simulation_manager):
    """
    Initializes the logger.

    :param filepath: base path for log output; an empty string disables
        logging entirely (self.on is set to False).
    :param seed: the random seed, embedded in the log file name.
    :param state: the simulation State object to snapshot.
    :param simulation_manager: the simulation manager providing events.
    """
    self.on = filepath != ''
    if self.on:
        # Wipe and recreate the directory containing filepath.
        self.log_dir = '/'.join(filepath.split('/')[:-1])
        if os.path.isdir(self.log_dir):
            rmtree(self.log_dir)
        create_folders(f'{self.log_dir}/dummy')
        # write logger object
        self.logfile = f'{filepath}_s{seed}.log'
    self.seed = seed
    self.state = state
    self.sim_manager = simulation_manager
    self.action_log = None
    self.schedule = []
def __write_events(self):
    """
    Writes the current event-heap contents, one event per line, to a
    per-step text file, then flushes the heap's event log.

    :return: None
    """
    events_path = f'{self.log_dir}/events/{str(self.state.n_steps)}.txt'
    # The original removed any existing file and then opened in append
    # mode; opening in 'w' mode is equivalent and atomic w.r.t. this code.
    f_events_data = self.sim_manager.event_heap.get_heap_representation()
    with open(create_folders(events_path), 'w') as f_events:
        f_events.write(
            ''.join(event + '\n' for event in f_events_data.split('|')))
    self.sim_manager.event_heap.flush_event_log()
def __write_nfo(self, decision_queues):
    """
    Writes a json file with general step information: the decision
    queues, the system time, and a human-readable scheduling mode.

    :param decision_queues: dict of decision-queue information; it is
        copied before being augmented, so the caller's dict is untouched
        (the original mutated it in place — "beware of mutability").
    :return: None
    """
    nfo_dict = dict(decision_queues)  # shallow copy; don't mutate caller
    nfo_dict['system_time'] = int(self.state.system_time)
    sched_mode_map = {
        0: 'Sequencing',
        1: 'Routing',
        2: 'Breakdown Handling'
    }
    nfo_dict['scheduling_mode'] = sched_mode_map[
        self.state.scheduling_mode]
    f_name = f'{self.log_dir}/nfo/{str(self.state.n_steps)}.json'
    with open(create_folders(f_name), 'w') as f_nfo:
        dump(nfo_dict, f_nfo)
def __write_kpis(self):
    """
    Writes two per-step KPI csv files: one with machine utilization and
    relative buffer times, and one with job completion/flow-time
    estimates.

    :return: None
    """
    t = self.state.system_time
    # Machine KPIs: utilization rate and relative buffer load.
    utl_total = self.state.machines.utilization_times
    utl_rate = utl_total / t if t > 0 else utl_total
    bft = self.state.machines.buffer_times
    bft_sum = bft.sum()  # reuse the local instead of re-reading the attr
    bf_rel_load = bft / bft_sum if bft_sum > 0 else bft
    machines_df = pd.DataFrame({
        'Machine Index': list(range(1, utl_rate.shape[0] + 1)),
        'Utilization Rate': utl_rate,
        'Relative Buffer Times': bf_rel_load
    })
    f_name = f'{self.log_dir}/kpis/{str(self.state.n_steps)}_kpi_m.csv'
    machines_df.to_csv(create_folders(f_name), index=False, header=True)
    # Job KPIs: estimated completion and flow times.
    w_remaining = self.state.trackers.job_remaining_time
    work_time_last = self.state.trackers.job_last_processed_time
    work_time_start = self.state.trackers.job_start_times
    work_release_time = self.state.trackers.job_visible_dates
    ops_left = self.state.trackers.n_remaining_ops
    # Vectorized replacement for the original per-element list
    # comprehension: finished jobs keep their last processed time,
    # unfinished jobs get now + remaining work.
    minimum_completion_time = np.where(
        ops_left > 0, t + w_remaining, work_time_last)
    start_relative_flow_time = minimum_completion_time - work_time_start
    release_relative_flow_time = minimum_completion_time - work_release_time
    jobs_released = release_relative_flow_time < minimum_completion_time
    jobs_df = pd.DataFrame({
        'Job Index': list(range(0, minimum_completion_time.shape[0])),
        'Estimated Completion': minimum_completion_time,
        'Start Rel. Flow Time': start_relative_flow_time,
        'Release Rel. Flow Time': release_relative_flow_time,
        'Jobs Visible': jobs_released
    })
    # Consistency fix: route through create_folders like the machines csv.
    jobs_df.to_csv(
        create_folders(
            f'{self.log_dir}/kpis/{str(self.state.n_steps)}_kpi_j.csv'),
        index=False, header=True)
def __write_graphs(self):
    """
    Serializes the operation precedence graph for the current step to a
    per-step json file under graphData/.

    :return: None
    """
    graph_dict = self.state.operation_graph.to_dict()
    graph_path = f'{self.log_dir}/graphData/{str(self.state.n_steps)}.json'
    with open(create_folders(graph_path), 'w') as graph_file:
        dump(graph_dict, graph_file)
def __write_gantt(self):
    """
    Dumps the accumulated schedule entries as a headerless csv for Gantt
    chart visualization, one file per step.

    :return: None
    """
    gantt_path = f'{self.log_dir}/scheduling/{str(self.state.n_steps)}.csv'
    schedule_df = pd.DataFrame(self.schedule)
    schedule_df.to_csv(create_folders(gantt_path),
                       index=False, header=False)