Example No. 1
def get_metric_data(fp):
    #for use with queue and delay data
    #sort all metric data from same tsc_id
    if not os.path.exists(fp):
        assert 0, 'Supplied path ' + str(fp) + ' does not exist.'

    tsc_data = {
        tsc_id: sorted(os.listdir(fp + '/' + tsc_id))
        for tsc_id in os.listdir(fp)
    }
    sim_runs_data = []

    #until all data has been popped
    k = list(tsc_data.keys())[0]
    while len(tsc_data[k]) > 0:
        #get file path for each intersection from same sim run
        same_run_data = [
            fp + '/' + tsc_id + '/' + tsc_data[tsc_id].pop(0)
            for tsc_id in tsc_data
        ]
        same_run_data = [load_data(f) for f in same_run_data]
        #sum across time axis, each element of array
        #represents the sum of all tsc_id metric
        sim_runs_data.append(np.sum(same_run_data, axis=0))

    return np.stack(sim_runs_data)
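A minimal usage sketch, assuming the os/numpy imports and the load_data helper used above are in scope; the 'metrics/queue' path and directory layout (one subfolder per tsc_id, one pickled array per simulation run) are hypothetical.

#hypothetical layout: metrics/queue/<tsc_id>/<run file>
queue_data = get_metric_data('metrics/queue')
#row i is the element-wise sum over all intersections for simulation run i
print(queue_data.shape)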
Example No. 2
def load_weights(self, path):
    path += '.p'
    if os.path.exists(path):
        weights = load_data(path)
        self.set_weights(weights, 'online')
    else:
        #abort if the supplied weight file does not exist
        assert 0, 'Failed to load weights, supplied weight file path '+str(path)+' does not exist.'
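A call sketch, assuming an agent object exposing this method; the checkpoint path is hypothetical. Note that '.p' is appended inside load_weights, so the caller passes the path without the extension.

#expects 'checkpoints/online_net.p' on disk
agent.load_weights('checkpoints/online_net')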
Example No. 3
def get_folder_data(fp):
    #all travel times can be grouped
    #together by extending the list
    if not os.path.exists(fp):
        assert 0, 'Supplied path '+str(fp)+' does not exist.'
    data = []
    for f in os.listdir(fp):
        data.extend(load_data(fp+'/'+f))
    return np.array(data)
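A usage sketch, under the assumption that each file in the folder is a pickled list of travel times and that numpy is imported as np; the folder name is hypothetical.

#hypothetical folder of per-run travel time lists
travel_times = get_folder_data('results/travel_times')
print(np.mean(travel_times), np.std(travel_times))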
Example No. 4
def load_replays(self):
    for _id in self.agent_ids:
        replay_fp = self.replay_fp+_id+'.p'
        if os.path.isfile(replay_fp):
            data = load_data(replay_fp)
            rewards = []
            for traj in data:
                for exp in traj:
                    rewards.append(abs(exp['r']))
                self.exp_replay[_id].append(traj)
            # find largest reward for reward normalization
            print('mean '+str(np.mean(rewards))+' std '+str(np.std(rewards))+' median '+str(np.median(rewards)))
            self.rl_stats[_id]['r_max'] = max(rewards)
            print(str(self.idx)+' LARGEST REWARD '+str(self.rl_stats[_id]['r_max']))
            print('SUCCESSFULLY LOADED REPLAY FOR '+str(_id))
        else:
            print('WARNING, tried to load experience replay at '+str(replay_fp)+' but it does not exist, continuing without loading...')
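The comment above refers to reward normalization; one plausible use of r_max, shown here as an assumption rather than the repository's actual update code, is to scale a raw reward by the largest absolute reward seen in the loaded replay.

#illustrative only: hypothetical helper for reward normalization
def normalize_reward(r, r_max):
    #guard against an empty or degenerate replay
    return r / r_max if r_max > 0 else r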
Example No. 5
def graph_hyper_params(labels, colours, fp, save_dir):
    tsc = os.listdir(fp)
    tsc_hp = {}
                                                                                                                   
    #get data
    for t in tsc:
        tsc_fp = fp+t+'/'
        data = [ load_data(tsc_fp+f) for f in os.listdir(tsc_fp)]                                                 
        tsc_hp[t] = np.stack([ [np.mean(d), np.std(d)] for d in data]).T

    #create appropriate graph
    n = len(tsc)
    if n == 1:
        #single tsc: use a 1x1 grid so nrows/ncols are still defined below
        nrows, ncols = 1, 1
        f, axes = plt.subplots()
        axes = [axes]
    else:
        nrows = 2
        ncols = int(n/nrows) if n%nrows == 0 else int((n+1)/nrows)
        f, axes = plt.subplots(nrows=nrows, ncols=ncols)
        axes = axes.flat

    if n%nrows != 0:
        f.delaxes(axes[-1])

    XTITLE = 'Mean\nTravel Time '+r"$(s)$"
    YTITLE = ('Standard\nDeviation\nTravel Time '+r"$(s)$", 80)

    #graph each tsc hyperparameter
    for ax, t, i in zip(axes, tsc, range(len(tsc))):
        #order hp performance from low to high 
        #w.r.t mean+std
        mean_data = tsc_hp[t][0] 
        std_data = tsc_hp[t][1]
        data = sorted([ (m+s, m, s) for m,s in zip(mean_data, std_data) ], key = lambda x:x[0] )
        data = np.stack([ [d[1], d[2]] for d in data]).T
        mean_data = data[0]
        std_data = data[1]

        #rainbow_colours = mp.cm.rainbow(np.linspace(0, 1, len(mean_data)))                          

        rg_colours = mp.cm.brg(np.linspace(1.0, 0.5, len(mean_data)))                          
        if i%ncols == 0 and i >= len(tsc)/2:
            xtitle = XTITLE 
            ytitle = YTITLE 
        elif i%ncols == 0:
            xtitle = ''
            ytitle = YTITLE
        elif i >= len(tsc)/2:
            xtitle = XTITLE
            ytitle = ''
        else:
            xtitle = ''
            ytitle = ''
        #graph each tsc hp performance 
        graph(ax, mean_data, scatter(ax, mean_data, std_data, rg_colours, ['']*len(mean_data)),
              xtitle=xtitle,
              ytitle_pad=ytitle,
              title=str(labels[t]),
              xlim=[0.0, max(mean_data)*1.05],
              ylim=[0.0, max(std_data)*1.05],
              grid=True)

    #axis colourbar
    cax = f.add_axes([0.915, 0.1, 0.05, 0.85])
    cmap = mp.cm.brg
    cm = LinearSegmentedColormap.from_list('rg', rg_colours, N=rg_colours.shape[0])
    norm = mp.colors.Normalize(vmin=0.5, vmax=1.0)
    cb = mp.colorbar.ColorbarBase(cax, cmap=cm,
                                norm=norm,
                                orientation='vertical')

    #color bar axis text
    #print([ l._text for l in cb.ax.get_yticklabels()])
    #cb_labels = ['']*rg_colours.shape[0]
    cb_labels = [ l.get_text() for l in cb.ax.get_yticklabels()]
    cb_labels[0] = 'Best'
    cb_labels[-1] = 'Worst'
    cb.ax.set_yticklabels(cb_labels)

    f.suptitle('Hyperparameter Performance')                                                            
    save_graph(f, save_dir+'tsc_hp.pdf', 600, 14, 24.9)
    plt.show()                                                                            

    #now compare all tsc hp sets together in one graph
    #prepare data
    data_order = sorted(tsc_hp.keys())
    #tsc_color = colours[:len(data_order)]
    mean_data, std_data, colors, tsc_labels = [], [], [], []
    for d in data_order:
        n = len(tsc_hp[d][0])
        mean_data.extend(tsc_hp[d][0])
        std_data.extend(tsc_hp[d][1])
        tsc_labels.extend([labels[d]]*n)
        #colors.extend([c]*len(tsc_hp[d][0]))
        colors.extend( [colours[d]]*n )
    #graph all hp data together
    f, ax = plt.subplots(1,1)
    graph(ax, mean_data, scatter(ax, mean_data, std_data, colors, ['']*len(mean_data)),
          xtitle=XTITLE,
          ytitle_pad=YTITLE,
          title='Traffic Signal Control\nHyperparameter Comparison',
          xlim=[0.0, 200.0],
          ylim=[0.0, 200.0],
          #xlim=[0.0, max(mean_data)*1.05],
          #ylim=[0.0, max(std_data)*1.05],
          #legend=(0.82, 0.72),
          #colours=colours,
          grid=True)

    #colorbar

    #add legend manually because we only
    #want one for each tsc
    patches = []
    for d in data_order:
        c = colours[d]
        patches.append( mpatches.Patch(color=c, label=labels[d]) )
    plt.legend(handles=patches, framealpha=1.0)
    save_graph(f, save_dir+'hp.pdf', 600, 14, 24.9)
    plt.show()
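A call sketch, assuming a results directory with one subfolder per traffic signal controller (each holding pickled travel-time arrays, one per hyperparameter set), plus the matplotlib/numpy imports and the graph, scatter, and save_graph helpers used above; all names and paths below are hypothetical.

labels = {'dqn': 'DQN', 'websters': 'Websters'}
colours = {'dqn': 'tab:blue', 'websters': 'tab:orange'}
#assumed layout: hyperparams/<tsc>/<hp set file>
graph_hyper_params(labels, colours, 'hyperparams/', 'graphs/')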
Example No. 6
def stack_folder_files(fp):
    data = [ load_data(fp+f) for f in os.listdir(fp)]
    return np.stack(data)
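Unlike get_folder_data above, which concatenates variable-length lists, np.stack requires every file in the folder to hold an array of the same shape. A minimal sketch of the assumed layout; the path is hypothetical and fp must end with a separator since the function concatenates fp+f directly.

#hypothetical: each file holds an array of shape (n_timesteps,)
runs = stack_folder_files('metrics/delay/intersection_0/')
print(runs.shape)   #(n_files, n_timesteps)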
Example No. 7
def get_hp_results(fp):
    travel_times = []
    for f in os.listdir(fp):
        travel_times.extend(load_data(fp+f))

    return travel_times