def histograms(self, start_time, stop_time, depth, overlapping=True):
    """Plot normalized histograms of model signal trends vs. noise trends.

    Computes L-year trends (L spans start_time..stop_time inclusive) in the
    model and noise projections via ``self.sn_at_time``, scales both by the
    noise standard deviation so the x-axis is S/N, and overlays fitted
    normal curves for each distribution.

    Parameters
    ----------
    start_time, stop_time : datetime-like objects with a ``.year`` attribute
    depth : soil-moisture depth key passed through to ``sn_at_time``
    overlapping : whether to use overlapping trend windows (default True)
    """
    L = stop_time.year - start_time.year + 1
    modslopes, noiseterm = self.sn_at_time(start_time, L, depth, overlapping=overlapping)
    ns = np.std(noiseterm)
    # density= replaces normed=, which was deprecated in matplotlib 2.1
    # and removed in 3.1; semantics are identical.
    plt.hist(modslopes / ns, 20, density=True, color=cm.Oranges(.8), alpha=.5)
    lab = str(start_time.year) + "-" + str(stop_time.year)
    da.fit_normals_to_data(modslopes / ns, color=cm.Oranges(.9), lw=3,
                           label=lab + " Model projections")
    plt.hist(noiseterm / ns, 20, density=True, color=cm.Purples(.8), alpha=.5)
    da.fit_normals_to_data(noiseterm / ns, color=cm.Purples(.9), lw=3, label="Noise")
    plt.xlabel("S/N")
    plt.ylabel("Normalized Frequency")
def dial(regression_output, ax, DVTitle, arrow_index):
    """Draw a half-dial (donut) gauge colored by regression output, with an
    arrow pointing at the wedge given by ``arrow_index``.

    Parameters
    ----------
    regression_output : array-like of values in [0, .81]; darker wedges
        correspond to smaller values via the Purples colormap
    ax : matplotlib axes to draw the center circle and arrow on
    DVTitle : title text (currently unused; the set_title call is disabled)
    arrow_index : index of the wedge the arrow should point at
    """
    # Create bins to plot (equally sized); the second half is masked white
    # so only a half-dial is visible.
    size_of_groups = np.ones(len(regression_output) * 2)

    # Create a pieplot, half white, half colored by logistic regression results
    white_half = np.ones(len(regression_output)) * .5
    color_half = (.81 - regression_output) / .81
    color_pallet = np.concatenate([color_half, white_half])
    cs = cm.Purples(color_pallet)
    pie_wedge_collection = plt.pie(size_of_groups, colors=cs)
    # Match each wedge's edge color to its face color so wedges blend together.
    for i, pie_wedge in enumerate(pie_wedge_collection[0]):
        pie_wedge.set_edgecolor(cm.Purples(color_pallet[i]))
    #ax.set_title(DVTitle)

    # add a circle at the center (turns the pie into a dial/donut)
    my_circle = plt.Circle((0, 0), 0.3, color='white')
    #p=plt.gcf()
    ax.add_artist(my_circle)

    # create the arrow, pointing at specified index.
    # math.pi replaces the hand-rolled 3.14159 approximation.
    arrow_angle = (arrow_index / float(len(regression_output))) * math.pi
    arrow_x = 0.2 * math.cos(arrow_angle)
    arrow_y = 0.2 * math.sin(arrow_angle)
    ax.arrow(0, 0, -arrow_x, arrow_y, width=.02, head_width=.05,
             head_length=.1, fc='k', ec='k')
def da_colors(typ):
    """Return the canonical plotting color for a detection/attribution
    data-source key.

    Raises KeyError for unknown keys.
    """
    palette = {
        "h85": cm.Oranges(.8),    # cm.Dark2(0.)
        "piC": cm.Greens(.7),     # cm.Dark2(.2)
        "gpcp": cm.Purples(.5),   # cm.Dark2(.4)
        "cmap": cm.Reds(.8),
        "precl": cm.Purples(.9),
    }
    return palette[typ]
def get_dataset_color(dataset, depth=None):
    """Set the colors to ensure a uniform scheme for each dataset.

    Parameters
    ----------
    dataset : dataset name (case-insensitive), e.g. "dai", "tree", "cru",
        "picontrol", "h85", "tree_noise", "merra2", "gleam"
    depth : for soil-moisture datasets ("merra2", "gleam"), one of
        "30cm"/"2m"; None for all other datasets

    Raises KeyError for unknown dataset/depth keys.
    """
    # str.lower() replaces string.lower(), which only exists in Python 2
    # (removed from the string module in Python 3).
    dataset = dataset.lower()
    d = {}
    d["dai"] = cm.Blues(.5)
    d["tree"] = cm.summer(.3)
    d["cru"] = cm.Blues(.9)
    # models
    d["picontrol"] = cm.Purples(.8)
    d["h85"] = "k"
    d["tree_noise"] = cm.PiYG(.2)
    # Soil moisture
    d["merra2"] = {}
    d["merra2"]["30cm"] = cm.copper(.3)
    d["merra2"]["2m"] = cm.copper(.3)
    d["gleam"] = {}
    d["gleam"]["30cm"] = cm.Reds(.3)
    d["gleam"]["2m"] = cm.Reds(.7)
    if depth is None:
        return d[dataset]
    else:
        return d[dataset][depth]
def compare_pre_post_1100_noise(X, L=31, latbounds=None):
    """Compare noise-trend distributions before and after year 1400.

    Plots (a) the two noise time series (1100-1399 vs. 1400-2005) and
    (b) histograms of bootstrapped L-year trends for each period, and
    returns the standard deviations of the two trend distributions.

    Parameters
    ----------
    X : data object providing ``.obs``/``.model``/``.noise`` accessors
    L : trend length in years (default 31)
    latbounds : optional latitude bounds; when given, noise is recomputed
        by projecting masked observations onto the leading model EOF

    Returns
    -------
    (float, float) : std of bootstrapped trends for period 1 and period 2
    """
    time1 = ('1100-1-1', '1399-12-31')
    c1 = cm.Purples(.8)
    time2 = ('1400-1-1', '2005-12-31')
    if latbounds is not None:
        obs = X.obs(latitude=latbounds)
        mma = MV.average(X.model(latitude=latbounds), axis=0)
        # Mask the model mean to the obs coverage before computing EOFs,
        # then mask obs to the EOF domain so the projection is consistent.
        mma = mask_data(mma, obs[0].mask)
        solver = Eof(mma)
        obs = mask_data(obs, solver.eofs()[0].mask)
        truncnoise = solver.projectField(obs)[:, 0] * da.get_orientation(solver)
        noisy1 = truncnoise(time=time1)
        noisy2 = truncnoise(time=time2)
    else:
        noisy1 = X.noise(time=time1)
        noisy2 = X.noise(time=time2)
    c2 = cm.viridis(.1)
    plt.subplot(121)
    Plotting.Plotting.time_plot(noisy1, c=c1)
    Plotting.Plotting.time_plot(noisy2, c=c2)
    plt.ylabel("Projection")
    plt.title("(a): Noise time series")
    plt.subplot(122)
    # density= replaces normed=, which was removed in matplotlib 3.1.
    plt.hist(b.bootstrap_slopes(noisy1, L), color=c1, density=True, alpha=.5)
    da.fit_normals_to_data(b.bootstrap_slopes(noisy1, L), c=c1, label="1100-1400")
    plt.hist(b.bootstrap_slopes(noisy2, L), color=c2, density=True, alpha=.5)
    da.fit_normals_to_data(b.bootstrap_slopes(noisy2, L), c=c2, label="1400-2005")
    plt.legend()
    plt.title("(b): 31-year trend distributions")
    return np.std(b.bootstrap_slopes(noisy1, L)), np.std(b.bootstrap_slopes(noisy2, L))
def obs_SN(self, start_time, stop_time, depth, overlapping=True):
    """Plot model and noise S/N trend distributions with observed trends.

    Projects the MERRA2 and GLEAM soil-moisture observations onto the
    fingerprint, plots histograms of model (H85) and noise (piControl)
    projection trends normalized by the noise standard deviation, and marks
    the observed MERRA2/GLEAM trends as vertical lines.

    Parameters
    ----------
    start_time, stop_time : datetime-like objects with a ``.year`` attribute
    depth : soil-moisture depth key into ``self.OBS_PROJECTIONS``
    overlapping : whether to use overlapping trend windows (default True)
    """
    self.project_soilmoisture("MERRA2")
    self.project_soilmoisture("GLEAM")
    L = stop_time.year - start_time.year + 1
    modslopes, noiseterm = self.sn_at_time(start_time, L, depth, overlapping=overlapping)
    ns = np.std(noiseterm)
    # density= replaces normed=, which was removed in matplotlib 3.1.
    plt.hist(modslopes / ns, 20, density=True, color=cm.Oranges(.8), alpha=.5)
    lab = str(start_time.year) + "-" + str(stop_time.year)
    da.fit_normals_to_data(modslopes / ns, color=cm.Oranges(.9), lw=3,
                           label=lab + " trends in H85 projections onto fingerprint")
    plt.hist(noiseterm / ns, 20, density=True, color=cm.Purples(.8), alpha=.5)
    da.fit_normals_to_data(
        noiseterm / ns, color=cm.Purples(.9), lw=3,
        label=str(L) + "-year trends in piControl projection onto fingerprint")
    plt.xlabel("S/N")
    plt.ylabel("Normalized Frequency")
    merra = self.OBS_PROJECTIONS["MERRA2"][depth](time=(start_time, stop_time))
    gleam = self.OBS_PROJECTIONS["GLEAM"][depth](time=(start_time, stop_time))
    merrasig = cmip5.get_linear_trends(merra) / ns
    plt.axvline(merrasig, label="MERRA2", c="b", lw=3)
    gleamsig = cmip5.get_linear_trends(gleam) / ns
    plt.axvline(gleamsig, label="GLEAM", c="r", lw=3)
    plt.legend()
def plot_compare_phi_psi(neurons, epsilon_values, tau_y_values, psi_values):
    """Plot error E against phi for each psi, across epsilon/tau_y combos.

    This only works for summary type saves.

    One subplot per psi value; within each subplot, one curve per
    (epsilon, tau_y) pair, colored by a per-epsilon colormap shaded by tau_y.
    E is computed as log[2] - log[3] for each matching neuron log entry.

    Returns the matplotlib figure.
    """
    fig, axs = plt.subplots(1, len(psi_values), sharey=True,
                            figsize=(5 * len(psi_values) + 2, 5))
    cm_section = np.linspace(0.3, 1, len(tau_y_values))
    # One colormap per epsilon value (supports up to 4), shaded by tau_y.
    colours = []
    colours.append([cm.Blues(x) for x in cm_section])
    colours.append([cm.Oranges(x) for x in cm_section])
    colours.append([cm.Purples(x) for x in cm_section])
    colours.append([cm.Greens(x) for x in cm_section])
    for j, epsilon in enumerate(epsilon_values):
        for k, tau_y in enumerate(tau_y_values):
            # Raw strings avoid invalid escape sequences (\e, \p) that are
            # SyntaxWarnings on modern Python; rendered text is unchanged.
            label = r"$\tau_y={}$, $\epsilon={}$".format(tau_y, epsilon)
            E = []
            phi_values = []
            for i in range(len(psi_values)):
                E.append([])
                phi_values.append([])
            for neuron in neurons:
                if neuron.hyper['psi'] in psi_values:
                    phi_values[psi_values.index(neuron.hyper['psi'])].append(neuron.hyper['phi'])
                    for log in neuron.logs:
                        if log[0]['tau_y'] == tau_y and log[0]['epsilon'] == epsilon:
                            E[psi_values.index(neuron.hyper['psi'])].append(log[2] - log[3])
            for i in range(len(psi_values)):
                if i == 0:
                    # Label only the first subplot's line so the shared
                    # figure legend has one entry per (epsilon, tau_y).
                    axs[i].plot(phi_values[i], E[i], label=label, color=colours[j][k])
                    axs[i].set_ylabel('$E$')
                else:
                    axs[i].plot(phi_values[i], E[i], color=colours[j][k])
                axs[i].set_title(r'$\psi={}$'.format(psi_values[i]))
                axs[i].set_xlabel(r'$\phi$')
    fig.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    # fig.legend(loc='upper center', bbox_to_anchor=(0.5, -0.02), fancybox=True, shadow=True, ncol=6)
    fig.tight_layout()
    plt.show()
    return fig
def colorregions(region):
    """Set the colors to ensure a uniform scheme for each region.

    Raises KeyError for unknown region codes.
    """
    region_palette = {
        "ALL": "k",
        "NHDA": cm.gray(.5),
        "NADA": cm.Purples(.5),
        "OWDA": cm.Blues(.5),
        "MXDA": cm.PiYG(.1),
        "ANZDA": cm.PiYG(.8),
        "MADA": cm.Oranges(.5),
        "GDA": "k",
    }
    return region_palette[region]
def get_colors(color_c=3, color_step=100):
    """Build a bank of sequential colormap samples and return the first
    ``color_c * color_step`` entries, wrapping around the bank if the
    request exceeds the eight available colormaps.
    """
    ramp = np.linspace(0.4, 1, color_step)
    # Eight sequential colormaps, each sampled color_step times.
    colormaps = (cm.Oranges, cm.Reds, cm.Greys, cm.Purples,
                 cm.Blues, cm.Greens, cm.pink, cm.copper)
    bank = np.vstack([mapper(ramp) for mapper in colormaps])
    indices = np.arange(color_c * color_step) % (color_step * len(colormaps))
    return bank[indices]
def __init__(self, data_pred, data_gt, colors, img, types, gif_name="test.gif", plot_=False, save=True):
    """Hold animation state for predicted vs. ground-truth agent trajectories.

    data_pred / data_gt are (agents, frames, 2) arrays of xy positions —
    TODO confirm against caller. Each agent is assigned a random shade from
    a colormap chosen by its type; that assignment supersedes the ``colors``
    argument stored just before it.
    """
    self.img = img
    # Split the (agent, frame, xy) arrays into separate x/y planes.
    self.xs_pred = data_pred[:, :, 0]
    self.ys_pred = data_pred[:, :, 1]
    self.xs_gt = data_gt[:, :, 0]
    self.ys_gt = data_gt[:, :, 1]
    self.types = types
    self.nb_agents = self.xs_pred.shape[0]
    self.margin = 1
    self.nb_frames = self.xs_pred.shape[1]
    self.gif_name = gif_name
    self.plot_ = plot_
    self.save = save
    self.fps = 1
    self.colors = colors
    self.lin_size = 100
    shade_ramp = np.linspace(0.6, 0.8, self.lin_size)
    # One color ramp per agent type.
    self.color_dict = {
        "bicycle": cm.Blues(shade_ramp),
        "pedestrian": cm.Reds(shade_ramp),
        "car": cm.Greens(shade_ramp),
        "skate": cm.Greys(shade_ramp),
        "cart": cm.Purples(shade_ramp),
        "bus": cm.Oranges(shade_ramp),
    }
    # Sample one random shade per agent from its type's ramp (overwrites
    # the colors argument saved above, matching existing behavior).
    self.colors = [self.color_dict[agent_type][np.random.randint(self.lin_size)]
                   for agent_type in self.types]
    self.history = 4
    self.get_plots()
def p(x, mask):
    """Render matrix ``x`` as a Purples heatmap and save it to mat.png.

    ``mask`` is currently unused: the masked grey/color blending below is
    commented out, so the plain heatmap is drawn instead.
    """
    # why is the resolution so bad?
    #plt.imsave("mat.png", x, format="png", cmap=cm.hot)
    fig, ax = plt.subplots(figsize=(20, 20))
    color = cm.Purples(plt.Normalize()(x))
    grey = color.copy()
    # Disabled masked-blend variant (kept for reference):
    #grey[:,:,0] = 0.6
    #grey[:,:,1] = 0.6
    #grey[:,:,2] = 0.65
    #grey[:,:,0] = 0.9
    #grey[:,:,1] = 0.9
    #grey[:,:,2] = 0.95
    #mask = mask[:,:,np.newaxis]
    #im = ax.imshow(mask * color + (1-mask) * grey)
    im = ax.imshow(x, cmap=cm.Purples)
    plt.Axes(fig, [0, 0, 1, 1])
    ax.patch.set_edgecolor("black")
    # Linewidth must be numeric: current matplotlib rejects the string "1".
    ax.patch.set_linewidth(1)
    ax.axis("off")
    #plt.show()
    #import pdb; pdb.set_trace()
    plt.savefig("mat.png", bbox_inches="tight", pad_inches=0)
# LOAD FILES FOR VARIABLE RATE # load_file2 = '/may12' # lwe2 = np.load(os.getcwd() + '/results' + load_file2 + '/lwe_may9.npy') # lwp2 = np.load(os.getcwd() + '/results' + load_file2 + '/lwp_may9.npy') # le2 = np.load(os.getcwd() + '/results' + load_file2 + '/l_e_may9.npy') # lp2 = np.load(os.getcwd() + '/results' + load_file2 + '/l_p_may9.npy') # assume at this point that files are formatted so that l_ is 2 x ib and l_2 is 2 x rate # SET UP STUFF TO PLOT b_c = (1, 0.5, 0.5) # Define burst colour. '#D3084C' b_light_c = (1, 0.8, 0.8) # Define second burst colour. '#CB7390' colour_samples = np.linspace(0.6, 0.95, 2) Oranges = [cm.Reds(ix) for ix in colour_samples] Purples = [cm.Purples(ix) for ix in colour_samples] fs = 20 fw = 'bold' y_axis1i = lp y_axis1ii = le + lp x_axis1 = np.arange(2, 6, 1) width = 0.35 y_axis2i = lp1 / lp10 y_axis2ii = le1 + lp1 x_axis2 = isis.sum(0)[0] / ( 8192 * 123 ) * 1000 #np.linspace(0.003, 0.018, 15) #np.linspace(0.004, 0.014, 10) # PLOT STUFF
geojson['properties']['category_more'] = list(cat_distinct) geojson['properties']['time_distrib'] = list(timeofday_distr) geojson['properties']['time_more'] = list(timeofday_distinct) geojson['properties']['days_distrib'] = list(dayofweek_distr) geojson['properties']['days_more'] = list(dayofweek_distinct) geojson['properties']['weight'] = float(theta) neighborhoods.append(geojson) neighborhoods.sort(key=lambda x: x['properties']['weight'], reverse=True) a = pretty_floats({"type": "FeatureCollection", "features": neighborhoods}) cat_colors = [ cmlib.Oranges(x) for x in np.linspace(0, 1, len(main_cats_plot)) ] time_colors = [cmlib.Greens(x) for x in np.linspace(0, 1, len(timeOfDay))] day_colors = [cmlib.Purples(x) for x in np.linspace(0, 1, len(dayOfWeek))] # select top regions based on volume MAX_REGIONS = args.max_regions results = sorted(enumerate(a['features']), key=lambda x: x[1]['properties']['weight'], reverse=True)[:MAX_REGIONS] with zipfile.ZipFile(city + '.zip', 'w') as myzip: for region_id, res in results: stats = res['properties'] fig, ax = plt.subplots(1, 4, figsize=(21, 6)) fig.suptitle("{} - Weight: {}%".format(stats['name'], int(100 * stats['weight'])), size=18)