def _init_plot(self, case):
    """Create one row of subplots, one per downstream position, for *case*.

    :param case: must provide ``x`` (sequence of positions), ``xD``
        (downstream distances in rotor diameters, one per subplot) and
        ``case_name`` (used as the figure title).
    """
    n_x = len(case.x)
    # squeeze=False keeps `axes` as a 2-D array even when n_x == 1; the
    # default squeezing returns a bare Axes there, which would break the
    # zip() below.  (Also dropped an unused `colors` computation.)
    fig, axes = plt.subplots(1, n_x, sharey=False, figsize=(5 * n_x, 5),
                             squeeze=False)
    fig.suptitle(case.case_name)
    for ax, xD in zip(axes[0], case.xD):
        ax.set_title('x/D = %.1f' % xD)
def sieve(self, table):
    # type: (Table)->None
    """Plot the table data with sieved interpolations and their variation.

    Draws a two-panel figure: the upper panel shows the data points with
    every sieved interpolation curve overlaid; the lower panel shows the
    relative uncertainty band of the data together with the variation of
    each sieved interpolation against the first one.  The figure is then
    written out via ``self._save``.
    """
    # Figure geometry: margins and the split between the two panels.
    m = (0.22, 0.13, 0.1, 0.1)  # left, bottom, right, top
    w, h = 1 - m[0] - m[2], 1 - m[1] - m[3]
    h1 = h * 0.6  # upper panel takes 60% of the drawable height
    fig = matplotlib.pyplot.figure(figsize=(6.4, 9.05))
    ax1 = fig.add_axes((m[0], m[1] + (h - h1), w, h1))
    ax2 = fig.add_axes((m[0], m[1], w, h - h1), sharex=ax1)
    # Label each configured interpolator as "kind/axes".
    interp_list = [("{}/{}".format(i.kind, i.axes), cast(AbstractInterpolator, i)) for i in self.interpolators]
    interp_for_variation = []  # type: List[Tuple[str, InterpType]]
    # The first interpolator acts as the reference for draw_variations.
    interp_for_variation.append(interp_list[0])
    # first plot
    self.draw_data(ax1, table)
    for index, (label, interp) in enumerate(interp_list):
        ips = SievedInterpolations(table, interp).interpolations
        c = cm.tab10(index)  # one color per interpolator kind
        for ip in ips.values():
            interp_for_variation.append(("no name", ip))
            ax1.plot(*(self._build_x_y(table, ip)), label=label, linewidth=0.5, c=c)
            label = ""  # to remove label for the second and later lines
    # second plot: relative uncertainty envelope of the data itself
    ep, em = table["unc+"] / table["value"], -table["unc-"] / table["value"]
    ax2.plot(table.index, ep, color="black", label="relative uncertainty of data")
    ax2.plot(table.index, em, color="black")
    # Worst-case variation/badness across all sieved interpolations;
    # shown as a legend-only entry (empty plot with a blank marker).
    v, b = self.draw_variations(ax2, table, interp_for_variation, label="")
    ax2.plot([], [], " ", label=f"Variation={v:.2%}; Badness={b:.3}")
    self.set_labels(ax1, table, x=False, title="{file_name}")
    self.set_labels(ax2, table, y=f"Variation")
    ax1.set_xscale("linear")
    ax1.set_yscale("log")
    ax1.tick_params(labelbottom=False)  # x tick labels only on the lower panel
    ax1.legend()
    ax2.legend()
    self._save(fig)
def draw_variations(self, ax, table, interp_list, **kwargs):
    # type: (Axes, Table, List[Tuple[str, InterpType]], Any)->Tuple[float, float]
    """Plot each interpolator's relative deviation from the first one.

    The first entry of *interp_list* is the reference; every other entry is
    drawn on *ax* as (y - y0) / y0.  Returns the pair (worst relative
    variation, worst badness), where badness is the deviation measured in
    units of the interpolation uncertainty.
    """
    _, reference = interp_list[0]
    ref_x, ref_y = self._build_x_y(table, reference)
    total = len(interp_list)
    worst_variation = []  # type: List[float]
    worst_badness = []  # type: List[float]
    for idx, (label, candidate) in enumerate(interp_list[1:]):
        if isinstance(candidate, Interpolation):
            interpolation = candidate
        else:
            interpolation = candidate.interpolate(table)
        xs = []  # type: List[float]
        ys = []  # type: List[float]   # collected but not plotted
        errs = []  # type: List[float]  # collected but not plotted
        rel_dev = []  # type: List[float]
        badness = []  # type: List[float]
        for x, y0 in zip(ref_x, ref_y):
            try:
                y = interpolation(x)
                err = max(abs(interpolation.unc_m_at(x)), interpolation.unc_p_at(x))
            except ValueError:
                continue  # point not covered by this interpolation
            xs.append(x)
            ys.append(y)
            errs.append(err)
            rel_dev.append((y - y0) / y0)
            badness.append(abs(y - y0) / err)
        # Pair-up coloring when the list holds two curves per interpolator
        # (plus the reference); otherwise one color per curve.
        if total == len(self.interpolators) * 2 + 1:
            color_index = int(idx / 2)
        else:
            color_index = idx + 1
        plot_kw = {"linewidth": 0.5, "label": label, "c": cm.tab10(color_index)}
        plot_kw.update(kwargs)  # caller-supplied options win
        ax.plot(xs, rel_dev, **plot_kw)
        worst_variation.append(max(abs(d) for d in rel_dev))
        worst_badness.append(max(badness))
    return max(worst_variation), max(worst_badness)
def sample_clouds(run, result_set, cols=2):
    """Plot each medoid solution together with its sampled risk cloud.

    Reads ``<run>_<result_set>_risk.csv`` for the medoid objectives and one
    ``<run>__<i>.csv`` per solution for the samples, then scatters the
    medoid point and its cloud on one subplot per solution.

    :param run: run identifier used to build the CSV file names
    :param result_set: result-set identifier used in the medoid file name
    :param cols: number of subplot columns in the grid
    :return: the matplotlib Figure containing the grid of scatter plots
    """
    medoid_obj = {}
    sample_obj = {}
    base = 'active_results/risk_analysis/'
    medoid_df = pd.read_csv(base + run + '_' + result_set + '_risk.csv',
                            delimiter=',')
    medoid_obj[run] = medoid_df[['Deaths', 'Economic impact']]
    n_sol = len(medoid_df.index)
    rows = int(np.ceil(n_sol / cols))
    color = iter(cm.tab10(np.linspace(0, 1, n_sol)))
    sample_obj[run] = {}
    # squeeze=False keeps `axes` 2-D even for a single row, so the
    # axes[row, col] indexing below cannot fail when n_sol <= cols.
    fig, axes = plt.subplots(nrows=rows, ncols=cols, figsize=(20, 10),
                             squeeze=False)
    for row in range(rows):
        for col in range(cols):
            i = row * cols + col
            if i >= n_sol:
                # The grid may have spare cells when n_sol is not a
                # multiple of cols; hide them instead of reading a
                # non-existent sample file (previously an IndexError).
                axes[row, col].axis('off')
                continue
            sample_df = pd.read_csv(base + run + '__' + str(i) + '.csv',
                                    delimiter=',')
            sample_obj[run][i] = sample_df[['Deaths', 'Output']]
            index = medoid_df.index[i]
            c = next(color)
            # Medoid point (economic impact negated so "better" plots upward).
            axes[row, col].scatter(medoid_obj[run].Deaths[index],
                                   -medoid_obj[run]["Economic impact"][index],
                                   color=c)
            # Sample cloud in the same color, faint and small.
            axes[row, col].scatter(sample_obj[run][i].Deaths,
                                   -sample_obj[run][i].Output,
                                   color=c, alpha=0.1, s=10)
    return fig
def nuclear_top10_plot():
    """Plot the yearly nuclear share (%) for the top-10 producing countries.

    Uses the module-level ``nuclear_top10`` (to pick the countries) and
    ``nuclear_prod`` (for the per-year data) and shows the figure.
    """
    list_countries = nuclear_top10["country_or_area"].unique()
    n = len(list_countries)  # number of colors needed for the plot
    fig, ax = plt.subplots(figsize=(13, 8))
    plt.xticks(fontsize=12, rotation=90)
    plt.yticks(fontsize=12)
    # Configure the X axis to show every year in sequence, not every 5th.
    # NOTE(review): this limit is overridden by plt.xlim(1990, 2014) below,
    # so only the per-year tick locator survives — confirm intent.
    ax.set_xlim(0, 2014)
    ax.xaxis.set_major_locator(MultipleLocator(1))
    # Add a grid so the plot is easier to read.
    ax.grid(which='major', color='#CCCCCC', linestyle='--')
    ax.grid(which='minor', color='#CCCCCC', linestyle=':')
    # Define the color cycle (one color per country).
    # https://matplotlib.org/3.3.3/gallery/color/colormap_reference.html
    color = iter(cm.tab10(np.linspace(0, 1, n)))
    for country in list(list_countries):
        country_data = nuclear_prod[nuclear_prod.country_or_area.isin(
            [country])].sort_values('year')
        c = next(color)
        # "% udział" is the share column in the source data (Polish: "% share").
        plt.plot(country_data["year"], country_data["% udział"] * 100,
                 label=country, c=c)
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left',
               borderaxespad=0., fontsize=15)
    plt.ylabel("nuclear energy %", fontsize=20)
    plt.xlabel('Year', fontsize=20)
    plt.title('% of nuclear energy production per top 10 countries per year',
              fontsize=24)
    plt.xlim(1990, 2014)
    plt.show()
def saveplot(present: dict, whitelist: Whitelist, dt_min: datetime,
             dt_max: datetime, strict_xlim: bool = True):
    """Render a timeline of present names and save it as ``slide.png``.

    Draws one horizontal line per name for each of its presence intervals,
    adds a sponsor banner, writes the figure to ``slide_tmp.png`` and then
    copies it over ``slide.png`` (so the final file is never half-written).

    :param present: Dict of name:datetimes entries
    :param whitelist: Whitelist from whitelist_handler; provides per-id
        display name and optional 'color' / 'outline' styling
    :param dt_min: Minimum datetime (left edge of the x axis)
    :param dt_max: Maximum datetime (right edge of the x axis)
    :param strict_xlim: force the x limits to [dt_min, dt_max] even when
        there is data outside that window
    """
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(16, 9),
                                   gridspec_kw={'height_ratios': [6, 1]})
    plt.subplots_adjust(bottom=0.05, top=0.95, hspace=0.15)
    # ax1.spines["top"].set_visible(False)
    # ax1.spines["right"].set_visible(False)
    # plt.title(dt_min.strftime('%d-%m-%Y %H:%M') + ' - ' + dt_max.strftime('%d-%m-%Y %H:%M'))
    # Configure the x-axis such that it takes dates.
    # fig.autofmt_xdate()
    ax1.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
    ax1.xaxis.set_major_locator(mdates.HourLocator())
    # if dt_max - dt_min <= datetime.timedelta(hours=4):
    #     ax1.xaxis.set_major_locator(mdates.MinuteLocator(byminute=[0, 15, 30, 45]))
    #
    # elif dt_max - dt_min <= datetime.timedelta(hours=24):
    #     ax1.xaxis.set_major_locator(mdates.HourLocator())
    #
    # else:
    #     # plt.gcf().autofmt_xdate(rotation=30, ha='center')
    #     ax1.xaxis.set_major_locator(mdates.DayLocator())
    #     ax1.xaxis.set_minor_locator(mdates.HourLocator(byhour=12))
    #     ax1.xaxis.set_major_formatter(ticker.NullFormatter())
    #     ax1.xaxis.set_minor_formatter(mdates.DateFormatter('%A\n%d-%m-%y'))
    # plt.gca().xaxis.set_major_locator(mdates.HourLocator())
    ax1.yaxis.set_ticks_position('left')
    ax1.xaxis.set_ticks_position('bottom')
    # For each name, plot its datetimes.
    # Each name gets a unique y coordinate.
    color = itertools.cycle(cm.tab10(np.linspace(0, 1, cm.tab10.N)))
    for i, id_ in enumerate(present):
        path_effects = []
        # Whitelist entry may pin a color; otherwise take the next cycled one.
        if 'color' in whitelist.names[id_]:
            c = whitelist.names[id_]['color']
        else:
            c = next(color)
        y = [i + 1, i + 1]  # flat segment at this name's row
        lw = 15
        if 'outline' in whitelist.names[id_]:
            # Black stroke behind the bar; shrink the bar so the outline shows.
            path_effects.append(pe.withStroke(linewidth=lw, foreground='k'))
            lw -= 3
        for a in present[id_]:
            # print(id_, a)
            ax1.plot(a, y, markersize=8, linewidth=lw, solid_capstyle='round',
                     color=c, path_effects=path_effects)
    # Use the names as yticks
    ax1.set_yticks(range(1, len(present) + 1))
    ax1.set_yticklabels([whitelist.names[id_]['name'] for id_ in present])
    if strict_xlim or not present:
        ax1.set_xlim(dt_min, dt_max)
    ax1.set_ylim(0, len(present) + 1)
    ax1.grid()
    if not present:
        # No entries at all: show a big watermark instead of an empty grid.
        print('NO DATA')
        fig.text(0.5, 0.5, 'NO DATA', size=50, ha="center", va="bottom",
                 color='#aaaaaa', weight='bold')
    # Sponsor banner in the small bottom axes.
    ax2.text(-100, 250, 'Sponsored by', size=20, ha="right", va="bottom",
             color='#68686d', weight='bold')
    im = image.imread('./nedap.png')
    ax2.axis('off')
    ax2.imshow(im)
    # size = fig.get_size_inches()*300
    # # plt.imshow(im, aspect='auto', zorder=-1)
    # print(size)
    # ax.figure.figimage(im, size[0]*0.5, 100, zorder=1)
    # Write to a temp file first, then copy into place atomically-ish.
    plt.savefig('slide_tmp.png', dpi=300)
    plt.close('all')
    print('Moving file')
    copy2('slide_tmp.png', 'slide.png')
    return
capsize=3, label='Measurements') else: ax.scatter(meas[:, 0], meas[:, 1], color='k', marker='o', zorder=0, s=10) class WindRosePlot(): def load_data(self, data_type, case_name): file_path = data_path + case_name + '_%s_%s.dat' % (data_type, 'WFeff') if os.path.isfile(file_path): return np.genfromtxt(file_path, skip_header=True) def __call__(self, case, result_dict, cLES='b', cRANS='g', lw=2): '''Plot wind farm efficiency as function of wind direction''' ax = plt.figure(figsize=(15, 5)).gca() colors = cm.tab10(np.linspace(0, 1, len(result_dict))) # @UndefinedVariable ref_data = {m: self.load_data(m, case.case_name) for m in ['WFdata', 'RANS', 'LES']} # Plot reference data for key, dat in ref_data.items(): if dat is None: continue elif key == 'WFdata': ax.fill_between(dat[:, 0], dat[:, 1] - dat[:, 2], dat[:, 1] + dat[:, 2], color='k', alpha=0.3, label='Measurements') elif key == 'RANS': ax.plot(dat[:, 0], dat[:, 1], color=cRANS, linewidth=lw, label='RANS') if dat.shape[1] == 2: ax.plot(dat[:, 0], dat[:, 2], color=cRANS, dashes=[5, 2], linewidth=lw, label='RANS (gaus avg)') elif key == 'LES':
# Plot every cross-frequency power spectrum for each requested spectrum type.
# Relies on module-level: my_spectra, freq_list, like_product_dir, ell_max.
for s1, spec in enumerate(my_spectra):
    plt.figure(figsize=(12, 6))
    if spec == "TE":
        # TE is not symmetric in the two frequencies: take the full product.
        cross_freq_list = [
            "%sx%s" % (f0, f1) for f0, f1 in product(freq_list, freq_list)
        ]
    else:
        # Symmetric spectra: combinations with replacement suffice.
        cross_freq_list = [
            "%sx%s" % (f0, f1) for f0, f1 in cwr(freq_list, 2)
        ]
    if spec == "TT":
        plt.semilogy()  # TT spans several orders of magnitude
    # NOTE(review): only 10 colors are drawn here; with more than 10 cross
    # spectra next(color) raises StopIteration — confirm freq_list size.
    color = iter(cm.tab10(np.linspace(0, 1, 10)))
    for cross_freq in cross_freq_list:
        print(cross_freq)
        lb, Db, sigmab = np.loadtxt("%s/spectra_%s_%s.dat" % (like_product_dir, spec, cross_freq),
                                    unpack=True)
        # NOTE(review): `id` shadows the builtin of the same name; harmless
        # here but worth renaming if this script grows.
        id = np.where(lb < ell_max)  # keep only multipoles below ell_max
        c = next(color)
        plt.errorbar(lb[id], Db[id], sigmab[id], color=c,
                     label="%s %s" % (spec, cross_freq), fmt=".")
def makeFits(self, model):
    """Fit the correlation curves with *model*, plot diffusivity, return a status message.

    Results (fitted curves and, for non-linear models, the diffusivity
    list) are cached in ``self.fitCache`` keyed by model name, so repeated
    calls with the same model skip the fitting entirely.

    :param model: name of the curve-fitting model understood by
        ``fitCorrelationsToFunction``
    :return: human-readable status message describing what was done
    """
    print(f'Fitting with curve fitting model "{model}"')
    qPoints = numpy.arange(
        1, self.correlation.shape[1])  # All valid values for q
    D = None  # diffusivity data; stays None for models that don't provide it
    if model in self.fitCache:
        # This fitting's already been calculated
        self.fitCurves = self.fitCache[model][0]  # pull curves back out
        D = self.fitCache[model][1]  # as well as the diffusivity data
        msg = f'restored fitting for fitting model "{model}"'
    else:
        print('Calculating curve fitting over', qPoints.shape[0], 'curves')
        # q-correction-factor for fitting
        corrQ = (2 * numpy.pi * self.scalingFactor /
                 ((self.correlation.shape[1]) * 2))
        # Dirty hack to prevent any sneaky rows of zeros breaking the curve fitting
        # Think this is only caused by overtly small sample sets
        # copy correlation so we can edit it without messing with the data
        fitData = numpy.copy(self.correlation)
        empties = numpy.where(~fitData.any(axis=0))[0]
        print(f'Forward-filling empty q-curves at {empties}')
        # NOTE(review): if the LAST q-column is empty, row + 1 indexes past
        # the array and raises IndexError — presumably never happens with
        # real data; confirm.
        for row in empties:
            fitData[:, row] = fitData[:, row + 1]
            # Just copy the next row, we're not doing anything with it anyway
        # Find fitted equation paramaters
        self.fitParams = fitCorrelationsToFunction(
            fitData, qPoints, model, qCorrection=corrQ,
            timeSpacings=self.deltas)
        # and generate plots with that data
        self.fitCurves = generateFittedCurves(self.fitParams, qPoints,
                                              timeSpacings=self.deltas,
                                              frameRate=self.fps,
                                              numFrames=self.numFrames,
                                              qCorrection=corrQ)
        # First curve doesn't get a fit, but good to preserve dimensions anyway, so insert a dummy row
        self.fitCurves = numpy.concatenate((numpy.zeros(
            (1, self.fitCurves.shape[1])), self.fitCurves), 0)
        msg = f'{model} fitting complete'
        if model != 'linear':
            # linear doesn't do diffusivity
            # do other models? I don't know.
            # extract diffusivity curve from fitting data too
            # (third parameter of each fitted segment, skipping failed fits)
            D = [seg[2] for seg in self.fitParams[0] if seg is not None]
        self.fitCache[model] = (
            self.fitCurves, D
        )  # cache fitting data for reuse later, if necessary
    if D is not None:
        # plot diffusivity curve
        self.mpl_d.plot(qPoints, D, color=cm.tab10(0), label='Diffusivity')
        self.mpl_d.legend(fontsize='x-small')
    # remove the 'calculating' text from mpl_d
    if self.mpl_d.loaders:
        self.mpl_d.loaders.pop().remove()
    return msg