def plothisto(data_c, data_nc, binsize, ax):
    """Plot overlaid, unit-normalized histograms for collector and
    non-collector samples with the given bin width, overlay fitted normal
    pdf curves, and format y-tick labels with a decimal comma.

    Parameters
    ----------
    data_c : array-like
        Collector sample values.
    data_nc : array-like
        Non-collector sample values.
    binsize : float
        Width of one histogram bin.
    ax : matplotlib.axes.Axes
        Axes to draw on.

    Returns
    -------
    matplotlib.axes.Axes
        The same axes, for chaining.
    """
    # Common bin edges covering both samples, aligned to multiples of binsize.
    min_c = np.min(data_c)
    min_nc = np.min(data_nc)
    max_c = np.max(data_c)
    max_nc = np.max(data_nc)
    minedge = np.floor(np.min((min_c, min_nc)) / binsize) * binsize
    maxedge = np.floor(np.max((max_c, max_nc)) / binsize + 1) * binsize
    binedges = np.arange(minedge, maxedge + binsize, binsize)

    # Display properties for the histograms and fitted-pdf lines of the
    # collector and non-collector samples.
    coll_props = {
        "linewidth": 1,
        "alpha": 0.5,
        "color": [1, 0.6, 0.2],
        'edgecolor': 'w'
    }
    nc_props = {
        "linewidth": 1,
        "alpha": 0.5,
        "color": [0.41, 0.24, 0.98],
        'edgecolor': 'w'
    }
    coll_fit_props = {
        "linewidth": 1,
        "alpha": 1,
        'ls': '--',
        "color": [0.9, 0.39, 0.05]
    }
    nc_fit_props = {
        "linewidth": 1,
        "alpha": 1,
        'ls': '--',
        "color": [0.41, 0.24, 0.98]
    }

    # Weights so that the bar heights of each histogram sum to 1.
    weights_c = np.ones_like(data_c) / len(data_c)
    weights_nc = np.ones_like(data_nc) / len(data_nc)

    # Histograms.  FIX: the `normed` kwarg (deprecated, removed in
    # matplotlib >= 3.1) is dropped; the explicit weights above already
    # provide the desired normalization, so behavior is unchanged on old
    # matplotlib and no longer crashes on new matplotlib.
    ax.hist(data_nc, bins=binedges, weights=weights_nc, **nc_props)
    ax.hist(data_c, bins=binedges, weights=weights_c, **coll_props)

    # Fitted normal pdfs, scaled by binsize so the curve height matches the
    # normalized bar heights.
    ax.plot(binedges,
            binsize * norm.pdf(binedges, np.mean(data_c), np.std(data_c)),
            **coll_fit_props)
    ax.plot(binedges,
            binsize * norm.pdf(binedges, np.mean(data_nc), np.std(data_nc)),
            **nc_fit_props)

    # Replace the decimal point with a comma in tick labels.  FIX:
    # str.replace is equivalent to the old slicing around index('.') for
    # labels that contain a dot, and no longer raises ValueError for
    # integer-valued labels that do not.
    ax.get_yaxis().set_major_formatter(
        tkr.FuncFormatter(lambda x, pos: str(x).replace('.', ',')))
    return ax
def plot_contact_res_bsaasa(tablefile, asafile, output, skip_none_contact=True, size=(1440, 1400), dpi=72):
    """Plot a per-residue contact-area heatmap flanked by BSA/ASA bar charts.

    Builds a figure with a central residue-vs-residue contact-area heatmap
    (seaborn), a bottom bar chart of BSA/ASA percentages for chain A's
    residues, a left bar chart for chain B's residues, and a right-hand
    colorbar, then saves it to ``output``.

    Parameters
    ----------
    tablefile : str
        Path to the contact table; its basename (up to the first '.') is
        used as the structure name in the title.
    asafile : str
        Path to the ASA data file.
    output : str
        Path the figure is saved to.
    skip_none_contact : bool
        Forwarded to the data-loading helpers.
    size : tuple of int
        Figure size in pixels (width, height); converted to inches via dpi.
    dpi : int
        Resolution used both for the inch conversion and for saving.
    """
    # Project helpers supply the two BSA/ASA mappings and the contact-area
    # DataFrame (rows = chain B residues, columns = chain A residues).
    res_bsa_asa_a, res_bsa_asa_b = get_res_bsa_vs_asa(tablefile, asafile, skip_none_contact)
    df = get_res_contact_area(tablefile, skip_none_contact)
    columns = df.columns
    indexes = df.index
    structurename = tablefile.split(os.sep)[-1].split('.')[0]
    # Pixel size -> whole inches; the same tuple doubles as the
    # subplot2grid grid shape below.
    inchsize = (int(size[0] / dpi), int(size[1] / dpi))
    fig = plt.figure(figsize=inchsize)
    # NOTE(review): subplot2grid expects shape=(nrows, ncols), but the spans
    # below mix inchsize[0] (width) and inchsize[1] (height) — this appears
    # to work for near-square sizes like the default; verify for strongly
    # non-square `size` values.
    ax1 = plt.subplot2grid(inchsize, (0, 1), colspan=inchsize[0] - 2, rowspan=inchsize[1] - 1)  # heatmap
    ax2 = plt.subplot2grid(inchsize, (inchsize[0] - 1, 1), colspan=inchsize[0] - 2, rowspan=1)  # bottom bars
    ax3 = plt.subplot2grid(inchsize, (0, 0), colspan=1, rowspan=inchsize[1] - 1)  # left bars
    ax4 = plt.subplot2grid(inchsize, (0, inchsize[1] - 1), colspan=1, rowspan=inchsize[1] - 1)  # colorbar
    # Colorbar tick formatter: append the square-Angstrom unit.
    cbar_fmt = mtick.FuncFormatter(lambda x, pos: "{} Ų".format(x))
    sns.heatmap(df, ax=ax1, annot=False, xticklabels=False, yticklabels=False, cmap='YlOrRd', cbar_ax=ax4, cbar_kws=dict(format=cbar_fmt))
    # Draw an outline around the colorbar.
    ax4.set_frame_on(True)
    # Grey dot at the center of every heatmap cell.
    X, Y = np.meshgrid(np.arange(0.5, len(columns)), np.arange(0.5, len(indexes)))
    ax1.scatter(X, Y, color='gray', s=3)
    # Bottom bar chart: BSA/ASA percentage per chain-A residue.
    sns.barplot(x=list(df.columns), y=list(res_bsa_asa_a[e] * 100 for e in df.columns), color='#005599', ax=ax2, label="BSA/ASA %")
    ax2.set_xticklabels(list(df.columns), rotation=90)
    ax2.set_ylim([0, 100])
    ax2.set_yticks([])
    # Twin axis only carries the 0%/100% percentage labels.
    ax2t = ax2.twinx()
    ax2t.set_yticks([0, 100])
    ax2t.set_ylim([0, 100])
    ax2t.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.0f%%'))
    legend = fig.legend(loc=(0.09, 0.11))
    # Left bar chart: BSA/ASA percentage per chain-B residue (horizontal).
    sns.barplot(y=list(df.index), x=list(res_bsa_asa_b[e] * 100 for e in df.index), color='#005599', ax=ax3)
    ax3.set_yticklabels(df.index)
    ax3.set_xlim([0, 100])
    ax3.set_xticks([])
    ax3t = ax3.twiny()
    ax3t.set_xticks([0, 100])
    ax3t.set_xlim([0, 100])
    ax3t.xaxis.set_major_formatter(mtick.FormatStrFormatter('%.0f%%'))
    # The heatmap's y-label would collide with the left bar chart.
    ax1.set_ylabel("")
    plt.suptitle("Buried surface area in structure %s.\n%s" % (structurename, df.index.name), fontsize=20, y=0.92)
    fig.savefig(output, dpi=dpi)
def montecarlorisk(num_trials, annual_escalation, subsystem, output_file):
    """Monte Carlo simulation of LSST contingency risk exposure.

    Pulls active risks from the project JIRA (RM project), escalates their
    costs to as-spent dollars, runs ``num_trials`` Monte Carlo iterations in
    which each risk fires according to its probability and obligation model
    (fixed trigger date, random occurrence(s), or distributed over years),
    and produces a contingency spend curve plus summary histograms and
    confidence-level cost tables.

    Parameters
    ----------
    num_trials : int
        Number of Monte Carlo iterations.
    annual_escalation : float
        Fractional annual escalation rate (e.g. 0.03 for 3%).
    subsystem : str
        Risk subset selector; one of DOE/NSF/ALL/DM/TS/CAM/SE/PMO/EPO/NSF_P6
        (case-insensitive).
    output_file : bool
        If true, redirect stdout to ./test.txt for the duration of the run.

    Notes
    -----
    Relies on module-level names not visible here: ``username``/``password``
    (JIRA credentials), ``xstr``, ``percentage``, ``format_date``, ``hist``,
    and the pylab namespace (``gca``, ``gcf``, ``ceil``, ``array``,
    ``date2num``, ``randrange``).
    """
    ## define output location; if variable output_file is true then output
    ## goes to test.txt in working directory
    fhold = sys.stdout
    if output_file:
        f = open('./test.txt', 'w')
        sys.stdout = f

    #########################################################################
    ######################  Some basic values  ##############################
    #########################################################################
    total_contingency = 81700.0  # total contingency in K$
    nyears = 9  ## number of years with construction activity
    date_start = "2014-06-01"
    date_end = "2022-10-01"
    date_commissioning_start = "2020-10-01"
    date_base_year = "2013"
    date_year_start = "2014"
    date_year_end = "2022"
    annual_esc = 1.0 + annual_escalation  # convert annual fractional escalation to factor
    yer = [
        '2013', '2014', '2015', '2016', '2017', '2018', '2019', '2020',
        '2021', '2022'
    ]
    final_totals_distribution = []
    #cost_lowest = np.zeros(1000)
    #cost_expected = np.zeros(1000)
    #cost_highest = np.zeros(1000)

    # Map the subsystem selector to a JQL component filter and a display name.
    subsystem = subsystem.upper()
    if subsystem == 'DOE':
        fundingstring = " AND component in ('Camera', 'DOE Funded Commissioning', 'DOE Funded Operations') "
        projectname = "LSST DOE"
    elif subsystem == 'NSF':
        fundingstring = " AND component not in ('Camera', 'DOE Funded Commissioning', 'DOE Funded Operations', 'Other', 'Operations') "
        projectname = "LSST NSF"
    elif subsystem == 'ALL':
        fundingstring = ""
        projectname = "LSST"
    elif subsystem == 'DM':
        fundingstring = " AND component = 'Data Management' "
        projectname = 'DM'
    elif subsystem == 'TS':
        fundingstring = " AND component = 'Telescope & Site' "
        projectname = 'Telescope & Site'
    elif subsystem == 'CAM':
        fundingstring = " AND component = 'Camera' "
        projectname = 'CAM'
    elif subsystem == 'SE':
        fundingstring = " AND component = 'Systems Engineering' "
        projectname = 'Systems Engineering'
    elif subsystem == 'PMO':
        fundingstring = " AND component = 'Project Management Office' "
        projectname = 'Project Management'
    elif subsystem == 'EPO':
        fundingstring = " AND component = 'Education and Public Outreach' "
        projectname = 'EPO'
    elif subsystem == 'NSF_P6':
        fundingstring = " AND component in ('Telescope & Site', 'Systems Engineering', 'Project Management Office') "
        projectname = "LSST"

    ##########################################################################
    ###################  Simple escalation model  ############################
    ##########################################################################
    # escalate[year] is the cumulative escalation factor from the base year;
    # escalate['dist_sum'] is the average factor, used for distributed risks.
    escalate = array(10)  # NOTE(review): dead assignment, immediately replaced by the dict below
    sum = 0.0  # NOTE(review): shadows the builtin `sum` inside this function
    escalate = {}  # a dictionary
    escalate[date_base_year] = 1.0
    for jj in range(nyears):
        escalate[yer[jj + 1]] = escalate[yer[jj]] * annual_esc
        sum += escalate[yer[jj + 1]]
    escalate['dist_sum'] = sum / nyears

    # Authenticate against project JIRA using module-level credentials.
    server = "https://jira.lsstcorp.org"
    auth_inf = (username, password)
    try:
        jira = JIRA(server=server, basic_auth=auth_inf)
    except:
        print(
            "ERROR: Jira authentication failed. Have you provided the correct username and password?"
        )
        return

    # Active risks without a retirement date (cf[13601]), ordered by trigger
    # date (cf[13108]).
    # NOTE(review): when fundingstring is '' (subsystem ALL) this produces
    # "... is EMPTYORDER BY ..." with no separating space — verify the query
    # is accepted in that case.
    query = "project=RM AND issuetype='RM-Risk' AND status='Active Risk/Opportunity' AND cf[13601] is EMPTY" + fundingstring + "ORDER BY cf[13108]"
    fields = "components,summary,customfield_13200,customfield_13404,customfield_13606,customfield_13107,customfield_13108,customfield_13110,customfield_13111,description"
    print(('\n\r Query to database \n\r\n\r' + query + '\n\r'))
    issues = jira.search_issues(query, maxResults=None, fields=fields)
    nrisks = len(issues)
    rows = []

    # Midpoint probability for each JIRA probability-range bucket.
    mean_prob_lookup = {
        '0%-1%': 0.005,
        '0%-5%': 0.025,
        '5%-10%': 0.075,
        '10%-25%': 0.17,
        '25%-50%': 0.37,
        '50%-75%': 0.63,
        '75%-100%': 0.88
    }
    rows = []
    # Flatten each JIRA issue into a plain dict of the fields we use.
    for i in range(len(issues)):
        rows.append({
            'riskid': int(''.join([i for i in issues[i].key if i.isdigit()])),
            'projectsystem': xstr(issues[i].fields.components[0].name),
            'current_probability': xstr(issues[i].fields.customfield_13200),
            'current_expense_expected': issues[i].fields.customfield_13404,
            'current_schedule_cost_expected': issues[i].fields.customfield_13606,
            'meanprobability': mean_prob_lookup[issues[i].fields.customfield_13200.value],
            'total_cost': 0.0,
            'obligationmodel': xstr(issues[i].fields.customfield_13107),
            # Risks without a trigger date sort to the front via 2000-01-01.
            'triggerdate': (datetime.datetime.strptime(
                issues[i].fields.customfield_13108,
                '%Y-%m-%d').date() if issues[i].fields.customfield_13108 else
                            datetime.date(2000, 1, 1)),
            'randomtrigger': (int(issues[i].fields.customfield_13110)
                              if issues[i].fields.customfield_13110 else 0),
            'risktitle': xstr(issues[i].fields.summary),
            'riskdescription': xstr(issues[i].fields.description),
            'randomperiod': xstr(issues[i].fields.customfield_13111)
        })

    # setup lists
    nyears = [1 for i in range(nrisks)]  # NOTE(review): rebinds the scalar `nyears` above to a per-risk list
    riskheader = [' ' for i in range(3000)]
    riskid = []  # issue.key
    projectsystem = []  # issue.fields.components
    current_probability = []  # issue.fields.customfield_13200
    current_expense_expected = []  # issue.fields.customfield_13404
    current_schedule_cost_expected = []  # issue.fields.customfield_13606
    meanprobability = []  # calculate from cf 13200
    total_cost = []  # issue.fields.customfield_13606 + issue.customfield_13404
    obligationmodel = []  # issue.fields.customfield_13107
    triggerdate = []  # issue.fields.customfield_13108
    randomtrigger = [
    ]  # issue.fields.customfield_13110 and issue.customfield_13111
    risktitle = []  # issue.fields.summary
    riskdescription = []  # issue.fields.description
    randomperiod = []

    ## Rule 0 - Accept all risks, simple passthrough
    ## print "\n\r Rule 1 - Accept only risks that have total cost of more than $1M \n\r"
    ## print "\n\r Rule 2 - Accept only risks that have expected exposure of more that $200K \n\r"
    ## print "\n\r Rule 3 - Accept risks that pass Rule 1 OR Rule 2 \n\r"

    ## Store the database values into arrays
    print('\n\r Summary of risks ordered by triggerdate \n\r\n\r')
    for ii in range(nrisks):
        lasttotalcost = (float(rows[ii]['current_expense_expected']) +
                         float(rows[ii]['current_schedule_cost_expected']))

        ##############################################################################
        ###################  Use simple model of escalation to convert to as-spent dollars
        ##############################################################################
        if rows[ii]['obligationmodel'] == "trigger":
            # Escalate to the trigger year, clamped to the construction span.
            yr = rows[ii]['triggerdate'].year
            yr = max(int(date_year_start), int(yr))
            yr = min(int(date_year_end), int(yr))
            lasttotalcost = lasttotalcost * escalate[str(yr)]
        else:
            # Random/distributed risks use the average escalation factor.
            lasttotalcost = lasttotalcost * escalate['dist_sum']
        ##############################################################################

        # Acceptance rule (Rule 0: accept everything).  Alternative rules
        # kept below for reference.
        if lasttotalcost >= 0.00:
            ## print("\n\r Rule 0 - Accept all risks, simple passthrough \n\r")
            ## Rule 1 - Accept only risks that have total cost of more than $1M
            ## if lasttotalcost >= 1000.00:
            ## Rule 2 - Accept only risks that have expected exposure of more that $200K
            ## if float(rows[ii]['meanprobability'])*lasttotalcost >= 200.0:
            ## Rule 3 - Accept risks that pass Rule 1 OR Rule 2
            ## if float(rows[ii]['meanprobability'])*lasttotalcost >= 200.0 or lasttotalcost >= 1000.00:
            riskid.append(rows[ii]['riskid'])
            projectsystem.append(rows[ii]['projectsystem'])
            current_probability.append(rows[ii]['current_probability'])
            current_expense_expected.append(
                rows[ii]['current_expense_expected'])
            current_schedule_cost_expected.append(
                rows[ii]['current_schedule_cost_expected'])
            meanprobability.append(float(rows[ii]['meanprobability']))
            obligationmodel.append(rows[ii]['obligationmodel'])
            triggerdate.append(rows[ii]['triggerdate'])
            randomtrigger.append(rows[ii]['randomtrigger'])
            risktitle.append(rows[ii]['risktitle'])
            riskdescription.append(rows[ii]['riskdescription'])
            total_cost.append(lasttotalcost)
            randomperiod.append(rows[ii]['randomperiod'])

            ## Print formatted output
            print(
                '{:>30} RM-{:4} {:>10} {:>22} {:>5} [{:>8.2f} {:>8.2f}] {:>8.2f} {:40} {:80}'
                .format(
                    rows[ii]['projectsystem'],
                    str(rows[ii]['riskid']),
                    str(rows[ii]['triggerdate']),
                    #rows[ii]['obligationmodel'][0:4],
                    rows[ii]['obligationmodel'],
                    #rows[ii]['randomtrigger'] % 1000,
                    rows[ii]['randomtrigger'],
                    lasttotalcost,
                    rows[ii]['meanprobability'],
                    float(rows[ii]['meanprobability']) * lasttotalcost,
                    str(rows[ii]['risktitle']),
                    str(rows[ii]['riskdescription']),
                ))
    nrisks = len(riskid)

    ## Print risks ordered by riskid
    print(('\n\r Summary of {:>3} risks ordered by riskid \n\r\n\r'.format(
        str(nrisks))))
    hold_riskid, hold_projectsystem, hold_risktitle = (list(t) for t in zip(
        *sorted(zip(riskid, projectsystem, risktitle))))
    for ii in range(nrisks):
        print('{:>30} RM-{:3} {:40}'.format(hold_projectsystem[ii],
                                            str(hold_riskid[ii]),
                                            hold_risktitle[ii]))

    ## Print risk description ordered by totalcost
    print(('\n\r Summary of {:>3} risks ordered by totalcost \n\r\n\r'.format(
        str(nrisks))))
    hold_total_cost, hold_riskdescription, hold_projectsystem, hold_riskid, hold_meanprobability = (
        list(t)
        for t in zip(*sorted(zip(total_cost, riskdescription, projectsystem,
                                 riskid, meanprobability),
                             reverse=True)))
    for ii in range(nrisks):
        print('{:>30} RM-{:3} ${:8,.7}K [{:<4}] {:<100}'.format(
            hold_projectsystem[ii], str(hold_riskid[ii]), hold_total_cost[ii],
            hold_meanprobability[ii], hold_riskdescription[ii]))

    ## Figure 4
    ## Interaction loop over risks. Also, plot fig 4 with the risk spend curve
    max_hold = 0.0
    fig4 = plt.figure(4)
    ax1 = fig4.add_subplot(111)

    ###################################################################
    ############  Begin main Monte Carlo iteration loop ################
    ###################################################################
    for ii in range(num_trials):
        delta_this_iteration = []
        triggerdate_this_iteration = []
        projectsystem_this_iteration = []
        riskid_this_iteration = []

        ###################################################################
        ############  Random loop over each risk           ################
        ###################################################################
        ##
        ## Each risk has a specified date of possible occurence. A risk can
        ## occur at a specified trigger date; at some random time; or a risk
        ## may occur more than once over a specified range of dates.

        ## Trigger case
        for jj in range(nrisks):
            if obligationmodel[jj] == "Trigger date":
                # Single Bernoulli draw at the risk's trigger date.
                choice = np.random.uniform(0.0, 1.0, 1)
                if choice <= meanprobability[jj]:
                    addit = float(total_cost[jj])
                else:
                    addit = float(0.0)
                delta_this_iteration.append(addit)
                triggerdate_this_iteration.append(triggerdate[jj])
                projectsystem_this_iteration.append(projectsystem[jj])
                riskid_this_iteration.append(int(riskid[jj]))

            ## Random case
            elif obligationmodel[jj] == "Random occurrence(s)":
                # nrandom independent draws at uniformly random dates within
                # the period selected by randomperiod.
                nrandom = randomtrigger[jj]
                #print("random risk; nrandom = "+str(nrandom))
                #periodcode = randomtrigger[jj] / 1000
                #print("random risk periodcode = "+str(periodcode))
                periodcode = 3
                if randomperiod[jj] == 'Construction only':
                    periodcode = 1
                elif randomperiod[jj] == 'Commissioning only':
                    periodcode = 2
                elif randomperiod[jj] == 'Both Construction and Commissioning':
                    periodcode = 3
                date1 = date_start
                date2 = date_commissioning_start
                if periodcode == 1:  # random during construction only
                    date1 = date_start
                    date2 = date_commissioning_start
                elif periodcode == 2:  # random during commissioning only
                    date1 = date_commissioning_start
                    date2 = date_end
                elif periodcode == 3:  # random throughout project
                    date1 = date_start
                    date2 = date_end
                for kk in range(nrandom):
                    # Random timestamp between date1 and date2.
                    stime = time.mktime(time.strptime(date1, '%Y-%m-%d'))
                    etime = time.mktime(time.strptime(date2, '%Y-%m-%d'))
                    ptime = stime + np.random.uniform(etime - stime)
                    randomdate = datetime.date.fromtimestamp(int(ptime))
                    #print(randomdate)
                    choice = np.random.uniform(0.0, 1.0)
                    if choice <= meanprobability[jj]:
                        # Cost is split evenly across the nrandom draws.
                        addit = float(total_cost[jj]) / float(nrandom)
                    else:
                        addit = float(0.0)
                    delta_this_iteration.append(addit)
                    triggerdate_this_iteration.append(randomdate)
                    projectsystem_this_iteration.append(projectsystem[jj])
                    riskid_this_iteration.append(int(riskid[jj]))

            ## Distributed case
            elif obligationmodel[jj] == "Distributed occurrence":
                if ii == 0:  # only on first pass through will triggerdate always have the proper value
                    #print ii,jj,triggerdate[jj],triggerdate[jj].year
                    ny = max(
                        triggerdate[jj].year - 2014, 1
                    )  # risk is distributed over this many years but must be at least 1
                    nyears[jj] = min(
                        ny, 8
                    )  # must store the corect values of nyears for each distributed risk
                for kk in range(nyears[jj]):
                    year = 2015 + kk  #kk starts at zero. Don't include short period in 2014
                    choice = np.random.uniform(0.0, 1.0, 1)
                    if choice <= meanprobability[jj]:
                        addit = float(total_cost[jj]) / float(nyears[jj])
                    else:
                        addit = float(0.0)
                    delta_this_iteration.append(addit)
                    triggerdate_this_iteration.append(
                        datetime.date(year, randrange(1, 12), 1)
                    )  # random month in year, always assign the first day of the month
                    projectsystem_this_iteration.append(projectsystem[jj])
                    riskid_this_iteration.append(int(riskid[jj]))
            else:
                sys.exit(" obligationmode not defined for risk " +
                         str(projectsystem[jj]) + str(riskid[jj]) + " " +
                         str(jj))
        ###################################################################
        ############  End short random loop over risk      ################
        ###################################################################

        # Since random and distributed risks have been added the lists are no
        # longer in date order.  Re-sort all four parallel lists by effective
        # trigger date.
        #print(riskid_this_iteration)
        triggerdate_this_iteration, delta_this_iteration, projectsystem_this_iteration, riskid_this_iteration = (
            list(t) for t in zip(*sorted(
                zip(triggerdate_this_iteration, delta_this_iteration,
                    projectsystem_this_iteration, riskid_this_iteration))))
        #print(type(riskid_this_iteration),riskid_this_iteration)
        #print(" ")
        #print(delta)

        # Compute the running sum
        xx_this_iteration = np.cumsum(delta_this_iteration)
        len_xx = len(xx_this_iteration)

        ###################################################################
        #############  Some diagnostic output  #############################
        ###################################################################
        nprintout = 5  # number of simulations with diagnostic output
        # NOTE(review): true division makes diagnostic_steps a float; the
        # float modulo below still selects roughly every num_trials/5-th
        # iteration but differs from integer-division behavior — confirm
        # intended cadence.
        diagnostic_steps = num_trials / nprintout
        if ii % diagnostic_steps == 0:
            print(('\n\r\n\r\n\r Diagnostic output for iteration ' + str(ii) +
                   ' \n\r'))
            for mm in range(len_xx):
                header = riskheader[riskid_this_iteration[mm]]
                line = [
                    header, projectsystem_this_iteration[mm],
                    riskid_this_iteration[mm],
                    str(triggerdate_this_iteration[mm]),
                    delta_this_iteration[mm], xx_this_iteration[mm]
                ]
                print('{:>6}{:>30} RM-{:3} {:>15} {:12.1f} {:12.1f}'.format(
                    *line))
                #print(line)

        # Store the grand totals
        # reserve the storage arrays on the first iteration
        if ii == 0:
            totals = np.zeros(len_xx)
            totals2 = np.zeros(len_xx)
        #print len(xx),len_xx,len(totals),len(totals2)
        totals += xx_this_iteration
        totals2 += xx_this_iteration * xx_this_iteration
        final_totals_distribution.append(xx_this_iteration[len_xx - 1] *
                                         0.001)  # Convert from K$ to M$

        ## The step method plots the spend curve, plot only every 50th iteration line
        if ii % 50 == 0:
            #print len(triggerdate),len(xx)
            #print(triggerdate)
            #print(" ")
            #print(xx)
            pylab.step(triggerdate_this_iteration,
                       total_contingency - xx_this_iteration,
                       where='post')  # plot the spend curve using step
            max_hold = max([max_hold, max(xx_this_iteration)])
            gca().xaxis.set_major_formatter(ticker.FuncFormatter(format_date))
    ###################################################################
    ###########      End Monte Carlo iteration loop     ###############
    ###################################################################

    ## Spend curve plot labeling
    dd1 = date2num(datetime.datetime.strptime('2014-01-01', "%Y-%m-%d").date())
    dd2 = date2num(datetime.datetime.strptime('2022-12-31', "%Y-%m-%d").date())
    yyy = 5.0 * ceil(total_contingency / 5.0)  # round y-limit up to a multiple of 5
    ax1.set_ylim(0.0, yyy)
    ax1.set_xlim(dd1, dd2)
    gcf().autofmt_xdate()

    # Plot some extra bold lines in the spend curve plot: mean and mean +/- 1 sigma.
    mean = totals / num_trials
    variance = totals2 / num_trials - mean * mean
    sigma = np.sqrt(variance)
    ax1.plot(triggerdate_this_iteration,
             total_contingency - mean,
             linewidth=5.0,
             color='blue')
    ax1.plot(triggerdate_this_iteration,
             total_contingency - mean + sigma,
             linewidth=5.0,
             color='black')
    ax1.plot(triggerdate_this_iteration,
             total_contingency - mean - sigma,
             linewidth=5.0,
             color='black')

    # Print tabular data
    #print "length of triggerdate",len(triggerdate_this_iteration),type(triggerdate_this_iteration)
    #print " mean ",len( mean ),type(mean)
    #print "length of sigma",len( sigma),type(sigma)
    for mm in range(len(triggerdate_this_iteration)):
        line = [
            str(triggerdate_this_iteration[mm]), total_contingency - mean[mm],
            total_contingency - mean[mm] - sigma[mm],
            total_contingency - mean[mm] + sigma[mm]
        ]
        print('{:>15} , {:12.1f}, {:12.1f}, {:12.1f}'.format(*line))

    # Plot the contingency funding curve in as spent USD
    if subsystem == 'NSF':
        fundingdates = [
            datetime.date(2014, 0o7, 0o1),
            datetime.date(2014, 10, 0o1),
            datetime.date(2015, 10, 0o1),
            datetime.date(2016, 10, 0o1),
            datetime.date(2017, 10, 0o1),
            datetime.date(2018, 10, 0o1),
            datetime.date(2019, 10, 0o1),
            datetime.date(2020, 10, 0o1),
            datetime.date(2021, 10, 0o1)
        ]
        # NOTE(review): 9 dates vs 8 levels — confirm before re-enabling the
        # commented-out pylab.step call below.
        fundinglevels = [
            2600., 13100., 23600., 34100, 44600., 55100., 65600., 76100.
        ]
        print(fundingdates)
        print(fundinglevels)
        # pylab.step(fundingdates,fundinglevels,linewidth=5.0,color='red',where='post')

    ## ax1.set_ylim([0.0,80.])
    pylab.title('%s Contingency spend curve in as-spent K-USD' % projectname)
    ax1.set_xlabel('Date')
    ax1.set_ylabel('Contingency Balance (as-spent K$)')
    ###################################################################
    ###########      End of spend curve plot            ###############
    ###################################################################

    # Total probability weighted cost
    weightedcost = 0.0
    for kk in range(nrisks):
        weightedcost += total_cost[kk] * meanprobability[kk]
    weightedcost = locale.currency(weightedcost * 0.001,
                                   grouping=True)  # convert to M$
    ## weightedcost = weightedcost*.001

    # Expected cost of risks from Monte Carlo
    expectedcost = locale.currency(mean[len_xx - 1], grouping=True)
    ## expectedcost = mean[len_xx-1]

    # Standard deviation of costs from Monte Carlo
    #deviationcost = sigma[nrisks-1]

    # 50,70,80,90,99% confidence level; output is formatted string
    hold50 = percentage(final_totals_distribution, 0.5)
    cellbound50 = locale.currency(hold50, grouping=True)
    # cellbound50 = hold50
    hold70 = percentage(final_totals_distribution, 0.7)
    cellbound70 = locale.currency(hold70, grouping=True)
    # cellbound70 = hold70
    hold80 = percentage(final_totals_distribution, 0.8)
    cellbound80 = locale.currency(hold80, grouping=True)
    # cellbound80 = hold80
    hold90 = percentage(final_totals_distribution, 0.9)
    cellbound90 = locale.currency(hold90, grouping=True)
    # cellbound90 = hold90
    hold99 = percentage(final_totals_distribution, 0.99)
    cellbound99 = locale.currency(hold99, grouping=True)
    # cellbound99 = hold99

    # Write the output
    print("\n\r Total number of iterations %d " % num_trials)
    print("\n\r Total number of risks %d " % nrisks)
    print("\n\r Probability weighted total cost of risks: " +
          str(weightedcost) + "M")
    print("\n\r Cost at 50 percent confidence level: " + str(cellbound50) +
          "M")
    print("\n\r Cost at 70 percent confidence level: " + str(cellbound70) +
          "M")
    print("\n\r Cost at 80 percent confidence level: " + str(cellbound80) +
          "M")
    print("\n\r Cost at 90 percent confidence level: " + str(cellbound90) +
          "M")
    print("\n\r Cost at 99 percent confidence level: " + str(cellbound99) +
          "M")

    ## Prepare the data for plotting all plots except the spend curve (Figures 1, 2, and 3)
    final_totals_distribution.sort(
    )  # sorts input from lowest to highest value
    num_trials100 = num_trials / 100.
    niter = list(range(num_trials))
    niter2 = [float(i) / num_trials for i in niter]  # cumulative fraction
    niter3 = [100. - float(i) / num_trials100 for i in niter]  # exceedance percent
    ylim = 1000.0
    if (num_trials > 10000):
        ylim = 1500.
    elif (num_trials <= 1000):
        ylim = 500.

    ##
    #######################################################################3
    # Plotting package below for everything except spend curve
    #######################################################################3

    ## Figure 1
    ##
    fig = plt.figure(1)
    ax = fig.add_subplot(111)
    ax.hist(final_totals_distribution, bins=30)
    ax.set_ylim([0.0, ylim])
    xlim = 20. * (int(max(final_totals_distribution) / 20.) + 1)
    ax.set_xlim([0.0, xlim])
    pylab.title('%s Risk Monte Carlo' % projectname)
    ax.set_xlabel('Total Cost as-spent $M')
    ax.set_ylabel('Number of occurances')
    ax.grid(True)
    textstring = "Number of iterations: %d " % num_trials
    textstring0 = "Number of risks: %d " % nrisks
    textstring1 = "Prob weighted risk exposure: " + str(weightedcost) + "M"
    textstring2 = "Cost at 50% confidence: " + str(cellbound50) + "M"
    textstring3 = "Cost at 80% confidence: " + str(cellbound80) + "M"
    pylab.text(.1, .85, textstring, transform=ax.transAxes)
    pylab.text(.1, .80, textstring0, transform=ax.transAxes)
    pylab.text(.1, .75, textstring1, transform=ax.transAxes)
    pylab.text(.1, .70, textstring2, transform=ax.transAxes)
    pylab.text(.1, .65, textstring3, transform=ax.transAxes)
    ax2 = ax.twinx()
    ax2.set_ylabel('Cumulative fraction of occurances', color='r')
    ax2.plot(final_totals_distribution, niter2, c='r')
    ax2.set_ylim([0.0, 1.0])
    # draw an arrow
    arga = {'color': 'r'}
    ax2.arrow(hold50,
              .50,
              10.,
              .00,
              shape='full',
              lw=2,
              head_length=3,
              head_width=.03,
              **arga)

    ##
    ## Figure 2
    ##
    fig = plt.figure(2)
    ax = fig.add_subplot(111)
    pylab.title('%s Risk Monte Carlo' % projectname)
    ax.set_xlabel('Total Cost as-spent $M')
    ax.set_ylabel('Percent Probability{Cost > x }')
    ax.grid(True)
    #
    #Xbackground = [[.6, .6],[.5,.5]]
    # plot the probability line
    ax.plot(final_totals_distribution, niter3)
    ax.set_xlim([0.0, xlim])
    ax.set_ylim([0.0, 100.0])
    # draw the background
    ## ax.imshow(Xbackground, interpolation='bicubic', cmap=cm.copper,
    ##           extent=(40.0,xlim, 0.0, 100.), alpha=.5) # alpha --> transparency
    # resample the x-axis
    xx = []
    yy = []
    nsteps = 110
    delx = xlim / (nsteps - 10)
    for ii in range(nsteps):
        xx.append(ii * delx)
    yy = np.interp(xx, final_totals_distribution, niter3)
    for jj in range(0, nsteps - 5, 3):
        x1 = xx[jj - 1]
        x2 = xx[jj + 1]
        y2 = yy[jj]
        ## mybar(ax,x1,x2,y2)
        ax.bar(xx[jj], yy[jj], align='center', color='r')
    # draw a few arrows and vertical lines
    ax.arrow(hold50 + 10,
             50,
             -10.,
             .0,
             shape='full',
             lw=3,
             length_includes_head=True,
             head_width=2)
    ax.vlines(hold50, 0.0, 50, linewidth=4)
    ax.arrow(hold80 + 10,
             20,
             -10.,
             .0,
             shape='full',
             lw=3,
             length_includes_head=True,
             head_width=2)
    ax.vlines(hold80, 0.0, 20, linewidth=4)
    pylab.text(hold50 + 1, 52, textstring2)  # 50% value
    pylab.text(hold80 + 1, 22, textstring3)  # 80% value
    ax.set_aspect('auto')

    ##
    ## Figure 3 subplot 1
    ##
    fig, axes = plt.subplots(nrows=2, ncols=1)
    fig.subplots_adjust(hspace=.75)
    ## fig.tight_layout()
    ax3 = fig.add_subplot(211)
    pylab.title('Histogram of %s risk costs (as-spent USD)' % projectname)
    ax3.set_xlabel('Cost as-spent $K')
    ax3.set_ylabel('Number of risks')
    ## yy = hist(total_cost,bins=20)
    ## ax3.set_xlim(0.0,yy[1].max())
    ## ax3.set_ylim(0.0,yy[0].max())
    ax3.autoscale(enable=True, axis='both', tight=None)
    labels = ax3.get_xticklabels()
    for label in labels:
        label.set_rotation(45)
    # NOTE(review): this assigns the hist() return value over the axes'
    # plot method (ax3.plot) rather than calling it — presumably the pylab
    # hist() side effect is what draws the chart; verify intent.
    ax3.plot = hist(total_cost, bins=20)

    ##
    ## Figure 3 subplot 2
    ##
    ax4 = fig.add_subplot(212)
    ax4.autoscale(enable=True, axis='both', tight=None)
    pylab.title('Histogram of %s prob-wght\'ed as-spent risk costs' %
                projectname)
    ax4.set_xlabel('Cost $K')
    ax4.set_ylabel('Number of risks')
    temp = [total_cost[ii] * meanprobability[ii] for ii in range(nrisks)]
    labels = ax4.get_xticklabels()
    for label in labels:
        label.set_rotation(45)
    ax4.plot = hist(temp, bins=20)  # NOTE(review): same pattern as ax3.plot above

    plt.show()
    # Restore stdout if it was redirected to the output file.
    sys.stdout = fhold
def paga(
        adata,
        threshold=None,
        color=None,
        layout=None,
        layout_kwds={},
        init_pos=None,
        root=0,
        labels=None,
        single_component=False,
        solid_edges='connectivities',
        dashed_edges=None,
        transitions=None,
        fontsize=None,
        fontweight='bold',
        text_kwds={},
        node_size_scale=1,
        node_size_power=0.5,
        edge_width_scale=1,
        min_edge_width=None,
        max_edge_width=None,
        arrowsize=30,
        title=None,
        left_margin=0.01,
        random_state=0,
        pos=None,
        normalize_to_color=False,
        cmap=None,
        cax=None,
        colorbar=None,
        cb_kwds={},
        frameon=None,
        add_pos=True,
        export_to_gexf=False,
        use_raw=True,
        colors=None,   # backwards compat
        groups=None,   # backwards compat
        show=None,
        save=None,
        ax=None):
    """Plot the abstracted graph through thresholding low-connectivity edges.

    This uses ForceAtlas2 or igraph's layout algorithms for most layouts
    [Csardi06]_.

    Parameters
    ----------
    adata : :class:`~anndata.AnnData`
        Annotated data matrix.
    threshold : `float` or `None`, optional (default: 0.01)
        Do not draw edges for weights below this threshold. Set to 0 if you
        want all edges. Discarding low-connectivity edges helps in getting a
        much clearer picture of the graph.
    color : gene name or obs. annotation, optional (default: `None`)
        The node colors. Also plots the degree of the abstracted graph when
        passing {'degree_dashed', 'degree_solid'}.
    labels : `None`, `str`, `list`, `dict`, optional (default: `None`)
        The node labels. If `None`, this defaults to the group labels stored
        in the categorical for which :func:`~scanpy.api.tl.paga` has been
        computed.
    layout : {'fa', 'fr', 'rt', 'rt_circular', 'eq_tree', ...}, optional (default: 'fr')
        Plotting layout. 'fa' stands for ForceAtlas2, 'fr' stands for
        Fruchterman-Reingold, 'rt' stands for Reingold Tilford. 'eq_tree'
        stands for 'eqally spaced tree'. All but 'fa' and 'eq_tree' are igraph
        layouts. All other igraph layouts are also permitted. See also
        parameter `pos` and :func:`~scanpy.api.tl.draw_graph`.
    init_pos : `np.ndarray`, optional (default: `None`)
        Two-column array storing the x and y coordinates for initializing the
        layout.
    random_state : `int` or `None`, optional (default: 0)
        For layouts with random initialization like 'fr', change this to use
        different intial states for the optimization. If `None`, the initial
        state is not reproducible.
    root : `int`, `str` or list of `int`, optional (default: 0)
        If choosing a tree layout, this is the index of the root node or a
        list of root node indices. If this is a non-empty vector then the
        supplied node IDs are used as the roots of the trees (or a single
        tree if the graph is connected). If this is `None` or an empty list,
        the root vertices are automatically calculated based on topological
        sorting.
    transitions : `str` or `None`, optional (default: `None`)
        Key for `.uns['paga']` that specifies the matrix that - for instance
        `'transistions_confidence'` - that specifies the matrix that stores
        the arrows.
    solid_edges : `str`, optional (default: 'paga_connectivities')
        Key for `.uns['paga']` that specifies the matrix that stores the edges
        to be drawn solid black.
    dashed_edges : `str` or `None`, optional (default: `None`)
        Key for `.uns['paga']` that specifies the matrix that stores the edges
        to be drawn dashed grey. If `None`, no dashed edges are drawn.
    single_component : `bool`, optional (default: `False`)
        Restrict to largest connected component.
    fontsize : `int` (default: `None`)
        Font size for node labels.
    text_kwds : keywords for `matplotlib.text`
        See `here
        <https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.text.html#matplotlib.axes.Axes.text>`_.
    node_size_scale : `float` (default: 1.0)
        Increase or decrease the size of the nodes.
    node_size_power : `float` (default: 0.5)
        The power with which groups sizes influence the radius of the nodes.
    edge_width_scale : `float`, optional (default: 5)
        Edge with scale in units of `rcParams['lines.linewidth']`.
    min_edge_width : `float`, optional (default: `None`)
        Min width of solid edges.
    max_edge_width : `float`, optional (default: `None`)
        Max width of solid and dashed edges.
    arrowsize : `int`, optional (default: 30)
        For directed graphs, choose the size of the arrow head head's length
        and width. See :py:class: `matplotlib.patches.FancyArrowPatch` for
        attribute `mutation_scale` for more info.
    pos : `np.ndarray`, filename of `.gdf` file, optional (default: `None`)
        Two-column array/list storing the x and y coordinates for drawing.
        Otherwise, path to a `.gdf` file that has been exported from Gephi or
        a similar graph visualization software.
    export_to_gexf : `bool`, optional (default: `None`)
        Export to gexf format to be read by graph visualization programs such
        as Gephi.
    normalize_to_color : `bool`, optional (default: `False`)
        Whether to normalize categorical plots to `color` or the underlying
        grouping.
    cmap : color map
        The color map.
    cax : `matplotlib.Axes`
        A matplotlib axes object for a potential colorbar.
    cb_kwds : colorbar keywords
        See `here
        <https://matplotlib.org/api/colorbar_api.html#matplotlib.colorbar.ColorbarBase>`__,
        for instance, `ticks`.
    add_pos : `bool`, optional (default: `True`)
        Add the positions to `adata.uns['paga']`.
    title : `str`, optional (default: `None`)
        Provide a title.
    frameon : `bool`, optional (default: `None`)
        Draw a frame around the PAGA graph.
    show : `bool`, optional (default: `None`)
        Show the plot, do not return axis.
    save : `bool` or `str`, optional (default: `None`)
        If `True` or a `str`, save the figure. A string is appended to the
        default filename. Infer the filetype if ending on
        \\{'.pdf', '.png', '.svg'\\}.
    ax : `matplotlib.Axes`
        A matplotlib axes object.

    Returns
    -------
    If `show==False`, one or more `matplotlib.Axis` objects.
    Adds `'pos'` to `adata.uns['paga']` if `add_pos` is `True`.

    Notes
    -----
    When initializing the positions, note that - for some reason - igraph
    mirrors coordinates along the x axis... that is, you should increase the
    `maxiter` parameter by 1 if the layout is flipped.

    .. note::
       `left_margin` is currently not referenced in the function body —
       presumably consumed by the axes-setup helper in earlier versions;
       verify before removing.

    See also
    --------
    tl.paga
    pl.paga_compare
    pl.paga_path
    """
    # Backwards compatibility: `groups` was the old name for `labels`.
    if groups is not None:
        labels = groups
        logg.warn('`groups` is deprecated in `pl.paga`: use `labels` instead')
    if colors is None:
        colors = color
    # colors is a list that contains no lists: wrap a scalar/None or a single
    # per-group sequence into a one-element list so the panel loop below works.
    groups_key = adata.uns['paga']['groups']
    if ((isinstance(colors, Iterable) and len(colors) == len(adata.obs[groups_key].cat.categories))
        or colors is None or isinstance(colors, str)):
        colors = [colors]
    if frameon is None:
        frameon = settings._frameon
    # labels is a list that contains no lists: replicate a single labels spec
    # across all color panels.
    if ((isinstance(labels, Iterable) and len(labels) == len(adata.obs[groups_key].cat.categories))
        or labels is None or isinstance(labels, (str, dict))):
        labels = [labels for i in range(len(colors))]
    # Normalize `title` to a per-panel list.
    if title is None and len(colors) > 1:
        title = [c for c in colors]
    elif isinstance(title, str):
        title = [title for c in colors]
    elif title is None:
        title = [None for c in colors]
    # A colorbar is drawn per panel only for gene (var) colorings unless the
    # caller explicitly set `colorbar`.
    if colorbar is None:
        var_names = adata.var_names if adata.raw is None else adata.raw.var_names
        colorbars = [True if c in var_names else False for c in colors]
    else:
        colorbars = [False for c in colors]
    if ax is None:
        axs, panel_pos, draw_region_width, figure_width = utils.setup_axes(
            panels=colors, colorbars=colorbars)
    else:
        axs = ax
    if len(colors) == 1 and not isinstance(axs, list):
        axs = [axs]
    # One panel per requested coloring; _paga_graph does the actual drawing
    # and returns the node positions plus the scatter handle for the colorbar.
    for icolor, c in enumerate(colors):
        if title[icolor] is not None:
            axs[icolor].set_title(title[icolor])
        pos, sct = _paga_graph(
            adata,
            axs[icolor],
            layout=layout,
            colors=c,
            layout_kwds=layout_kwds,
            init_pos=init_pos,
            solid_edges=solid_edges,
            dashed_edges=dashed_edges,
            transitions=transitions,
            threshold=threshold,
            root=root,
            labels=labels[icolor],
            fontsize=fontsize,
            fontweight=fontweight,
            text_kwds=text_kwds,
            node_size_scale=node_size_scale,
            node_size_power=node_size_power,
            edge_width_scale=edge_width_scale,
            min_edge_width=min_edge_width,
            max_edge_width=max_edge_width,
            normalize_to_color=normalize_to_color,
            frameon=frameon,
            cmap=cmap,
            cax=cax,
            colorbar=colorbars[icolor],
            cb_kwds=cb_kwds,
            use_raw=use_raw,
            title=title[icolor],
            random_state=random_state,
            export_to_gexf=export_to_gexf,
            single_component=single_component,
            arrowsize=arrowsize,
            pos=pos)
        if colorbars[icolor]:
            # Place a narrow colorbar axes just right of this panel.
            bottom = panel_pos[0][0]
            height = panel_pos[1][0] - bottom
            width = 0.006 * draw_region_width / len(colors)
            left = panel_pos[2][2*icolor+1] + 0.2 * width
            rectangle = [left, bottom, width, height]
            fig = pl.gcf()
            ax_cb = fig.add_axes(rectangle)
            cb = pl.colorbar(sct, format=ticker.FuncFormatter(utils.ticks_formatter),
                             cax=ax_cb)
    if add_pos:
        adata.uns['paga']['pos'] = pos
        logg.hint('added \'pos\', the PAGA positions (adata.uns[\'paga\'])')
    utils.savefig_or_show('paga', show=show, save=save)
    # Unwrap the single-panel case back to a bare axes object.
    if len(colors) == 1 and isinstance(axs, list):
        axs = axs[0]
    return axs if show == False else None
def scatter_base(Y,
                 colors='blue',
                 sort_order=True,
                 alpha=None,
                 highlights=[],
                 right_margin=None,
                 left_margin=None,
                 projection='2d',
                 title=None,
                 component_name='DC',
                 component_indexnames=[1, 2, 3],
                 axis_labels=None,
                 colorbars=[False],
                 sizes=[1],
                 color_map='viridis',
                 show_ticks=True,
                 ax=None):
    """Plot scatter plot of data.

    Draws one panel per entry of ``colors``; each entry is either a
    matplotlib color (uniform points) or an array of per-point values
    that is mapped through ``color_map`` (optionally with a colorbar).

    Parameters
    ----------
    Y : np.ndarray
        Data array, points in rows; columns 0/1 (and 2 for '3d') are used.
    projection : {'2d', '3d'}

    Returns
    -------
    axs : matplotlib.axis or list of matplotlib.axis
        Depending on whether supplying a single array or a list of arrays,
        return a single axis or a list of axes.
    """
    # A dict maps highlighted point indices to their display labels;
    # a plain list/sequence means "highlight these indices, label by index".
    if isinstance(highlights, dict):
        highlights_indices = sorted(highlights)
        highlights_labels = [highlights[i] for i in highlights_indices]
    else:
        highlights_indices = highlights
        highlights_labels = []
    # if we have a single array, transform it into a list with a single array
    if type(colors) == str: colors = [colors]
    # Broadcast a single marker size to every panel.
    if len(sizes) != len(colors) and len(sizes) == 1:
        sizes = [sizes[0] for i in range(len(colors))]
    # setup_axes (module-level helper) lays out one panel per color entry
    # and returns panel geometry in figure coordinates.
    axs, panel_pos, draw_region_width, figure_width = setup_axes(
        ax=ax,
        colors=colors,
        colorbars=colorbars,
        projection=projection,
        right_margin=right_margin,
        left_margin=left_margin,
        show_ticks=show_ticks)
    for icolor, color in enumerate(colors):
        ax = axs[icolor]
        left = panel_pos[2][2 * icolor]
        bottom = panel_pos[0][0]
        width = draw_region_width / figure_width
        height = panel_pos[1][0] - bottom
        Y_sort = Y
        # For value-mapped colors, draw high values last so they stay visible.
        if not is_color_like(color) and sort_order:
            sort = np.argsort(color)
            color = color[sort]
            Y_sort = Y[sort]
        if projection == '2d': data = Y_sort[:, 0], Y_sort[:, 1]
        elif projection == '3d': data = Y_sort[:, 0], Y_sort[:, 1], Y_sort[:, 2]
        # 'white' is used as a sentinel for "skip drawing this panel".
        if not isinstance(color, str) or color != 'white':
            sct = ax.scatter(*data,
                             marker='.',
                             c=color,
                             alpha=alpha,
                             edgecolors='none',  # 'face',
                             s=sizes[icolor],
                             cmap=color_map)
        if colorbars[icolor]:
            # Place a slim colorbar axis just right of the panel.
            width = 0.006 * draw_region_width
            left = panel_pos[2][2 * icolor + 1] + (
                1.2 if projection == '3d' else 0.2) * width
            rectangle = [left, bottom, width, height]
            fig = pl.gcf()
            ax_cb = fig.add_axes(rectangle)
            cb = pl.colorbar(sct,
                             format=ticker.FuncFormatter(ticks_formatter),
                             cax=ax_cb)
        # set the title
        if title is not None: ax.set_title(title[icolor])
        # output highlighted data points
        for iihighlight, ihighlight in enumerate(highlights_indices):
            ihighlight = ihighlight if isinstance(ihighlight, int) else int(ihighlight)
            data = [Y[ihighlight, 0]], [Y[ihighlight, 1]]
            if '3d' in projection:
                data = [Y[ihighlight, 0]], [Y[ihighlight, 1]], [Y[ihighlight, 2]]
            ax.scatter(*data,
                       c='black',
                       facecolors='black',
                       edgecolors='black',
                       marker='x',
                       s=10,
                       zorder=20)
            highlight_text = (highlights_labels[iihighlight]
                              if len(highlights_labels) > 0 else str(ihighlight))
            # the following is a Python 2 compatibility hack
            ax.text(*([d[0] for d in data] + [highlight_text]),
                    zorder=20,
                    fontsize=10,
                    color='black')
        if not show_ticks:
            ax.set_xticks([])
            ax.set_yticks([])
            if '3d' in projection: ax.set_zticks([])
        # NOTE(review): appending the panel axis back onto axs duplicates
        # entries in the returned list — confirm callers expect this.
        axs.append(ax)
    # set default axis_labels
    if axis_labels is None:
        axis_labels = [[component_name + str(i) for i in idcs]
                       for idcs in
                       [component_indexnames for iax in range(len(axs))]]
    else:
        axis_labels = [[axis_labels[0], axis_labels[1]]
                       for i in range(len(axs))]
    for iax, ax in enumerate(axs):
        ax.set_xlabel(axis_labels[iax][0])
        ax.set_ylabel(axis_labels[iax][1])
        if '3d' in projection:
            # shift the label closer to the axis
            ax.set_zlabel(axis_labels[iax][2], labelpad=-7)
    for ax in axs:
        # scale limits to match data
        ax.autoscale_view()
    return axs
def plot_tri_matrix(mat,
                    figure=None,
                    num='plot_part_of_this_matrix',
                    size=None,
                    cmap=pyplot.cm.RdBu_r,
                    colourbar=True,
                    color_anchor=None,
                    node_labels=None,
                    x_tick_rot=0,
                    title=None):
    r"""Creates a lower-triangle of a square matrix. Very often found to
    display correlations or coherence.

    Parameters
    ----------

    mat          : square matrix

    node_labels  : list of strings with the labels to be applied to
                   the nodes. Defaults to '0','1','2', etc.

    fig          : a matplotlib figure

    cmap         : a matplotlib colormap.

    title        : figure title (eg '$\alpha$')

    color_anchor : determines the clipping for the colormap.
                   If None, the data min, max are used.
                   If 0, min and max of colormap correspond to max abs(mat)
                   If (a,b), min and max are set accordingly (a,b)

    Returns
    -------

    fig: a figure object
    """

    def channel_formatter(x, pos=None):
        # Map a tick position to its node label, clamped to [0, N-1]
        # so out-of-range ticks never index past node_labels.
        thisidx = numpy.clip(int(x), 0, N - 1)
        return node_labels[thisidx]

    # Reuse the caller's figure when given, otherwise create one
    # (optionally keyed by `num` so repeated calls reuse the window).
    if figure is not None:
        fig = figure
    else:
        if num is None:
            fig = pyplot.figure()
        else:
            fig = pyplot.figure(num=num)

    if size is not None:
        fig.set_figwidth(size[0])
        fig.set_figheight(size[1])
    w = fig.get_figwidth()
    h = fig.get_figheight()

    ax_im = fig.add_subplot(1, 1, 1)
    N = mat.shape[0]
    idx = numpy.arange(N)

    # Reserve a horizontal strip under the matrix for the colourbar;
    # only possible when mpl_toolkits' axes_grid is importable.
    if colourbar:
        if IMPORTED_MPL_TOOLKITS:
            divider = make_axes_locatable(ax_im)
            ax_cb = divider.new_vertical(size="10%", pad=0.1, pack_start=True)
            fig.add_axes(ax_cb)
        else:
            pass

    mat_copy = mat.copy()
    # Null the upper triangle, including the main diagonal.
    idx_null = numpy.triu_indices(mat_copy.shape[0])
    mat_copy[idx_null] = numpy.nan
    # Min max values
    max_val = numpy.nanmax(mat_copy)
    min_val = numpy.nanmin(mat_copy)
    # Resolve colour limits per the color_anchor contract documented above.
    if color_anchor is None:
        color_min = min_val
        color_max = max_val
    elif color_anchor == 0:
        bound = max(abs(max_val), abs(min_val))
        color_min = -bound
        color_max = bound
    else:
        color_min = color_anchor[0]
        color_max = color_anchor[1]

    # The call to imshow produces the matrix plot:
    im = ax_im.imshow(mat_copy,
                      origin='upper',
                      interpolation='nearest',
                      vmin=color_min,
                      vmax=color_max,
                      cmap=cmap)

    # Formatting:
    ax = ax_im
    ax.grid(True)
    # Label each of the cells with the row and the column:
    if node_labels is not None:
        for i in range(0, mat_copy.shape[0]):
            # Diagonal labels for all but the last column...
            if i < (mat_copy.shape[0] - 1):
                ax.text(i - 0.3, i, node_labels[i], rotation=x_tick_rot)
            # ...and left-edge labels for all but the first row.
            if i > 0:
                ax.text(-1, i + 0.3, node_labels[i],
                        horizontalalignment='right')
        ax.set_axis_off()
        ax.set_xticks(numpy.arange(N))
        ax.xaxis.set_major_formatter(ticker.FuncFormatter(channel_formatter))
        fig.autofmt_xdate(rotation=x_tick_rot)
        ax.set_yticks(numpy.arange(N))
        ax.set_yticklabels(node_labels)
        ax.set_ybound([-0.5, N - 0.5])
        ax.set_xbound([-0.5, N - 1.5])

    # Make the tick-marks invisible:
    for line in ax.xaxis.get_ticklines():
        line.set_markeredgewidth(0)
    for line in ax.yaxis.get_ticklines():
        line.set_markeredgewidth(0)
    ax.set_axis_off()

    if title is not None:
        ax.set_title(title)

    if colourbar:
        # Set the ticks - if 0 is in the interval of values, set that, as well
        # as the min, max values:
        if min_val < 0:
            ticks = [color_min, min_val, 0, max_val, color_max]
        # set the min, mid and max values:
        else:
            ticks = [color_min, min_val, (color_max - color_min) / 2.,
                     max_val, color_max]
        # colourbar:
        if IMPORTED_MPL_TOOLKITS:
            cb = fig.colorbar(im,
                              cax=ax_cb,
                              orientation='horizontal',
                              cmap=cmap,
                              norm=im.norm,
                              boundaries=numpy.linspace(color_min, color_max, 256),
                              ticks=ticks,
                              format='%.2f')
        else:
            # the colourbar will be wider than the matrix
            cb = fig.colorbar(im,
                              orientation='horizontal',
                              cmap=cmap,
                              norm=im.norm,
                              boundaries=numpy.linspace(color_min, color_max, 256),
                              ticks=ticks,
                              format='%.2f')
    fig.sca(ax)
    return fig
def plot_everything(*args): plt.figure(1) #{ Figure 1 ax3 = plt.subplot2grid((1, 1), (0, 0)) #{ linear rbf m = createMap('cyl') x_m_meshgrid, y_m_meshgrid = m(y_meshgrid, x_meshgrid) m.pcolor(x_m_meshgrid, y_m_meshgrid, doses_rbf_log, cmap=my_cm, vmin=pcolor_min, vmax=pcolor_max) cb = m.colorbar(location="bottom", label="Z", format=ticker.FuncFormatter(fmt)) # draw colorbar cb.set_label(x_label + ' ' + x_units) plt.title('RBF gaussian (eps={}), log10 scale, '.format(epsilon) + date_range, fontsize=13) x_m, y_m = m(lons_orig, lats_orig) # project points # CS = m.hexbin(x_m, y_m, C=numpy.array(doses), bins='log', gridsize=16, cmap=my_cm, mincnt=0, reduce_C_function=np.max, zorder=10, vmin=pcolor_min, vmax=pcolor_max) # cb = m.colorbar(location="bottom", label="Z") # draw colorbar # cb.set_label('log10('+x_label+') '+x_units) for image in images: latitude, longitude, tle_date = getLatLong(image.time) x, y = m(longitude, latitude) m.scatter(x, y, 20, marker='o', color='k', zorder=10) #} end of linear rbf plt.subplots_adjust(left=0.025, bottom=0.05, right=0.975, top=0.95, wspace=0.1, hspace=0.1) #} end of Figure 1 if small_plot: plt.figure(2) ax2 = plt.subplot2grid((1, 1), (0, 0)) #{ log-scale rbf m = createMap('cyl') x_m_meshgrid, y_m_meshgrid = m(y_meshgrid, x_meshgrid) m.pcolor(x_m_meshgrid, y_m_meshgrid, doses_rbf_log, cmap=my_cm, vmin=pcolor_min, vmax=pcolor_max) cb = m.colorbar(location="bottom", label="Z") # draw colorbar cb.set_label('log10(' + x_label + ') ' + x_units) plt.title('RBF gaussian (eps={}), log10 scale, '.format(epsilon) + date_range, fontsize=13) #} end of log-scale rbf plt.show()
def get_lab_plots(start_dt_str,
                  end_dt_str,
                  lplot_list,
                  wrap_var,
                  stage_sub=None,
                  type_sub=None,
                  outdir=None,
                  opfile_suff=None):
    """Produce faceted time-series charts of lab monitoring data.

    For each lab data type in ``lplot_list`` (COD, BOD, OD, TSS/VSS, PH,
    ALKALINITY, VFA, AMMONIA, TKN, SULFATE), pulls the data via
    ``get_data``, reshapes it as needed, facets it by ``wrap_var``
    ('Stage' or 'Type'), and saves one .png per type into ``outdir``.

    Parameters
    ----------
    start_dt_str, end_dt_str : str
        Date bounds, parsed by ``manage_chart_dates``.
    lplot_list : list of str
        Lab data types to chart (case-insensitive).
    wrap_var : str
        Facet variable: 'Stage' or 'Type' (exits otherwise).
    stage_sub, type_sub : list, optional
        Subsets of treatment stages / measurement types to keep.
    outdir : str
        Output directory for chart files.
    opfile_suff : str, optional
        Suffix appended to output file names.
    """
    if opfile_suff:
        opfile_suff = '_' + opfile_suff
    else:
        opfile_suff = ''

    # Clean case of lplot_list and wrap var inputs
    lplot_list = [element.upper() for element in lplot_list]
    wrap_var = wrap_var[0].upper() + wrap_var[1:].lower()

    # Order of treatment stages in plots
    stage_order = [
        'Raw Influent', 'Grit Tank', 'Microscreen', 'MESH', 'AFBR',
        'Duty AFMBR MLSS', 'Duty AFMBR Effluent', 'Research AFMBR MLSS',
        'Research AFMBR Effluent'
    ]

    # Manage dates given by user
    start_dt, end_dt = manage_chart_dates(start_dt_str, end_dt_str)

    # Loop through the lab data types
    for ltype in lplot_list:

        # TSS and VSS are stored and charted together.
        if ltype.find('TSS') >= 0 or ltype.find('VSS') >= 0:
            ltype = 'TSS_VSS'

        if ltype == 'OD':
            # OD charts combine the COD and BOD datasets.
            ldata_cod = get_data(['COD'],
                                 start_dt_str=start_dt_str,
                                 end_dt_str=end_dt_str)['COD']
            ldata_bod = get_data(['BOD'],
                                 start_dt_str=start_dt_str,
                                 end_dt_str=end_dt_str)['BOD']
            ldata_long = ldata_cod.append(ldata_bod)
        else:
            ldata_long = get_data([ltype],
                                  start_dt_str=start_dt_str,
                                  end_dt_str=end_dt_str)[ltype]

        # ID variables for grouping by day
        # (for monitoring types that might have multiple observations in a day)
        id_vars_chrt = ['Date_Time', 'Stage', 'Type']

        if ltype == 'COD':
            # Set plotting variables
            ylabel = 'COD Reading (mg/L)'
            type_list = ['Total', 'Soluble', 'Particulate']
            share_yax = False

        if ltype == 'BOD':
            # Set plotting variables
            ylabel = 'BOD (mg/L)'
            share_yax = False
            # If BOD, convert to wide: split 'Type' values of the form
            # '<type>: <range>' into separate Type and Range columns.
            ldata_long.loc[:, 'Range'] = np.array([
                string.split(': ')[1] for string in ldata_long['Type'].values
            ])
            ldata_long.loc[:, 'Type'] = np.array([
                string.split(': ')[0] for string in ldata_long['Type'].values
            ])
            ldata_long = ldata_long[[
                'Date_Time', 'Stage', 'Type', 'Range', 'Value'
            ]]
            ldata_long.drop_duplicates(['Date_Time', 'Stage', 'Type', 'Range'],
                                       inplace=True)
            ldata_long.set_index(['Date_Time', 'Stage', 'Type', 'Range'],
                                 inplace=True)
            ldata_long = ldata_long.unstack('Range')
            # Get the error bar (symmetric)
            ldata_long['yerr'] = (ldata_long['Value']['Max Value'] -
                                  ldata_long['Value']['Min Value']) / 2
            ldata_long.reset_index(inplace=True)
            ldata_long.columns = [
                'Date_Time', 'Stage', 'Type', 'Mean', 'Min', 'Max', 'yerr'
            ]
            type_list = ldata_long['Type'].unique()

        if ltype == 'OD':
            # Set plotting variables
            ylabel = 'OD Reading (mg/L)'
            # Type list can be arbitrary in this case
            type_list = ldata_long['Type'].unique()
            # Make sure type_sub types are in the dataset!
            if type_sub:
                type_list = [
                    type_el for type_el in type_list if type_el in type_sub
                ]
            else:
                # Canonical fractions first, everything else sorted after.
                type_list = \
                    ['Total','Soluble','Particulate'] + \
                    sorted(
                        list(
                            filter(
                                lambda x: x not in ['Total','Soluble','Particulate'],
                                type_list
                            )
                        )
                    )
            share_yax = False

        if ltype == 'TSS_VSS':
            # Set plotting variables
            ylabel = 'Suspended Solids (mg/L)'
            type_list = ['TSS', 'VSS']
            share_yax = True

        if ltype == 'PH':
            # Set plotting variables
            ylabel = 'pH'
            ldata_long['Type'] = 'pH'
            type_list = ['pH']
            share_yax = True

        if ltype == 'ALKALINITY':
            # Set plotting variables
            ylabel = 'Alkalinity (mg/L as ' + r'$CaCO_3$)'
            ldata_long['Type'] = 'Alkalinity'
            type_list = ['Alkalinity']
            share_yax = True

        if ltype == 'VFA':
            # Set plotting variables
            ylabel = 'VFAs as mgCOD/L'
            type_list = ['Acetate', 'Propionate']
            share_yax = False

        if ltype == 'AMMONIA':
            # Set plotting variables
            ylabel = r'$NH_3$' + ' (mg/L as N)'
            ldata_long['Type'] = 'Ammonia'
            type_list = ['Ammonia']
            share_yax = True

        if ltype == 'TKN':
            # Set plotting variables
            ylabel = 'mgTKN/L'
            ldata_long['Type'] = 'TKN'
            type_list = ['TKN']
            share_yax = True

        if ltype == 'SULFATE':
            # Set plotting variables
            ylabel = 'mg/L ' + r'$SO_4$'
            ldata_long['Type'] = 'Sulfate'
            type_list = ['Sulfate']
            share_yax = True

        # Filter to the dates desired for the plots
        ldata_chart = ldata_long.loc[
            (ldata_long.Date_Time >= start_dt) &
            (ldata_long.Date_Time < end_dt + timedelta(days=1))]

        # Filter to stages and types being subset to
        if stage_sub:
            ldata_chart = ldata_chart.loc[ldata_chart.Stage.isin(stage_sub)]
        if type_sub:
            ldata_chart = ldata_chart.loc[ldata_chart.Type.isin(type_sub)]

        # Get the stages for which there are data
        act_stages = ldata_chart.Stage.values
        # Reproduce stage order according to data availability
        stage_list = [stage for stage in stage_order if stage in act_stages]

        if wrap_var == 'Stage':
            wrap_list = stage_list
            hue_list = type_list
            hue_var = 'Type'
        elif wrap_var == 'Type':
            wrap_list = type_list
            hue_list = stage_list
            hue_var = 'Stage'
        else:
            print(
                'cr2c_labdata: get_lab_plots: wrap_var can only be "Stage" or "Type"'
            )
            sys.exit()

        # Set plot width and length according to the wrapping variable
        plot_wid = 5 * min(3, len(wrap_list))
        wrap_wid = min(3, len(wrap_list))
        plot_len = 6 * np.ceil(len(wrap_list) / 3) + 5

        # Average all observations (by type and stage) taken on a day
        ldata_chart = ldata_chart.groupby(id_vars_chrt).mean()

        # Remove index!
        ldata_chart.reset_index(inplace=True)

        # Set plot facetting and layout
        lplot = sns.FacetGrid(ldata_chart,
                              col=wrap_var,
                              col_order=wrap_list,
                              col_wrap=wrap_wid,
                              hue=hue_var,
                              hue_order=hue_list,
                              sharey=share_yax)

        # Set date format
        dfmt = dates.DateFormatter('%m/%d/%y')
        # Set tickmarks for days of the month
        dlocator = dates.DayLocator(bymonthday=[1, 15])

        # Format the axes in the plot panel
        for ax in lplot.axes.flatten():
            ax.xaxis.set_major_locator(dlocator)
            ax.xaxis.set_major_formatter(dfmt)
            # Different format for PH vs other y-axes
            if ltype == 'PH':
                # BUG FIX: the formatter was previously created but never
                # attached to the axis (the expression result was
                # discarded), so pH charts kept the default tick format.
                ax.yaxis.set_major_formatter(tkr.FormatStrFormatter('%0.2f'))
            else:
                # Thousands separators on the y axis.
                ax.yaxis.set_major_formatter(
                    tkr.FuncFormatter(lambda x, p: format(int(x), ',')))

        # Plot values and set axis labels/formatting
        if ltype == 'BOD':
            lplot.map(plt.scatter, 'Date_Time', 'Mean',
                      marker='o').add_legend()
            lplot.map(plt.errorbar, 'Date_Time', 'Mean', 'yerr', capsize=2)
        else:
            pts = lplot.map(plt.plot,
                            'Date_Time',
                            'Value',
                            linestyle='-',
                            marker="o",
                            ms=4)
        lplot.set_titles('{col_name}')
        lplot.set_ylabels(ylabel)
        lplot.set_xlabels('')
        lplot.set_xticklabels(rotation=45)

        # Output plot to given directory
        plot_filename = "{0}{1}.png"
        os.chdir(outdir)

        # Add and position the legend
        if ltype in ['PH', 'ALKALINITY'] and wrap_var == 'Stage' or ltype == 'BOD':
            plt.savefig(plot_filename.format(ltype, opfile_suff),
                        bbox_inches='tight',
                        width=plot_wid,
                        height=plot_len)
            plt.close()
        else:
            # NOTE: uses the last axis from the formatting loop above for
            # the legend handles (all facets share the same hue mapping).
            handles, labels = ax.get_legend_handles_labels()
            lgd = ax.legend(handles=handles,
                            labels=labels,
                            loc='upper left',
                            bbox_to_anchor=(1, 0.75))
            plt.savefig(plot_filename.format(ltype, opfile_suff),
                        bbox_extra_artists=(lgd, ),
                        bbox_inches='tight',
                        width=plot_wid,
                        height=plot_len)
            plt.close()
def get_feed_sumst(self, stype, output_types, start_dt_str, end_dt_str, sum_period='DAY', plt_type=None, plt_colors=None, ylabel=None, get_nhours=None, outdir=None, opfile_suff=None): start_dt = dt.strptime(start_dt_str, '%m-%d-%y') end_dt = dt.strptime(end_dt_str, '%m-%d-%y') # Clean case of input arguments sum_period = sum_period.upper() if opfile_suff: opfile_suff = '_' + opfile_suff else: opfile_suff = '' plt_type = plt_type.upper() if type(output_types) == list: output_types = [ output_type.upper() for output_type in output_types ] else: output_types = output_types.upper() # Define op Sensor IDs according to query type (water or biogas) stype = stype.upper() if stype == 'GAS': sids = ['FT700', 'FT704'] if stype == 'WATER': sids = ['FT202', 'FT305'] if stype == 'TEMP': sids = ['AT304', 'AT310'] # Get output directory and string with all Sensor IDs from report if not outdir: tkTitle = 'Directory to output charts/tables to...' print(tkTitle) outdir = askdirectory(title=tkTitle) feeding_dat = get_data([stype] * 2, sids, [1, 1], ['HOUR', 'HOUR'], start_dt_str=start_dt_str, end_dt_str=end_dt_str) # Retrieve Sensor IDs from aggregated data all_sids = '_'.join(sids) # Get hourly flow totals for each sid for sid in sids: feeding_dat[sid] = feeding_dat[sid] * 60 # Convert Time variable to pd.datetime variable feeding_dat['Time'] = pd.to_datetime(feeding_dat['Time']) feeding_dat['Date'] = feeding_dat['Time'].dt.date # Filter to the dates desired for the plots feeding_dat = feeding_dat.loc[(feeding_dat.Time >= start_dt) & ( feeding_dat.Time < end_dt + timedelta(days=1))] # Get dataset aggregated by Day, Week or Month # Based on aggregation period, get the number of hours we are summing averages over (averages are in minutes) if sum_period == 'HOUR': xlabel = 'Time' nhours = 1 else: feeding_dat['Date'] = feeding_dat['Time'].dt.date if sum_period == 'DAY': xlabel = 'Date' nhours = 24 if sum_period == 'WEEK': xlabel = 'Weeks (since {})'.format(start_dt_str) 
feeding_dat[xlabel] = np.floor( (feeding_dat['Time'] - start_dt) / np.timedelta64(7, 'D')) nhours = 24 * 7 if sum_period == 'MONTH': xlabel = 'Months (since {}, as 30 days)'.format(start_dt_str) feeding_dat[xlabel] = np.floor( (feeding_dat['Time'] - start_dt) / np.timedelta64(30, 'D')) nhours = 24 * 7 * 30 if get_nhours == 1: for sid in sids: feeding_dat['Number Hours {}'.format(sid)] = \ np.where(feeding_dat[sid].values > 0, 1, 0) agg_sumst = feeding_dat.groupby(xlabel).sum() # Plot! if 'PLOT' in output_types: # Set the maximum number of tick labels nobs = len(agg_sumst.index.values) nlims = nobs if sum_period == 'DAY': nlims = 10 # Get the indices of the x-axis values according to these tick labels lim_len = int(np.floor(nobs / nlims)) tic_idxs = [lim * lim_len for lim in range(nlims)] tic_vals = [ agg_sumst.index.values[tic_idx] for tic_idx in tic_idxs ] if sum_period != 'DAY': tic_vals = [ '{} - {}'.format(int(tic_val), int(tic_val + 1)) for tic_val in tic_vals ] if plt_type == 'BAR': ax = agg_sumst[sids].plot.bar(stacked=False, width=0.8, color=plt_colors) plt.xticks(tic_idxs, tic_vals) else: ax = agg_sumst[sids].plot(color=plt_colors) plt.ylabel(ylabel) plt.legend() ax.yaxis.set_major_formatter( tkr.FuncFormatter(lambda y, p: format(int(y), ','))) plt.xticks(rotation=45) plt.tight_layout() # Output plots and/or sumstats csv files to directory of choice plot_filename = "op{}_{}{}.png".format(stype, all_sids, opfile_suff) plt.savefig(os.path.join(outdir, plot_filename), width=20, height=50) plt.close() if 'TABLE' in output_types: sumst_filename = "op{}_{}{}.csv".format(stype, all_sids, opfile_suff) agg_sumst.reset_index(inplace=True) agg_sumst = agg_sumst[[xlabel] + sids] agg_sumst.to_csv(os.path.join(outdir, sumst_filename), index=False, encoding='utf-8')
# first we'll do it the default way, with gaps on weekends fig, axes = plt.subplots(ncols=2, figsize=(8, 4)) ax = axes[0] ax.plot(date, r.adj_close, 'o-') ax.set_title("Default") fig.autofmt_xdate() # next we'll write a custom formatter N = len(r) ind = np.arange(N) # the evenly spaced plot indices等距绘图索引 def format_date(x, pos=None): thisind = np.clip(int(x + 0.5), 0, N - 1) # np.clip()修剪 这个函数可以控制thisind在0到N-1的整数 #避免date[thisind]出界,抛出 #IndexError: index 30 is out of bounds for axis 0 with size 30 # x=np.array([1,2,3,5,6,7,8,9]) # np.clip(x,3,8) 结果:array([3, 3, 3, 5, 6, 7, 8, 8]) return date[thisind].strftime('%Y-%m-%d') ax = axes[1] ax.plot(ind, r.adj_close, 'o-') fmt = ticker.FuncFormatter(format_date) ax.xaxis.set_major_formatter(fmt) ax.set_title("Custom tick formatter") fig.autofmt_xdate() plt.show()
axes_3 = fig.add_axes([0.7, 0.2, 0.2, 0.6]) # 分配子图在figure中的位置 axes_3.set_title('$F_{z}$ = 2.5 V/nm') # 子图的标题 image_3 = axes_3.imshow(charge_2D_3, extent=ratio, cmap='seismic', vmin=-6e-4, vmax=6e-4) axes_3.xaxis.set_major_locator(x_major_locator) # 使用x轴locator axes_3.yaxis.set_major_locator(y_major_locator) # 使用y轴locator axes_3.set_xticks(x_array, x_array_new) # 通过这个函数,我们可以将旧的刻度(x_array)替换成新的刻度(x_array_new) axes_3.set_yticks(y_array, y_array_new) # 记得要以数组的形式输入,同时数据的个数要一一对应 axes_3.set_xlabel(r'$[1\bar{1}0]$-direction (nm)') # x轴名称 axes_3.set_ylabel(r'[001]-direction (nm)') # y轴名称 # 以子图的形式,添加colorbar,以第一幅子图的colorbar为基准,规范数据 axes_4 = fig.add_axes([0.95, 0.25, 0.01, 0.5]) def fmt(x, pos): # 此函数可以用于改变刻度 return round(x * 10000, 2) fig.colorbar(image_1, cax=axes_4, orientation='vertical', format=ticker.FuncFormatter(fmt)) axes_4.set_title( r'${\times}10^{-4}$ e/${\AA}^3$') # matplotlib里面埃(Angstrom)的正确打法为: \AA # 由于'\t'是转义字符,所以如果我们要打乘号'\times',就在字符串前面加r,不然电脑会先识别成转义字符
def plotting(self, events, actions, job, input_zip, output_file, df, my_dpi, col): """ plotting ________________ Main function which is a driver which uses the matplotlib plotting ability to plot to the graph plotting is determined by the commands in config file each command with parameters determine which if-else statement to execute and then on the basis of those parameters executing blocks of if-else for plotting """ X = df.iloc[:, 0].values[::20] y = df.loc[:, job.headers[0]].values[::20] if job.legendOnly: if not os.path.exists(job.outputDir): os.mkdir(job.outputDir) colors = [ "red", "yellow", "green", "blue", "orange", "lime", "magenta", "violet", "black", "purple", "0.1", "0.2", "0.75", "0.8", "0.9", "pink" ] f = lambda m, c: plt.plot([], [], marker=m, color=c, ls="none")[0] handles = [f("_", colors[i]) for i in range(0, len(self.data))] labels = [i.replace("\t", " ") for i in self.data] legend = plt.legend(handles, labels, loc=3, ncol=3, framealpha=1, frameon=False, fontsize=12) plt.axis('off') def export_legend(legend, filename=os.path.join(job.outputDir, job.outputFilename), expand=[-50, -50, 50, 50]): fig = legend.figure fig.canvas.draw() bbox = legend.get_window_extent() bbox = bbox.from_extents(*(bbox.extents + np.array(expand))) bbox = bbox.transformed(fig.dpi_scale_trans.inverted()) fig.savefig(filename, dpi="figure", bbox_inches=bbox, pad_inches=0) export_legend(legend) if job.log > 0: logging.info("Creating Graph:" + job.outputFilename.split(".")[0]) plt.close("all") else: fig, ax = plt.subplots() fig.set_size_inches(w=job.imageWidth / my_dpi + 1, h=job.imageHeight / my_dpi + 1) if not os.path.exists(job.outputDir): os.mkdir(job.outputDir) if job.logAxis: ax.set_yscale("log") ax.yaxis.set_major_formatter(tick.FuncFormatter(self.y_fmt)) ax.yaxis.set_ticks_position("both") ax.yaxis.set_tick_params(labelright=True) plt.xlabel("Time(s)", fontsize=job.fontSize) plt.ylabel(job.headers[0], fontsize=job.fontSize) if job.titleOverride == None: 
plt.title(job.headers[0] + "_vs_Time_Action_Event_Plot", fontsize=job.fontSize) if job.log > 0: logging.info("Creating Graph:" + job.headers[0] + "_vs_Time_Action_Event_Plot") elif job.titleOverride == "None": if job.log > 0: logging.info("Creating Graph:" + job.outputFilename.split(".")[0]) else: plt.title(job.titleOverride, fontsize=job.fontSize) if job.log > 0: logging.info("Creating Graph:" + job.titleOverride) plt.xlim(0, max(X)) p = plt.plot(X, y) for i in range(0, len(self.timeData)): plt.axvline(self.timeData[i], color=col[i]) if job.showGridLines: plt.grid(b=True, which='major', color='r', linestyle='--') if not job.hideAELegend and not job.removeAllLegends: labs = job.headers plt.legend(p, labs) if "(" and ")" in job.outputFilename: job.outputFilename = job.outputFilename.split( "(")[0] + ".jpg" plt.savefig(os.path.join(job.outputDir, job.outputFilename), dpi=my_dpi) plt.close("all") else: ax.get_yaxis().set_major_locator( MaxNLocator(nbins=10, min_n_ticks=8)) ax.get_xaxis().set_major_locator( MaxNLocator(nbins=15, min_n_ticks=10)) ax.yaxis.set_major_formatter(tick.FuncFormatter(self.y_fmt)) ax.yaxis.set_ticks_position("both") ax.yaxis.set_tick_params(labelright=True) plt.xlabel("Time(s)", fontsize=job.fontSize) plt.ylabel(job.headers[0], fontsize=job.fontSize) if job.titleOverride == None: plt.title(job.headers[0] + "_vs_Time_Action_Event_Plot", fontsize=job.fontSize) if job.log > 0: logging.info("Creating Graph:" + job.headers[0] + "_vs_Time_Action_Event_Plot") elif job.titleOverride == "None": if job.log > 0: logging.info("Creating Graph:" + job.outputFilename.split(".")[0]) else: if job.log > 0: logging.info("Creating Graph:" + job.titleOverride) plt.title(job.titleOverride, fontsize=job.fontSize) plt.xlim(0, max(X)) p = plt.plot(X, y) for i in range(0, len(self.timeData)): plt.axvline(self.timeData[i], color=col[i]) if job.showGridLines: plt.grid(b=True, which='major', color='r', linestyle='--') if not job.hideAELegend and not job.removeAllLegends: 
labs = job.headers plt.legend(p, labs) if "(" and ")" in job.outputFilename: job.outputFilename = job.outputFilename.split( "(")[0] + ".jpg" plt.savefig(os.path.join(job.outputDir, job.outputFilename), dpi=my_dpi) plt.close("all")
def plot_energy_landscape(x, y, values, log_legend=False, title=None, legend_title=None, legend_min=0, legend_max=None): # Clearing the canvas, so we always draw on the empty canvas. Just in case. plt.clf() x, y = preprocess(x, y) fig, ax = plt.subplots() XX, YY = np.meshgrid(x, y) z = values.reshape(len(x) - 1, len(y) - 1).T # ax.grid(True, which='minor', axis='both', linestyle='-', color='k') # ax.set_xticks(x, minor=True) # ax.set_yticks(y, minor=True) ## This is for having ticks in the plot as multiples of pi ax.xaxis.set_major_formatter( tck.FuncFormatter(lambda val, pos: '{:.2f}$\pi$'.format(val / np.pi) if val != 0 else '0')) ax.xaxis.set_major_locator(tck.MultipleLocator(base=np.pi / 4)) ax.yaxis.set_major_formatter( tck.FuncFormatter(lambda val, pos: '{:.2f}$\pi$'.format(val / np.pi) if val != 0 else '0')) ax.yaxis.set_major_locator(tck.MultipleLocator(base=np.pi / 4)) if log_legend: mesh_plot = ax.pcolormesh(XX, YY, z, cmap='RdBu', vmax=legend_max, norm=LogNorm()) else: mesh_plot = ax.pcolormesh(XX, YY, z, cmap='RdBu', vmin=legend_min, vmax=legend_max) ax.set_xlabel("beta") ax.set_ylabel("gamma") if title is None: title = "QAOA energy landscape" ax.set_title(title) # set the limits of the plot to the limits of the data ax.axis([x.min(), x.max(), y.min(), y.max()]) if log_legend: cbar_formatter = tck.LogFormatter(10, labelOnlyBase=False) cbar = fig.colorbar(mesh_plot, ax=ax, format=cbar_formatter) else: cbar = fig.colorbar(mesh_plot, ax=ax) if legend_title is None: legend_title = "energy" cbar.set_label(legend_title) plt.savefig(title) return ax
# df[['MA5', 'MA20', 'MA50', 'MA200']].plot(figsize=(50, 25), grid=True) # x轴坐标数量 ax.xaxis.set_major_locator(ticker.MaxNLocator(40)) # 设置自动格式化时间。 def mydate_formatter(x, pos): try: ts = int(df_idx[int(x)][0]) timeArray = time.localtime(ts) otherStyleTime = time.strftime("%Y--%m--%d %H:%M:%S", timeArray) return otherStyleTime except IndexError: return '' ax.xaxis.set_major_formatter(ticker.FuncFormatter(mydate_formatter)) ax.yaxis.set_major_locator(ticker.MaxNLocator(40)) plt.setp(plt.gca().get_xticklabels(), rotation=45, horizontalalignment='right') ax.grid(True) plt.title("BTC 2018-08-15-now-2H KLine") # 设置y轴坐标范围,解决y轴显示不全问题 plt.ylim(y_min, y_max) # plt.savefig("../data/BTC2018-08-15-now-2H.png") plt.show()
plt.subplot(512) plt.plot(x_axis, data2[int(num_y / 2), :]) #plt.plot(x_axis,data2[int(num_y/2),:],label='Vt/c') #plt.plot(x_axis,np.abs(data3[int(num_y/2),:]),label='|Vx/c|') y2 = max(data2[int(num_y / 2), :]) y1 = min(data2[int(num_y / 2), :]) axes = plt.gca() axes.set_ylim([1.5 * y1 - 0.5 * y2, 1.5 * y2 - 0.5 * y1]) #plt.title(title2) plt.ylabel(title2) plt.legend() ax = plt.subplot(513) ax.plot(x_axis, data3[int(num_y / 2), :]) #ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f')) ax.yaxis.set_major_formatter(ticker.FuncFormatter(srd.fmt)) #plt.title(title3) plt.ylabel(title3) plt.subplot(514) U = data4[0, :] U[0] = 0 for i in range(1, num_x - 1): #U[i] = (data4[0,i]+data4[0,i-1])*0.5*(x_axis[i]-x_axis[i-1])+U[i-1] U[i] = data4[0, i] * (x_axis[i] - x_axis[i - 1]) + U[i - 1] U = U * 2 / data3[0, 125]**2 plt.plot(x_axis, data4[int(num_y / 2), :]) #plt.plot(x_axis,U) plt.ylabel(title4)
ax.set_zlabel(scatterLabels[ind][2]) plt.show() #plots 1D scatter plot axes = [7, 8, 10, 11, 5, 12] xlabels = [r"$\alpha_{3}$" + "\n" + r"$\mathbf{a)}$",r"$\alpha_{4}$" + "\n" + r"$\mathbf{b)}$",r"$Kd_{5}$" + "\n" + r"$\mathbf{c)}$",r"$Kd_{6}$" + "\n" + r"$\mathbf{d)}$", r"$Kd_{7}$" + "\n" + r"$\mathbf{e)}$", r"$\delta_{2}$" + "\n" + r"$\mathbf{f)}$"] ind = 1 fig, ax = plt.subplots(nrows=2, ncols=2, sharex=False, sharey=False, figsize=(6, 6)) for i in axes: plt.subplot(3,2,ind) plt.xlabel(xlabels[ind - 1]) ax = plt.gca() ax.invert_yaxis() ind += 1 plot1D(results_flipflop_space, results_flipflop_cost, i) ax.yaxis.set_major_formatter(ticker.FuncFormatter(lambda x, y: "$" + locale.format('%1.0f',(x)/1e5) + "x10^{5}$" )) fig.text(0.004, 0.5, 'Optimizacijski kriterij', va='center', rotation='vertical') plt.show() #plot heatmaps xlabels = [r"$Kd_{7}$" + "\n" + r"$\mathbf{a)}$", r"$Kd_{7}$" + "\n" + r"$\mathbf{b)}$",r"$Kd_{7}$" + "\n" + r"$\mathbf{c)}$",r"$Kd_{7}$" + "\n" + r"$\mathbf{d)}$"] ylabels = [r"$\delta_{1}$", r"$\alpha_{3}$", r"$Kd_{5}$", r"$\delta_{2}$"] heatAxes = [(5,6), (5,7), (5,10), (5,12)] ind = np.random.choice(range(results_flipflop_space.shape[0]), 10000) k = 1 for heatAxis in heatAxes: i = heatAxis[0] j = heatAxis[1] x = results_flipflop_space[:, i] y = results_flipflop_space[:, j]
def plot(displacement_RMS, band = "4.0-14.0",
         logo = 'https://upload.wikimedia.org/wikipedia/commons/thumb/4/44/Logo_SED_2014.png/220px-Logo_SED_2014.png',
         bans = {"2020-03-13":'Groups >100 banned', "2020-03-20":'Groups >5 banned'},
         type = '*',
         scale = 1e9,
         unit = 'nm',
         time_zone = "Australia/Canberra",
         sitedesc = "", # e.g. "Seismometers in Schools ... School Name,Location"
         show = True,
         save = None,
         format = 'pdf',
         self = None,
         data_provider='ETH',
         basename=None,
         reference_period=None,
         lockdown_period=None,
         reopening_date=None,
         holiday_period=None,
         ):
    """Plot seismic-noise displacement RMS figures per channel group.

    For every channel code found in ``displacement_RMS`` (keys minus their
    last character), the Z/E/N components for ``band`` are combined into a
    vector-sum series ``'*'`` and one or more figure types are produced,
    selected by ``type``: ``sitemaps``, ``clockmaps`` (hour map),
    ``gridmaps``, ``timeseries``, ``dailyplots``, ``clockplots`` and
    ``sclockplots`` (seasonal clock grid); ``'*'``/``'all'`` draws most
    of them.

    :param displacement_RMS: dict of per-channel DataFrames keyed by
        channel code, each indexed by frequency band (column ``band``).
    :param band: frequency band label to plot.
    :param logo: image URL stamped on the timeseries figure (None to skip).
    :param bans: mapping of date string -> label for lockdown events.
        NOTE(review): mutable default argument — shared across calls.
    :param type: which figure family to draw.  NOTE(review): shadows the
        ``type`` builtin (as ``format`` shadows ``format``) — kept for
        backward compatibility with existing callers.
    :param scale: factor from meters to display unit (1e9 -> nm).
    :param save: output directory prefix, or None to not save.
    :param reference_period/lockdown_period/holiday_period: dicts with
        "start"/"end" keys; ``reopening_date`` only needs "start".
        Required for ``sclockplots``; may be None otherwise.
    """
    if save is not None and not os.path.isdir(save):
        os.makedirs(save)
    # One iteration per channel group: strip the component letter and dedup.
    for channelcode in list(set([k[:-1] for k in displacement_RMS])):
        data={}
        for o in 'ZEN':
            if channelcode+o not in displacement_RMS :
                continue
            data[channelcode[-2:]+o] = displacement_RMS[channelcode+o][band]
            main=channelcode[-2:]+o
        if len(data.keys())>1:
            # Build the vector sum of the available components on a common
            # 30-minute grid.  NOTE(review): Series.tshift was removed in
            # pandas 2.0 — needs .shift(freq=...) on modern pandas.
            data[channelcode[-2:]+'*'] = data[main].copy().resample("30min").median().tshift(30, "min") # for the sum
            main=channelcode[-2:]+'*'
            for i,t in enumerate(data[main].index):
                data[main][i] = 0
            for o in data:
                if o == main:
                    continue
                data[o] = data[o].copy().resample("30min" ).median().tshift(30, "min")
                for i,t in enumerate(data[main].index):
                    if len(data[o].index)-1<i:
                        break
                    if True:#abs(data[o].index[i].timestamp()-data[main].index[i].timestamp())<60:
                        data[main][i] += data[o][i]**2
            # sqrt of the sum of squared components -> vector amplitude
            for i,t in enumerate(data[main].index):
                data[main][i] = data[main][i]**.5
        data[main] = localize_tz_and_reindex(data[main], "30Min", time_zone = time_zone)
        if basename is None:
            # NOTE(review): once set for the first channel, basename keeps
            # that value for every later channel in this loop — confirm
            # whether per-channel filenames were intended.
            basename = "%s%s-%s"%(save, channelcode[:]+main[-1], band)
        if type in ['*', 'all', 'sitemaps']:
            # Station location map.
            ax=sitemap(channelcode[:]+main[-1], data_provider=data_provider, self=self)
            if save is not None:
                ax.figure.savefig("%s-map.%s"%(basename,format), bbox_inches='tight')
            if show:
                plt.show()
        if type in ['*', 'all', 'clockmaps']:
            # Hour-of-day vs date heat map.
            ax = hourmap(data[main], bans=bans, scale=scale, unit=unit)
            # NOTE(review): `title` is assigned but never used (set_title
            # below repeats the format).
            title = 'Seismic Noise for %s - Filter: [%s] Hz' % (channelcode[:]+main[-1],band)
            ax.set_title('Seismic Noise for %s - Filter: [%s] Hz' % (channelcode[:]+main[-1],band))
            if save is not None:
                ax.figure.savefig("%s-hourmap.%s"%(basename,format), bbox_inches='tight', facecolor='w')
            if show:
                plt.show()
        if type in ['*', 'all', 'gridmaps']:
            ax = gridmap(data[main], bans=bans, scale=scale, unit=unit)
            # NOTE(review): unused, see clockmaps branch above.
            title = 'Seismic Noise for %s - Filter: [%s] Hz' % (
                channelcode[:] + main[-1], band)
            ax.set_title('Seismic Noise for %s - Filter: [%s] Hz' % (
                channelcode[:] + main[-1], band))
            if save is not None:
                ax.figure.savefig("%s-gridmap.%s" % (basename, format), bbox_inches='tight', facecolor='w')
            if show:
                plt.show()
        if type in ['*', 'all', 'timeseries']:
            # Full time series with daytime daily medians overlaid.
            fig = plt.figure(figsize=(12,6))
            if logo is not None:
                fig.figimage(plt.imread(logo), 40, 40, alpha=.4, zorder=1)
            plt.plot(data[main].index, data[main], label = main)
            for o in data:
                # Daily median restricted to working hours (6h-16h local).
                rs = data[o].copy().between_time("6:00", "16:00")
                rs = rs.resample("1D" ).median().tshift(12, "H")
                plt.plot(rs.index, rs, label="$\overline{%s}$ (6h-16h)"%o)#, c='purple')
            # Get normal business days and set their background color to green
            db = pd.bdate_range(min(data[main].index), max(data[main].index))
            for dbi in db:
                plt.axvspan(dbi, dbi+datetime.timedelta(days=1),
                            facecolor='lightgreen', edgecolor="none",
                            alpha=0.2, zorder=-10)
            # NOTE(review): duplicated line — the second ylim call is a no-op.
            plt.ylim(0,np.nanpercentile(data[main],95)*1.5)
            plt.ylim(0,np.nanpercentile(data[main],95)*1.5)
            # y axis in display units (meters * scale).
            ticks = ticker.FuncFormatter(lambda x, pos: "{0:g}".format(x*scale))
            plt.gca().yaxis.set_major_formatter(ticks)
            plt.ylabel("Displacement (%s)"%unit)
            plt.title('Seismic Noise for %s - Filter: [%s] Hz' % (channelcode[:]+main[-1], band))
            plt.xlim(data[main].index.min(), data[main].index.max())
            fig.autofmt_xdate()
            plt.grid(True, zorder=-1)
            plt.gca().set_axisbelow(True)
            # Vertical markers for each ban/lockdown date, cycling line styles.
            for iban,ban in enumerate(bans.keys()):
                plt.axvline(UTCDateTime(ban).datetime,
                            color='r', linewidth=2,
                            linestyle=['-', '--', '-.', ':', '-', '--', '-.', ':', '-', '--', '-.', ':'][iban],
                            path_effects=[pe.withStroke(linewidth=4, foreground="k")],
                            zorder=-9,
                            label='\n'.join(wrapper.wrap(bans[ban])))
            plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
            ## Idea: add map in an inset below the legend
            #axins = inset_axes(ax, width="100%", height="100%",
            #                   bbox_to_anchor=(1.05, .6, .5, .4),
            #                   bbox_transform=ax.transAxes, loc=2, borderpad=0)
            #axins.tick_params(left=False, right=True, labelleft=False, labelright=True)
            if save is not None:
                fig.savefig("%s.%s"%(basename,format), bbox_inches='tight', facecolor='w')
            if show:
                plt.show()
        if type in ['*', 'all', 'clockplots', 'dailyplots', 'sclockplots']:
            # Split the series around the last ban date and re-index each
            # piece by (weekday name, fractional hour of day).
            preloc = data[main].loc[:max(list(bans.keys()))]
            preloc = preloc.set_index([preloc.index.day_name(), preloc.index.hour+preloc.index.minute/60.])
            postloc = data[main].loc[max(list(bans.keys())):]
            postloc = postloc.set_index([postloc.index.day_name(), postloc.index.hour+postloc.index.minute/60.])
            cmap = plt.get_cmap("tab20")
            # Period slices for the 4-panel seasonal clock plot
            # (sclockplots).  These dereference the *_period dicts, so those
            # arguments must be provided when that plot type is requested.
            reference = data[main].loc[reference_period["start"]:reference_period["end"]]
            reference = reference.set_index([reference.index.day_name(), reference.index.hour+reference.index.minute/60.])
            lockdown = data[main].loc[lockdown_period["start"]:lockdown_period["end"]]
            lockdown = lockdown.set_index([lockdown.index.day_name(), lockdown.index.hour+lockdown.index.minute/60.])
            holiday = data[main].loc[holiday_period["start"]:holiday_period["end"]]
            holiday = holiday.set_index([holiday.index.day_name(), holiday.index.hour+holiday.index.minute/60.])
            reopened = data[main].loc[reopening_date["start"]:]
            reopened = reopened.set_index([reopened.index.day_name(), reopened.index.hour+reopened.index.minute/60.])
            if type in ['*', 'all', 'dailyplots']:
                # Cartesian day/hour curves: solid = before bans, dashed = after.
                ax = stack_wday_time(preloc,scale).plot(figsize=(14,8), cmap = cmap)
                if len(postloc):
                    stack_wday_time(postloc,scale).plot(ls="--", ax=ax, legend=False,cmap = cmap)
                plt.title("Daily Noise Levels in %s" % (channelcode[:]+main[-1]))
                plt.ylabel("Amplitude (%s)"%unit)
                plt.xlabel("Hour of day (local time)")
                plt.grid()
                plt.xlim(0,23)
                plt.ylim(0,np.nanpercentile(data[main],95)*1.5*scale)
                if save is not None:
                    ax.figure.savefig("%s-daily.%s"%(basename,format), bbox_inches='tight', facecolor='w')
                if show:
                    plt.show()
            if type in ['*', 'all', 'clockplots']:
                # Polar/clock Plot: wrap the first sample to close the circle.
                _ = stack_wday_time(preloc,scale).copy()
                _.loc[len(_)+1] = _.iloc[0]
                _.index = radial_hours(len(_))
                #subplot_kw = {'polar':True}
                #opts={#'sharey':True,
                #      'figsize':(12,6),
                #      'subplot_kw':subplot_kw}
                #fig, axes = plt.subplots(1,2,**opts)
                plt.figure(figsize=(12,6))
                ax = plt.subplot(121, polar=True)
                _.plot(ax=ax)#es[0])
                plt.title("Before", fontsize=12)
                clock24_plot_commons(ax,unit=unit)#es[0])
                ax.set_rmax(np.nanpercentile(data[main],95)*1.5*scale)
                ax.set_rmin(0)
                ax = plt.subplot(122, polar=True, sharey=ax)
                if len(postloc):
                    _ = stack_wday_time(postloc,scale).copy()
                    _.loc[len(_)+1] = _.iloc[0]
                    _.index = radial_hours(len(_))
                    _.plot(ax=ax,#es[0],
                           ls="--")
                    plt.title("After", fontsize=12)
                clock24_plot_commons(ax,unit=unit)#es[0])
                # ax.set_rmax(np.nanpercentile(data[main],95)*1.5*scale)
                suptitle = "Day/Hour Median Noise levels %s\n"
                suptitle += "Station %s - [%s] Hz"
                plt.suptitle(suptitle % (sitedesc, channelcode[:]+main[-1], band), fontsize=16)
                plt.subplots_adjust(top=0.80)
                if save is not None:
                    fig = ax.figure
                    fig.savefig("%s-hourly.%s"%(basename,format), bbox_inches='tight', facecolor='w')
                if show:
                    plt.show()
            if type in ['sclockplots']:
                # Polar/clock Plot: 2x2 grid — reference / holiday /
                # lockdown / re-opened — all sharing one radial scale.
                radial_scale = np.nanpercentile(data[main],95)*1.5*scale
                print("clock plot - radial scale: {} nm".format(radial_scale))
                fig, axgrid = plt.subplots(ncols=2, nrows=2, figsize=(12,13), subplot_kw={'polar': True})
                ## REFERENCE
                ax = axgrid[0,0]
                ax.set_ylim((0.0,radial_scale))
                ax.title.set_text('Reference')
                _ = stack_wday_time(reference,scale).copy()
                _.loc[len(_)+1] = _.iloc[0]
                _.index = radial_hours(len(_))
                _.plot(ax=ax)#es[0])
                # plt.title("Reference", fontsize=12)
                clock24_plot_commons(ax,unit=unit)#es[0])
                ## HOLIDAY
                ax=axgrid[0,1]
                ax.set_ylim((0.0,radial_scale))
                ax.title.set_text('Holiday')
                if len(holiday):
                    _ = stack_wday_time(holiday,scale).copy()
                    _.loc[len(_)+1] = _.iloc[0]
                    _.index = radial_hours(len(_))
                    _.plot(ax=ax,#es[0],
                           ls="--")
                clock24_plot_commons(ax,unit=unit)#es[0])
                ## LOCKDOWN
                ax=axgrid[1,0]
                ax.set_ylim((0.0,radial_scale))
                ax.title.set_text('Lockdown')
                if len(lockdown):
                    _ = stack_wday_time(lockdown,scale).copy()
                    _.loc[len(_)+1] = _.iloc[0]
                    _.index = radial_hours(len(_))
                    _.plot(ax=ax,#es[0],
                           ls="--")
                ax.title.set_text('Lockdown')
                clock24_plot_commons(ax,unit=unit)
                ## REOPENING
                # ax = plt.subplot(224, polar=True, sharey=ax)
                ax=axgrid[1,1]
                ax.set_ylim((0.0,radial_scale))
                ax.title.set_text('Re-opened')
                if len(reopened):
                    _ = stack_wday_time(reopened,scale).copy()
                    _.loc[len(_)+1] = _.iloc[0]
                    _.index = radial_hours(len(_))
                    _.plot(ax=ax,#es[0],
                           ls="--")
                clock24_plot_commons(ax,unit=unit)
                ## TITLE FOR ALL PLOTS
                suptitle = "Day/Hour Median Noise levels %s\n"
                suptitle += "Station %s - [%s] Hz"
                plt.suptitle(suptitle % (sitedesc, channelcode[:]+main[-1], band), fontsize=16)
                # plt.subplots_adjust(top=0.2)
                if save is not None:
                    # fig = ax.figure
                    fig.savefig("%s-hourly.%s"%(basename,format), bbox_inches='tight', facecolor='w')
                if show:
                    plt.show()
def plotFrame(t):
    """Draw one animation frame at time *t*.

    For each dataset in ``mat`` (paired with an axis and a title), contour
    the depth change ``z1 - z0`` between the reference run ``mat0`` and the
    current run, overlay bathymetry contours, mask NaN areas, and scatter
    the type-6 observations falling in the 3-hour window around *t*.

    Uses ``fig``, ``plotAx``, ``levelList``, ``mat``, ``mat0``,
    ``titleList``, ``plotter``, ``cmap``, ``cgrey`` and ``obs`` from the
    enclosing scope.

    :param t: frame time in the units of ``mat1.t`` (model time axis).
    :returns: tuple of (contourf handles, observation plot handles,
        bathymetry contour handles), one entry per dataset.
    """
    fig.clf()
    ax = plotAx()
    cplot = []
    gplot = []
    qplot = []  # NOTE(review): never appended to or returned — dead variable.
    bcplot = []
    level1 = levelList[0]
    for mat1, ax1, title1 in zip(mat, ax, titleList):
        #Time
        fig.suptitle(plotter.num2time(t).strftime('%Y-%b-%d %H:%M'))
        fig.subplots_adjust()
        # Select the contour level slice matching level1.
        inCon = level1 == mat1.contours
        # Fractional time index for linear interpolation between snapshots.
        it = np.interp(t, mat1.t, np.arange(0, len(mat1.t)))
        it1 = np.minimum(int(it) + 1, len(mat1.t) - 1)
        # NOTE(review): z0 always comes from mat0 (the reference run), not
        # from the loop's mat1 — confirm this is intentional.
        z0 = mat0.z[:, :, inCon, int(it)] * (
            1 - it % 1) + mat0.z[:, :, inCon, it1] * (it % 1)
        z1 = mat1.z[:, :, inCon, int(it)] * (
            1 - it % 1) + mat1.z[:, :, inCon, it1] * (it % 1)
        z0 = np.squeeze(z0)
        z1 = np.squeeze(z1)
        # 1 where the field is NaN, 0 elsewhere.
        # NOTE(review): mask0 is computed but never used.
        mask0 = np.where(np.isnan(z0), np.ones(np.shape(z0)),
                         np.zeros(np.shape(z0)))
        mask1 = np.where(np.isnan(z1 - z0), np.ones(np.shape(z1)),
                         np.zeros(np.shape(z1)))
        # Debug: 95% spread of the depth change.
        print(
            [np.nanpercentile(z1 - z0, 2.5),
             np.nanpercentile(z1 - z0, 97.5)])
        # Filled contours of the depth change, skipping the |dz| <= 1 band.
        cplot1 = ax1.contourf(
            mat1.lon,
            mat1.lat,
            z1 - z0, [
                tick1 for tick1 in np.arange(-40.0, 40.01, 5.0)
                if np.abs(tick1) > 1
            ],
            cmap=cmap,
            extend='both')
        # Bathymetry contours: finer spacing for the shallow level.
        # NOTE(review): Axes.contour expects `linewidths` (plural);
        # `linewidth` is silently ignored here — confirm intended width.
        if level1 <= 1024.5:
            bcplot1 = ax1.contour(mat1.lon,
                                  mat1.lat,
                                  z1,
                                  levels=np.arange(-200, 0.1, 10.),
                                  colors='k',
                                  linewidth=.5)
        else:
            bcplot1 = ax1.contour(mat1.lon,
                                  mat1.lat,
                                  z1,
                                  levels=np.arange(-2000, 0.1, 25.),
                                  colors='k',
                                  linewidth=.5)
        ax1.set_title(title1)
        # Grey overlay hiding NaN cells.
        bcplot2 = ax1.contourf(mat1.lon,
                               mat1.lat,
                               mask1,
                               levels=[-.5, .5, 1],
                               cmap=cgrey,
                               vmin=0.5,
                               vmax=1,
                               alpha=.5)
        # 3-hour observation window centered on the frame time.
        tLim1 = 3. * int((t - 2) / 3) + np.array([2., 5.])
        obsll = [(lon1, lat1)
                 for (lon1, lat1, type1,
                      t1) in zip(obs['lon'], obs['lat'], obs['type'],
                                 obs['t'])
                 if type1 == 6 and tLim1[0] <= t1 <= tLim1[1]]
        lon1, lat1 = zip(*obsll)
        gplot1 = ax1.plot(lon1,
                          lat1,
                          '.',
                          color=(1., .55, 0.),
                          markersize=2,
                          fillstyle='full')
        #Colorbar
        fig.subplots_adjust(right=.8)
        cax = fig.add_axes([.8, .1, .02, .8])
        cbar = fig.colorbar(cplot1,
                            cax=cax,
                            orientation='vertical',
                            spacing='proportional')
        cbar.set_ticks([tick1 for tick1 in np.arange(-40.0, 40.1, 10.)])
        cbar.formatter = ticker.FuncFormatter(
            lambda x, pos: '{:.1f}'.format(x))
        cbar.update_ticks()
        cbar.set_label(r'Depth change [$\mathrm{m}$]')
        cplot.append(cplot1)
        gplot.append(gplot1)
        bcplot.append(bcplot1)
    #plt.tight_layout()
    return cplot, gplot, bcplot
# Fetch the "red" band time series for one MODIS pixel.
ts = w.time_series("mod13q1_512", "red", -12.0, -54.0,
                   start_date="2000-02-18", end_date="2006-01-01")

# One integer x position per observation: 0, 1, ..., num_values - 1.
num_values = len(ts.timeline)
indices = numpy.arange(num_values)


# Tick callback: translate an x position back to its acquisition date.
def format_date(x, pos=None):
    idx = numpy.clip(int(x + 0.5), 0, num_values - 1)
    return ts.timeline[idx].strftime("%d-%m-%Y")


fig, ax = pyplot.subplots()
ax.plot(indices, ts["red"], 'o-')
ax.xaxis.set_major_formatter(ticker.FuncFormatter(format_date))
fig.autofmt_xdate()
pyplot.show()
# Bar plot of log2 fold change vs number of age segments, restricted to
# segment counts below max_segs; error bars span the confidence interval.
sns.barplot(x="seg_index", y="log",
            data=firstfive.loc[firstfive.seg_index < max_segs],
            linewidth=2.5, facecolor=(1, 1, 1, 0), edgecolor=".2",
            yerr=(firstfive["ci_upper"] - firstfive["ci_lower"]))

ax.set(ylabel= "Fold Change v. Bkgd\n(log2-scaled)",\
       xlabel = "Number of Age Segments") #, ylim = (-1.2,0.5))
# Reference line at no change (log2 = 0).
plt.axhline(0, color="grey", linewidth=2.5)

# Ticks are stored as log2 values; display them back in linear scale.
ticks = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(round(2**x, 1)))
ax.yaxis.set_major_formatter(ticks)
ax.yaxis.set_major_locator(MultipleLocator(0.5))

plt.savefig("%sfig2b-reilly_age_seg_fold_change_matplotlib_rel_simple.pdf" % RE,
            bbox_inches="tight")

#%%


def get_arch_freq(df):
    # Count enhancers per (core_remodeling, mrca_2) architecture/age pair.
    # NOTE(review): age_cols is unused in the visible portion of this
    # function; the body may continue beyond this chunk.
    age_cols = ["core_remodeling", "mrca_2", "mrca_count"]
    age_counts = df.groupby(["core_remodeling",
                             "mrca_2"])["enh_id"].count().reset_index()
'%.1f' % (b * 100), ha='center', va='bottom', fontsize=14)
# (The line above closes a text-annotation call begun earlier in the file.)

# Cumulative heights of the first two layers, used as the baseline for the
# third layer of the stacked bars.
# NOTE(review): the loop variable `sum` shadows the builtin of that name.
d = []
for i in range(0, len(Bottom)):
    sum = Bottom[i] + Center[i]
    d.append(sum)
width = 0.73
# Three stacked bar layers: Bottom, Center (on Bottom), Top (on Bottom+Center).
p1 = plt.bar(ind, Bottom, width, color=color_styles[0])
p2 = plt.bar(ind, Center, width, bottom=Bottom, color=color_styles[1])
p3 = plt.bar(ind, Top, width, bottom=d, color=color_styles[2])
# Render the 0..1 y axis as percentages via the shared to_percent formatter.
plt.gca().yaxis.set_major_formatter(ticker.FuncFormatter(to_percent))
# Dotted guide lines at the 25% and 75% marks.
plt.axhline(0.75, linestyle=':', color='k')
plt.axhline(0.25, linestyle=':', color='k')
plt.ylim((0, 1))
plt.legend((p1[0], p2[0], p3[0]), labels, ncol=2, loc='center',
           bbox_to_anchor=(0.5, 0.65))
plt.tight_layout()
plt.savefig("resource_time_131.svg")
plt.show()
def plot_candles(pricing, title=None, volume_bars=False, volume_tech=None,
                 color_function=None, technicals=None, marker=None,
                 line_52=None, sep_technicals=None, sr_line=None):
    """Plots a candlestick chart using quantopian pricing data.

    Author: Daniel Treiman

    Args:
      pricing: A pandas dataframe with columns
        ['open_price', 'close_price', 'high', 'low', 'volume']
      title: An optional title for the chart
      volume_bars: If True, plots volume bars
      volume_tech: optional list of series overlaid on the volume panel
      color_function: A function which, given a row index and price series,
        returns a candle color.
      technicals: A list of additional data series to add to the chart.
        Must be the same length as pricing.
      marker: list or series of per-bar flags; bars with value > 0 get a
        marker at the close price.
      line_52: 52-day high/low levels, drawn as horizontal dashed lines.
      sep_technicals: series drawn in a separate lower panel (e.g.
        oscillators).
      sr_line: list of support / resistance / reference price levels,
        handed to the interactive line-drawing helper.
    """
    # TODO (from original notes): annotate OHLC values on the y axis and
    # draw support/resistance levels as lines on the price panel.
    def default_color(index, open_price, close_price, low, high):
        # Red candle when the bar closed below its open, green otherwise.
        return 'r' if open_price[index] > close_price[index] else 'g'
    color_function = color_function or default_color
    technicals = technicals or []
    sep_technicals = sep_technicals or []
    marker = marker or []
    line_52 = line_52 or []
    sr_line = sr_line or []
    volume_tech = volume_tech or []
    # Per-marker-series display style / color / legend label, by position.
    marker_shape = [
        'v', '^', 'v', '^', 'o', '<', '>', '1', '2', '3', '4', '8', 's', 'P',
        'p', '*', 'x', 'X'
    ]
    marker_color = ['m', 'k', 'y', 'c', 'r', 'g', 'b', '#0F0F0F0F']
    marker_label = [
        'up', 'down', 'p_up', 'p_down', 'n', 'n', 'n', 'n', 'n', 'n', 'n',
        'n', 'n', 'n', 'n', 'n', 'n', 'n'
    ]
    open_price = pricing['open']
    close_price = pricing['close']
    low = pricing['low']
    high = pricing['high']
    oclh = pricing[['open', 'close', 'low', 'high']]
    # Per-bar body extremes (unused below but kept for callers/debugging).
    oc_min = pd.concat([open_price, close_price], axis=1).min(axis=1)
    oc_max = pd.concat([open_price, close_price], axis=1).max(axis=1)
    # Panel layout: price always on top; volume and/or separate technicals
    # panels only when requested.
    if volume_bars and sep_technicals:
        fig, (ax1, ax2, ax3) = plt.subplots(
            3, 1, sharex=True, gridspec_kw={'height_ratios': [3, 1, 1]})
    elif volume_bars:
        fig, (ax1, ax2) = plt.subplots(
            2, 1, sharex=True, gridspec_kw={'height_ratios': [3, 1]})
        ax3 = None
    elif sep_technicals:
        fig, (ax1, ax3) = plt.subplots(
            2, 1, sharex=True, gridspec_kw={'height_ratios': [3, 1]})
        ax2 = None
    else:
        fig, ax1 = plt.subplots(1, 1)
        ax2 = None
        ax3 = None
    if title:
        ax1.set_title(title)
    x = np.arange(len(pricing))
    candle_colors = [
        color_function(i, open_price, close_price, low, high) for i in x
    ]
    # Integer x positions instead of dates so weekends/holidays leave no gap.
    ohlc = np.vstack((list(range(len(pricing))), pricing.values.T)).T
    candlestick_ohlc(ax1, ohlc, width=0.6, colorup='r', colordown='b')
    ax1.xaxis.grid(True, ls='dashed')
    ax1.xaxis.set_tick_params(which='major', length=3.0, direction='in',
                              top='off')
    # 52-day high/low reference levels.
    for idx, y_value in enumerate(line_52):
        ax1.axhline(y=y_value, ls='dashed', lw=0.7, color='g')
    # Assume minute frequency if first two bars are in the same day.
    frequency = 'minute' if (pricing.index[1] -
                             pricing.index[0]).days == 0 else 'day'
    time_format = '%Y-%m-%d'
    if frequency == 'minute':
        time_format = '%H:%M'
    # Set X axis tick labels.
    # NOTE(review): date_print_list (every 10th label) is built but unused;
    # the plt.xticks call below labels every bar instead.
    date_print_list = []
    k = 0
    for date in pricing.index:
        if k % 10 == 0:
            date_print_list.append(date.strftime(time_format))
        else:
            date_print_list.append('')
        k = k + 1
    xdate = [i.strftime(time_format) for i in pricing.index]
    def mydate(x, pos):
        # Map an integer tick position back to its formatted date string.
        try:
            return xdate[int(x)]
        except IndexError:
            return ''
    # plt.xticks(x, date_print_list, rotation='vertical')
    plt.xticks(x, [date.strftime(time_format) for date in pricing.index],
               rotation='vertical')
    ax1.xaxis.set_major_locator(ticker.MaxNLocator(10))
    ax1.xaxis.set_major_formatter(ticker.FuncFormatter(mydate))
    # ax1.format_xdata = mdates.DateFormatter('%Y-%m-%d')
    # Overlay indicator series on the price panel.
    for indicator in technicals:
        ax1.plot(x, indicator)
    # Scatter one marker series per entry in `marker`, at the close price of
    # every bar whose flag is positive.
    k = 0
    for marker_indic in marker:
        tmp_index = []
        for t in range(len(marker_indic)):
            if marker_indic[t] > 0:
                tmp_index.append(t)
        ax1.plot(tmp_index,
                 close_price[tmp_index],
                 marker_shape[k],
                 markersize=7,
                 color=marker_color[k],
                 label=marker_label[k])
        k += 1
    if volume_bars:
        volume = pricing['volume']
        # Scale volume to K/M units for readable labels.
        volume_scale = None
        scaled_volume = volume
        if volume.max() > 1000000:
            volume_scale = 'M'
            scaled_volume = volume / 1000000
        elif volume.max() > 1000:
            volume_scale = 'K'
            scaled_volume = volume / 1000
        ax2.bar(x, scaled_volume, color=candle_colors)
        volume_title = 'Volume'
        if volume_scale:
            volume_title = 'Volume (%s)' % volume_scale
        ax2.set_title(volume_title)
        ax2.xaxis.grid(True, ls='dashed')
        if volume_tech:
            # Overlays on the volume panel, scaled the same way as volume.
            for v_tech in volume_tech:
                v_tech_scale = v_tech
                if volume.max() > 1000000:
                    v_tech_scale = v_tech / 1000000
                elif volume.max() > 1000:
                    v_tech_scale = v_tech / 1000
                ax2.plot(x, v_tech_scale)
    if sep_technicals:
        for s_technic in sep_technicals:
            ax3.plot(x, s_technic)
        ax3.set_title('sep_technical')
        ax3.xaxis.grid(True, ls='dashed')
        # ax3.axhline(y=0, ls='dashed', lw=0.7, color='g')
        # For stochastic-style oscillators (slowD/slowK), draw the 20/80
        # overbought/oversold bands instead of a zero line.
        # NOTE(review): s_technic here leaks from the loop above, so the
        # guide lines are chosen from the LAST series only — confirm.
        if s_technic.max() > 20:
            ax3.axhline(y=80, ls='dashed', lw=0.7, color='g')
            ax3.axhline(y=20, ls='dashed', lw=0.7, color='g')
        else:
            ax3.axhline(y=0, ls='dashed', lw=0.7, color='g')
    # Interactive helpers: crosshair cursor snapping to bars, and click-to-
    # draw support/resistance lines.
    cursor = SnaptoCursor(ax1, x, oclh, ax2, ax3)
    click = DrawLineOnClick(ax1, x, pricing, sr_line)
    # cursor = SnaptoCursor(ax1, x, close_price)
    # cursor = Cursor(ax1)
    plt.connect('motion_notify_event', cursor.mouse_move)
    plt.connect('button_press_event', click.mouseClick)
    # fig.autofmt_xdate()
    # plt.legend(loc=0)
    plt.show()
@author: Mohammed Kamal

Plot filtered and cumulative data as the
agencyabb = ["DHS","DOC","DOD","DOJ","GSA", "HHS","SEC","TREAS","USDA","VA"]
"""
#import setuprefvar as st
import pandas as pd
from sqlalchemy import create_engine
#import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib
import linuxpostgres_cred as creds

# Thousands-separator axis formatter shared by the plots below.
y_format = mtick.FuncFormatter(
    lambda x, p: format(int(x), ','))  # make formatter

# Postgres connection built from credentials kept outside the repo.
conn_string = "postgresql://" + creds.PGUSER + ":" + creds.PGPASSWORD + "@" + creds.PGHOST + ":" + creds.PORT + "/" + creds.PGDATABASE
engine = create_engine(conn_string)

# One color per agency, matching the order of agencyabb above.
colorWheel = [
    '#329932', '#ff6961', '#6a3d9a', '#fb9a99', '#e31a1c', '#fdbf6f',
    '#ff7f00', '#cab2d6', '#d1e5f0', '#ffff99'
]
#agency_abb= st.agencyabb

# All-agency rows excluding the (incomplete) 2020 fiscal year, ordered for
# stable per-agency plotting.
df = pd.read_sql_query(
    'SELECT * FROM usaspending."allagency1" where action_date_fiscal_year::int <> 2020 \
order by agency_abb, action_date_fiscal_year', engine)
def showLines(*args, **kwargs):
    """Show 1-D data using :func:`~matplotlib.axes.Axes.plot`.

    :arg x: (optional) x coordinates. *x* can be an 1-D array or a 2-D
        matrix of column vectors.
    :type x: `~numpy.ndarray`

    :arg y: data array. *y* can be an 1-D array or a 2-D matrix of
        column vectors.
    :type y: `~numpy.ndarray`

    :arg dy: an array of variances of *y* which will be plotted as a
        band along *y*. It should have the same shape with *y*.
    :type dy: `~numpy.ndarray`

    :arg lower: an array of lower bounds which will be plotted as a
        band along *y*. It should have the same shape with *y* and should
        be paired with *upper*.
    :type lower: `~numpy.ndarray`

    :arg upper: an array of upper bounds which will be plotted as a
        band along *y*. It should have the same shape with *y* and should
        be paired with *lower*.
    :type upper: `~numpy.ndarray`

    :arg alpha: the transparency of the band(s) for plotting *dy*.
    :type alpha: float

    :arg beta: the transparency of the band(s) for plotting *lower* and
        *upper*.
    :type beta: float

    :arg gap: if True, pad each line with end points via ``addEnds``.
    :type gap: bool

    :arg label: a label (or list of labels, one per line) set on the lines.

    :arg ticklabels: user-defined tick labels for x-axis; either a list of
        strings (indexed by tick position) or a callable formatter.
    :type ticklabels: list

    :returns: ``(lines, polys)`` — the Line2D handles and the band polygons.
    """
    # note for developers: this function serves as a low-level
    # plotting function which provides basic utilities for other
    # plotting functions. Therefore showFigure is not handled
    # in this function as it should be already handled in the caller.
    ticklabels = kwargs.pop('ticklabels', None)
    dy = kwargs.pop('dy', None)
    miny = kwargs.pop('lower', None)
    maxy = kwargs.pop('upper', None)
    alpha = kwargs.pop('alpha', 0.5)
    beta = kwargs.pop('beta', 0.25)
    gap = kwargs.pop('gap', False)
    labels = kwargs.pop('label', None)

    from matplotlib import ticker
    from matplotlib.pyplot import gca  # cm/figure/xlim were never used

    ax = gca()
    lines = ax.plot(*args, **kwargs)

    polys = []

    for i, line in enumerate(lines):
        color = line.get_color()
        x, y = line.get_data()

        if gap:
            x_new, y_new = addEnds(x, y)
            line.set_data(x_new, y_new)
        else:
            x_new, y_new = x, y

        if labels is not None:
            if np.isscalar(labels):
                line.set_label(labels)
            else:
                try:
                    line.set_label(labels[i])
                except IndexError:
                    # BUGFIX: the message formatted len(line) — a Line2D has
                    # no len(); report the number of plotted lines instead.
                    raise ValueError(
                        'The number of labels ({0}) and that of y ({1}) do '
                        'not match.'.format(len(labels), len(lines)))

        # This helper must stay nested: it closes over the current line's y.
        def sub_array(a, i, tag='a'):
            """Return the band data for line *i*: *a* itself when it is a
            plain 1-D sequence, ``a[i]`` when it is a nested sequence."""
            ndim = 0
            if a is not None:
                if np.isscalar(a[0]):
                    ndim = 1  # a plain list (array)
                else:
                    ndim = 2  # a nested list (array)
            else:
                return None

            if ndim == 1:
                _a = a
            else:
                try:
                    _a = a[i]
                except IndexError:
                    # BUGFIX: previously formatted len(miny) and len(line) —
                    # wrong variable for a generic *a*, and Line2D has no
                    # len(). Use the actual argument and the line count.
                    raise ValueError(
                        'The number of {2} ({0}) and that of y ({1}) do '
                        'not match.'.format(len(a), len(lines), tag))

            if len(_a) != len(y):
                # BUGFIX: previously formatted len(_miny), a NameError for
                # any *a* other than miny.
                raise ValueError(
                    'The shapes of {2} ({0}) and y ({1}) do not '
                    'match.'.format(len(_a), len(y), tag))
            return _a

        if miny is not None and maxy is not None:
            _miny = sub_array(miny, i, tag='lower')
            _maxy = sub_array(maxy, i, tag='upper')

            if gap:
                _, _miny = addEnds(x, _miny)
                _, _maxy = addEnds(x, _maxy)

            poly = ax.fill_between(x_new, _miny, _maxy,
                                   alpha=beta, facecolor=color,
                                   edgecolor=None, linewidth=1,
                                   antialiased=True)
            polys.append(poly)

        if dy is not None:
            _dy = sub_array(dy, i, tag='dy')

            if gap:
                _, _dy = addEnds(x, _dy)

            poly = ax.fill_between(x_new, y_new - _dy, y_new + _dy,
                                   alpha=alpha, facecolor=color,
                                   edgecolor=None, linewidth=1,
                                   antialiased=True)
            polys.append(poly)

    ax.margins(x=0)
    if ticklabels is not None:
        if callable(ticklabels):
            ax.get_xaxis().set_major_formatter(
                ticker.FuncFormatter(ticklabels))
        else:
            # COMPAT FIX: ticker.IndexFormatter was removed in matplotlib
            # 3.5.  Emulate it with a FuncFormatter: label by nearest index,
            # blank when the tick falls outside the label list.
            labels_seq = list(ticklabels)

            def _index_label(x, pos):
                idx = int(round(x))
                if 0 <= idx < len(labels_seq):
                    return labels_seq[idx]
                return ''

            ax.get_xaxis().set_major_formatter(
                ticker.FuncFormatter(_index_label))

    ax.xaxis.set_major_locator(ticker.AutoLocator())
    ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())

    return lines, polys
def pimp_axis(x_or_y_ax):
    """Install the shared ``ticks_formatter`` on *x_or_y_ax* so its major
    tick labels are rendered without trailing zeros."""
    formatter = ticker.FuncFormatter(ticks_formatter)
    x_or_y_ax.set_major_formatter(formatter)
def test_readstkData(self):
    """Render a dark-themed candlestick chart with 10/50-day SMAs for one
    index, using integer x positions so non-trading days leave no gaps.

    Reference: https://zhuanlan.zhihu.com/p/29519040
    """
    from matplotlib import dates as mdates
    import mpl_finance as mpf
    from mpl_finance import candlestick_ohlc
    from matplotlib import ticker as mticker
    import numpy as np
    import pylab
    from matplotlib.pylab import date2num

    def format_date(x, pos):
        # Holidays and weekends are simply absent from the data, so any
        # out-of-range position renders as an empty label.
        if x < 0 or x > len(date_tickers) - 1:
            return ''
        return date_tickers[int(x)].strftime('%y-%m-%d')

    code = ['399004']
    MA1 = 10  # short moving-average window (days)
    MA2 = 50  # long moving-average window (days)
    start = datetime.datetime.now() - datetime.timedelta(300)
    end = datetime.datetime.now() - datetime.timedelta(10)
    days = readstkData(code, start, end)
    # print(days)
    # Reshape to integer-indexed OHLC columns expected by candlestick_ohlc.
    daysreshape = days.reset_index()
    daysreshape['DateTime'] = daysreshape['date'].dt.date
    daysreshape['DateTime2'] = mdates.date2num(daysreshape['DateTime'])
    daysreshape.drop('volume', axis=1, inplace=True)
    daysreshape.drop('date', axis=1, inplace=True)
    # print(daysreshape)
    daysreshape = daysreshape.reindex(
        columns=['DateTime', 'open', 'high', 'low', 'close'])
    # Sequential bar positions; format_date maps them back to dates.
    daysreshape['dates'] = np.arange(0, len(daysreshape))
    date_tickers = daysreshape.DateTime
    # Use a CJK-capable font so Chinese labels render.
    plt.rcParams['font.family'] = ['sans-serif']
    plt.rcParams['font.sans-serif'] = ['SimHei']
    Av1 = qa.MA(daysreshape.close, MA1)
    Av2 = qa.MA(daysreshape.close, MA2)
    # Number of bars for which the long SMA is defined.
    SP = len(daysreshape.DateTime.values[MA2 - 1:])
    fig = plt.figure(facecolor='#07000d', figsize=(15, 10))
    # plt.style.use('dark_background')
    # ax1 = plt.subplot2grid((6, 4), (1, 0), rowspan=4, colspan=4)
    ax1 = plt.subplot2grid((6, 4), (1, 0),
                           rowspan=4,
                           colspan=4,
                           facecolor='#07000d')
    candlestick_ohlc(
        ax1,
        quotes=daysreshape[['dates', 'open', 'close', 'high',
                            'low']].values[-SP:],
        width=.7,
        colorup='r',
        colordown='g',
        alpha=0.7)
    # colorup='#ff1717', colordown='#53c156')
    ax1.set_title('指数k线', fontsize=20)
    Label1 = str(MA1) + ' SMA'
    Label2 = str(MA2) + ' SMA'
    dates = daysreshape['dates']
    ax1.plot(dates.values[-SP:],
             Av1[-SP:],
             '#e1edf9',
             label=Label1,
             linewidth=1.5)
    ax1.plot(dates.values[-SP:],
             Av2[-SP:],
             '#4ee6fd',
             label=Label2,
             linewidth=1.5)
    ax1.grid(True, color='w')
    ax1.xaxis.set_major_locator(mticker.MaxNLocator(20))
    # ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
    ax1.xaxis.set_major_formatter(mticker.FuncFormatter(format_date))
    ax1.yaxis.label.set_color("w")
    ax1.spines['bottom'].set_color("#5998ff")
    ax1.spines['top'].set_color("#5998ff")
    ax1.spines['left'].set_color("#5998ff")
    ax1.spines['right'].set_color("#5998ff")
    ax1.tick_params(axis='y', colors='w')
    plt.gca().yaxis.set_major_locator(mticker.MaxNLocator(prune='upper'))
    ax1.tick_params(axis='x', colors='w')
    plt.ylabel('Stock price and Volume')
    fig.show()
def portfolioBanchmark(portfolio, port_rets, port_data, port_val, bench_rets,
                       bench_data):
    """Plot portfolio value against the Nifty 50 benchmark on twin y axes
    and save the figure under ``root_path + '/Figures'``.

    :param portfolio: object with a ``Portfolio_Name`` attribute (used for
        the title and the output filename).
    :param port_rets: portfolio returns — accepted but unused in this body.
    :param port_data: rebound inside the function to the merged portfolio
        value series (the incoming argument is not read).
    :param port_val: DataFrame with a 'Portfolio Value' column, indexed by
        date.
    :param bench_rets: benchmark returns; only its columns are renamed here.
    :param bench_data: benchmark quotes; renamed to ['Date', 'Close'].
    :returns: web path of the saved PNG (under '/static/Figures/').
    """
    bench_rets.columns = ['Date', 'Return']
    bench_data.columns = ['Date', 'Close']
    if not os.path.exists(root_path + '/Figures'):
        os.mkdir(root_path + '/Figures')
    # ----------Plot Performance--------------#
    #port_val = port_val.set_index('Date')
    port_values = port_val['Portfolio Value']
    port_values = pd.DataFrame(port_values)
    bench_values = bench_data.set_index('Date')
    # Inner join on date so both series cover the same range.
    perf = pd.merge(port_values,
                    bench_values,
                    left_index=True,
                    right_index=True)
    port_data = perf["Portfolio Value"]
    bench_d = perf["Close"]
    fig = plt.figure()
    ax = fig.add_subplot(111, facecolor='#576884')
    fig.set_size_inches(11.7, 8.27)
    # Twin axis: benchmark on the right scale, portfolio on the left.
    ax2 = ax.twinx()
    ax2.grid(None)
    # NOTE(review): debug prints left in — consider logging instead.
    print("########bench_d##########")
    print(bench_d)
    print("########port_data##########")
    print(port_data)
    lns1 = ax2.plot(bench_d, linestyle='-', color='#6aa527', label='Nifty50')
    lns2 = ax.plot(port_data, linestyle='-', color="white", label='Portfolio')
    fig.add_axes(ax, ax2)  # added these three lines
    # Combined legend for both axes.
    lns = lns1 + lns2
    labs = [l.get_label() for l in lns]
    ax.legend(lns, labs, loc=0)
    ax.grid(linestyle='--', alpha=0.2)
    # NOTE(review): printing the formatter object is debug leftover.
    print(tkr.FuncFormatter(lambda x, p: format(int(x), ',')))
    # Thousands separators on the portfolio-value axis.
    ax.get_yaxis().set_major_formatter(
        tkr.FuncFormatter(lambda x, p: format(int(x), ',')))
    #Annotate Last Price
    print(json_serial(port_val.index[-1]))
    bbox_props = dict(boxstyle='round', fc='w', ec='k', lw=1)
    ax.annotate(
        "{:0,.2f}".format(port_val["Portfolio Value"][-1]),
        (json_serial(port_val.index[-1]), port_val["Portfolio Value"][-1]),
        xytext=(json_serial(port_val.index[-1]),
                port_val["Portfolio Value"][-1]),
        bbox=bbox_props)
    # Hide the axes frame for a cleaner look.
    for spine in plt.gca().spines.values():
        spine.set_visible(False)
    plt.title(portfolio.Portfolio_Name +
              " Portfolio Performance vs. Nifty 50 Benchmark")
    plt.savefig(root_path + "/Figures/" + portfolio.Portfolio_Name +
                "_port_perf.png")
    plt.tight_layout()
    fig.tight_layout()
    return "/static/Figures/" + portfolio.Portfolio_Name + "_port_perf.png"
labels.append("EDT") plt.plot(list_to_dimensionless(ranges), list_to_dimensionless(fr_ratios)) plt.plot(list_to_dimensionless(ranges), list_to_dimensionless(fbarrad_ratios)) plt.plot(list_to_dimensionless(ranges), list_to_dimensionless(EDT_ratios)) plt.legend(labels) ax = plt.gca() ax.set_xscale('log') ax.set_yscale('log') plt.ylabel("Normalised perturbation acceleration [-]") plt.xlabel("Distance from sun [AU]") plt.ylim([1e-10, 1e1]) plt.grid(which='both') ax.xaxis.set_major_formatter( mticker.FuncFormatter(lambda y, _: '{:g}'.format(y))) plt.savefig(fig1savespace % "OD_perturbations.pdf") # plt.show() plt.figure() plt.plot(list_to_dimensionless(ranges), list_to_dimensionless(EDT_raw)) ax = plt.gca() ax.set_xscale('log') ax.set_yscale('log') plt.ylabel("accel [m/s**2]") plt.xlabel("distance from sun [AU]") plt.grid(which='both') ax.xaxis.set_major_formatter( mticker.FuncFormatter(lambda y, _: '{:g}'.format(y))) ax.yaxis.set_major_formatter(
def view3(master, data, title=''):
    """Render a candlestick chart with volume, MACD and KDJ panels into a
    Tkinter widget.

    Builds a 4-panel figure (price/MA, volume, MACD, KDJ) styled like a
    Chinese stock-terminal chart, embeds it in ``master`` via
    FigureCanvasTkAgg, and returns the canvas.

    Args:
        master: Tk container widget the chart canvas is packed into.
        data: DataFrame of daily bars; must provide the columns renamed
            below (trade_date/open/close/high/low/vol plus MACD/KDJ fields
            added by KDJ()).  # assumes KDJ() adds DIF/DEA/MACD/K/D/J — TODO confirm
        title: Chart title (currently unused; the set_title call is
            commented out below).

    Returns:
        FigureCanvasTkAgg: the packed canvas holding the figure.
    """
    # Get the data.
    # calc_kdj(data)  # compute KDJ values into the DataFrame
    # daily = LocalData.getInstance().data('000001', 0)
    daily = data.copy()
    daily = KDJ(daily)
    # Normalize column names to the short form used by this function.
    # NOTE(review): both 'macd' and 'MACD' map to 'bar' here — if the input
    # frame ever contains BOTH columns this produces duplicate 'bar'
    # columns; verify upstream only one of them exists.
    daily.rename(columns={
        'trade_date': 'date',
        'vol': 'volume',
        'macd': 'bar',
        'DIF': 'dif',
        'DEA': 'dea',
        'MACD': 'bar',
        'K': 'k',
        'D': 'd',
        'J': 'j'
    }, inplace=True)
    # Keep only the most recent 150 rows; column order becomes
    # date,open,close,high,low,volume,dif,dea,bar,k,d,j — the positional
    # matix[:, i] slices below depend on exactly this order.
    df = daily[
        -150:]
    df = df[[
        'date', 'open', 'close', 'high', 'low', 'volume', 'dif', 'dea', 'bar',
        'k', 'd', 'j'
    ]]
    # Replace dates with integer positions so x-spacing is uniform;
    # the original date labels are kept for the tick formatter.
    date_tickers = df.date.values
    df.date = range(0, len(df))  # dates become sequence numbers
    matix = df.values  # ndarray in the candlestick format (date, open, close, high, low, volume, ...)
    xdates = matix[:, 0]  # x-axis data (the day-index sequence)

    # Global look-and-feel via rcParams.
    plt.rc('font', family='Microsoft YaHei')  # CJK-capable font so Chinese labels render
    plt.rc('figure', fc='k')  # figure background (black)
    plt.rc('text', c='#800000')  # text color
    plt.rc('axes',
           axisbelow=True,
           xmargin=0,
           fc='k',
           ec='#800000',
           lw=1.5,
           labelcolor='#800000',
           unicode_minus=False)  # axes props (grid below artists, no left gap, bg, edge color, line width, label color, minus-sign fix)
    plt.rc('xtick', c='#d43221')  # x tick label color
    plt.rc('ytick', c='#d43221')  # y tick label color
    plt.rc('grid', c='#800000', alpha=0.9, ls=':', lw=0.8)  # grid (color, alpha, style, width)
    plt.rc('lines', lw=0.8)  # global line width

    # Create the figure and four stacked axes sharing the x axis.
    fig = plt.figure(figsize=(16, 8))
    left, width = 0.05, 0.9
    ax1 = fig.add_axes([left, 0.6, width, 0.35])  # left, bottom, width, height
    ax2 = fig.add_axes([left, 0.45, width, 0.15], sharex=ax1)  # shares ax1's x axis
    ax3 = fig.add_axes([left, 0.25, width, 0.2], sharex=ax1)  # shares ax1's x axis
    ax4 = fig.add_axes([left, 0.05, width, 0.2], sharex=ax1)  # shares ax1's x axis
    # Hide x tick labels on the upper three panels; only ax4 shows them.
    plt.setp(ax1.get_xticklabels(), visible=False)
    plt.setp(ax2.get_xticklabels(), visible=False)
    plt.setp(ax3.get_xticklabels(), visible=False)

    # --- Candlestick panel (ax1) ---
    def format_date(x, pos=None):
        # Map a day index back to its original date label; blank outside range.
        return '' if x < 0 or x > len(date_tickers) - 1 else date_tickers[int(
            x)]

    ax1.xaxis.set_major_formatter(
        ticker.FuncFormatter(format_date))  # custom x-axis date formatter
    ax1.xaxis.set_major_locator(
        ticker.MultipleLocator(max(int(len(df) / 15),
                                   5)))  # at most ~15 date ticks, at least every 5, so labels don't crowd
    # mpf.candlestick_ochl(ax1, matix, width=0.5, colorup='#ff3232', colordown='#54fcfc')
    # The block below replaces the commented call above to get a
    # terminal-style look the helper could not produce.
    opens, closes, highs, lows = matix[:, 1], matix[:, 2], matix[:,
                                                                 3], matix[:,
                                                                           4]  # extract OCHL columns
    avg_dist_between_points = (xdates[-1] - xdates[0]) / float(
        len(xdates))  # average spacing between consecutive dates
    delta = avg_dist_between_points / 4.0  # half-width offset for the candle body rectangles
    # Four corners of each candle body rectangle.
    barVerts = [((date - delta, open), (date - delta, close),
                 (date + delta, close), (date + delta, open))
                for date, open, close in zip(xdates, opens, closes)
                ]
    # Lower-shadow segments (low -> bottom of body).
    rangeSegLow = [
        ((date, low), (date, min(open, close)))
        for date, low, open, close in zip(xdates, lows, opens, closes)
    ]
    # Upper-shadow segments (high -> top of body).
    rangeSegHigh = [
        ((date, high), (date, max(open, close)))
        for date, high, open, close in zip(xdates, highs, opens, closes)
    ]
    rangeSegments = rangeSegLow + rangeSegHigh  # all shadow segments
    # Body fill colors: True (close > open, i.e. up day) vs False (down day).
    cmap = {
        True: mcolors.to_rgba('#000000', 1.0),
        False: mcolors.to_rgba('#54fcfc', 1.0)
    }
    inner_colors = [cmap[opn < cls]
                    for opn, cls in zip(opens, closes)]  # per-candle body fill colors
    # Body edge colors (also reused for shadows and the volume bars below).
    cmap = {
        True: mcolors.to_rgba('#ff3232', 1.0),
        False: mcolors.to_rgba('#54fcfc', 1.0)
    }
    updown_colors = [cmap[opn < cls] for opn, cls in zip(opens, closes)
                     ]  # per-candle edge colors
    ax1.add_collection(
        LineCollection(
            rangeSegments,
            colors=updown_colors,
            linewidths=0.5,
            antialiaseds=False))  # shadows (color, width; disabling AA seemed to have no effect)
    ax1.add_collection(
        PolyCollection(barVerts,
                       facecolors=inner_colors,
                       edgecolors=updown_colors,
                       antialiaseds=False,
                       linewidths=0.5))  # candle bodies (fill, edge, AA, width)

    # --- Moving averages on the price panel ---
    mav_colors = ['#ffffff', '#d4ff07', '#ff80ff'
                  ]  # MA line colors, cycled; spares: '#00e600', '#02e2f4', '#ffffb9', '#2a6848'
    mav_period = [5, 10, 20]  # MA periods to draw; extend with 30, 60, 120, 180 if desired
    n = len(df)
    for i in range(len(mav_period)):
        if n >= mav_period[i]:  # only draw an MA when enough data exists
            mav_vals = df['close'].rolling(mav_period[i]).mean().values
            ax1.plot(xdates,
                     mav_vals,
                     c=mav_colors[i % len(mav_colors)],
                     label='MA' + str(mav_period[i]))
    # ax1.set_title(title)  # title (disabled)
    ax1.yaxis.set_ticks_position('right')  # y ticks on the right
    ax1.grid(True)  # draw grid
    ax1.legend(loc='upper left')  # legend in the upper-left corner
    # ax1.xaxis_date()  # seems to make no difference either way?

    # --- Volume panel (ax2) with 5/10-day volume MAs ---
    # ax2.bar(xdates, matix[:, 5], width=0.5, color=updown_colors)  # simple bar version
    barVerts = [((date - delta, 0), (date - delta, vol), (date + delta, vol),
                 (date + delta, 0))
                for date, vol in zip(xdates,
                                     matix[:, 5])]  # four corners of each volume bar
    ax2.add_collection(
        PolyCollection(barVerts,
                       facecolors=inner_colors,
                       edgecolors=updown_colors,
                       antialiaseds=False,
                       linewidths=0.5))  # volume bars (fill, edge, AA, width)
    if n >= 5:  # 5-day volume MA, same approach as the price MAs
        vol5 = df['volume'].rolling(5).mean().values
        ax2.plot(xdates, vol5, c='y', label='VOL5')
    if n >= 10:  # 10-day volume MA, same approach as the price MAs
        vol10 = df['volume'].rolling(10).mean().values
        ax2.plot(xdates, vol10, c='w', label='VOL10')
    ax2.yaxis.set_ticks_position('right')  # y ticks on the right
    ax2.legend(loc='upper left')  # legend in the upper-left corner
    ax2.grid(True)  # draw grid
    # ax2.set_ylabel('成交量')  # y-axis label ("volume"), disabled

    # --- MACD panel (ax3) ---
    difs, deas, bars = matix[:, 6], matix[:, 7], matix[:, 8]  # extract MACD columns
    ax3.axhline(0, ls='-', c='g', lw=0.5)  # zero line
    ax3.plot(xdates, difs, c='w', label='DIFF')  # DIFF line
    ax3.plot(xdates, deas, c='y', label='DEA')  # DEA line
    # ax3.bar(xdates, df['bar'], width=0.05, color=bar_colors)  # bar() gave
    # inconsistent line widths, hence the LineCollection below instead
    cmap = {
        True: mcolors.to_rgba('r', 1.0),
        False: mcolors.to_rgba('g', 1.0)
    }  # MACD histogram colors: red above 0, green below
    bar_colors = [cmap[bar > 0] for bar in bars]  # per-bar color list
    vlines = [((date, 0), (date, bars[date]))
              for date in range(len(bars))]  # MACD histogram segments (0 -> value)
    ax3.add_collection(
        LineCollection(vlines,
                       colors=bar_colors,
                       linewidths=0.5,
                       antialiaseds=False))  # MACD histogram (color, width, AA)
    ax3.legend(loc='upper left')  # legend in the upper-left corner
    ax3.yaxis.set_ticks_position('right')  # y ticks on the right
    ax3.grid(True)  # draw grid

    # --- KDJ panel (ax4) ---
    K, D, J = matix[:, 9], matix[:, 10], matix[:, 11]  # extract KDJ columns
    ax4.axhline(0, ls='-', c='g', lw=0.5)  # zero line
    ax4.yaxis.set_ticks_position('right')  # y ticks on the right
    ax4.plot(xdates, K, c='y', label='K')  # K line
    ax4.plot(xdates, D, c='c', label='D')  # D line
    ax4.plot(xdates, J, c='m', label='J')  # J line
    ax4.legend(loc='upper left')  # legend in the upper-left corner
    ax4.grid(True)  # draw grid

    # set useblit = True on gtkagg for enhanced performance
    # from matplotlib.widgets import Cursor  # mouse crosshair
    # cursor = Cursor(ax1, useblit=True, color='red', linewidth=2)

    # Embed the figure into the Tk container and return the canvas.
    canvas = FigureCanvasTkAgg(fig, master=master)
    canvas.get_tk_widget().pack(fill=tk.BOTH, expand=1)
    return canvas
# Build a 2x3 figure: histograms (Vp, Vs, density) on the top row,
# matching statistics tables on the bottom row; save as fig1.png.
f3 = plt.figure(facecolor='w', figsize=(18, 7))
# Grid for the axes: top row (height ratio 2) holds the histograms,
# bottom row (height ratio 1) holds the number tables.
gs = gridspec.GridSpec(2, 3, height_ratios=[2, 1])
ax_vp = f3.add_subplot(gs[0, 0])
ax_vs = f3.add_subplot(gs[0, 1])
ax_den = f3.add_subplot(gs[0, 2])
ax_vp = plothisto(vp_c, vp_nc, vp_bin_size, ax_vp)
# Raw strings: '\m' in a non-raw string is an invalid escape sequence
# (SyntaxWarning since Python 3.12, slated to become an error).
ax_vp.set_title(r'$\mathregular{V_P}$, м/с')
ax_vs = plothisto(vs_c, vs_nc, vs_bin_size, ax_vs)
ax_vs.set_title(r'$\mathregular{V_S}$, м/с')
ax_den = plothisto(den_c, den_nc, den_bin_size, ax_den)
ax_den.set_title(r'Плотность, $\mathregular{г/см^3}$')
# Decimal comma on the density x-axis (locale style). str.replace is safe
# even when str(x) contains no '.' (e.g. '1e-05'), unlike the previous
# str.index-based slicing, which raised ValueError in that case.
ax_den.get_xaxis().set_major_formatter(
    tkr.FuncFormatter(lambda x, pos: str(x).replace('.', ',')))
# Add the statistics tables under each histogram.
ax_vp_tbl = f3.add_subplot(gs[1, 0])
htbl_vp = add_table_to_ax(ax_vp_tbl, stats_for_table(vp_c, vp_nc))
ax_vs_tbl = f3.add_subplot(gs[1, 1])
htbl_vs = add_table_to_ax(ax_vs_tbl, stats_for_table(vs_c, vs_nc))
ax_den_tbl = f3.add_subplot(gs[1, 2])
htbl_den = add_table_to_ax(ax_den_tbl,
                           stats_for_table(den_c, den_nc, isint=False))
f3.tight_layout()
f3.savefig(r'fig1.png', bbox_inches='tight')