def plot_dco_values(ax, values, color="k"):
    # Fit two overlapping linear segments of the temperature range separately
    interpol1 = {"temperature": values["temperature"][0:7], "values": values["values"][0:7]}
    fit1 = pylab.polyfit(interpol1["temperature"], interpol1["values"], 1)
    print("m={} b={}".format(fit1[0], fit1[1]))
    fit_fn1 = pylab.poly1d(fit1)
    interpol2 = {"temperature": values["temperature"][6:14], "values": values["values"][6:14]}
    fit2 = pylab.polyfit(interpol2["temperature"], interpol2["values"], 1)
    print("m={} b={}".format(fit2[0], fit2[1]))
    fit_fn2 = pylab.poly1d(fit2)
    plot = ax.plot(
        interpol1["temperature"], fit_fn1(interpol1["temperature"]), "k-",
        interpol2["temperature"], fit_fn2(interpol2["temperature"]), "k-",
        # values['temperature'], values['values'], '{}-'.format(color),
        values["temperature"], values["values"], "{}o".format(color),
        markersize=5,
    )
    pylab.setp(plot[0], linewidth=2)
    pylab.setp(plot[1], linewidth=2)
    return plot
def regression(x, y, df, ax=None):
    """
    Plot x against y and compute the regression function.

    Parameters:
        x: Name of the x column
        y: Name of the y column
        df: DataFrame containing the values
        ax: Axes to draw on; a new one is created if omitted

    Returns:
        ax, eq, corr
        ax: Axes (plot)
        eq: first-degree linear regression equation
        corr: correlation coefficient
    """
    if ax is None:
        ax = _new_default_axe('defaut')
    _x = df[x]
    _y = df[y]
    fit = plb.polyfit(_x, _y, 1)
    fit_fn = plb.poly1d(fit)  # fit_fn is now a function which takes in x and returns an estimate for y
    plt.plot(_x, _y, 'yo', _x, fit_fn(_x), '--k', axes=ax)
    try:
        ax = plt.gca()
        ax.set_xlabel(x)
        ax.set_ylabel(y)
        plt.draw()
    except Exception:
        pass
    eq = plb.poly1d(fit)
    corr = stats.correlation(_y, _x)
    return ax, eq, corr
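# A minimal, self-contained sketch of the polyfit/poly1d pattern that
# regression() wraps. The DataFrame, column names, slope, and noise level
# below are invented purely for illustration; they are not from the source.
import numpy as np
import pandas as pd
import pylab as plb

rng = np.random.default_rng(0)
df_demo = pd.DataFrame({"x": np.linspace(0, 10, 50)})
df_demo["y"] = 2.0 * df_demo["x"] + 1.0 + rng.normal(scale=0.5, size=len(df_demo))

fit_demo = plb.polyfit(df_demo["x"], df_demo["y"], 1)   # [slope, intercept]
fit_fn_demo = plb.poly1d(fit_demo)                      # callable: fit_fn_demo(x) -> predicted y
print(fit_fn_demo)                                      # e.g. "2.01 x + 0.95"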
def __init__(self, coeff_list):
    # NB: we adopt the weak-term-first convention for inputs
    self.coeff_list = coeff_list
    self.q = pylab.poly1d(coeff_list[::-1])  # poly1d expects the highest-order term first
    self.qd = pylab.polyder(self.q)          # first derivative
    self.qdd = pylab.polyder(self.qd)        # second derivative
    self.degree = self.q.order
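# A usage sketch, assuming this __init__ belongs to a polynomial wrapper class;
# the class name "Poly" is hypothetical, chosen here only for illustration.
import pylab

class Poly(object):
    def __init__(self, coeff_list):
        # weak-term-first: [c0, c1, c2] means c0 + c1*t + c2*t**2
        self.coeff_list = coeff_list
        self.q = pylab.poly1d(coeff_list[::-1])
        self.qd = pylab.polyder(self.q)
        self.qdd = pylab.polyder(self.qd)
        self.degree = self.q.order

p = Poly([1.0, 0.0, 3.0])  # represents 1 + 3*t**2
print(p.q(2.0))    # 13.0 -> value at t = 2
print(p.qd(2.0))   # 12.0 -> first derivative 6*t
print(p.qdd(2.0))  # 6.0  -> second derivative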
def analyze(self):
    logging.info('Analyze and plot results')
    with tb.open_file(self.output_filename + '.h5', 'r+') as in_file_h5:
        data = in_file_h5.root.plsr_dac_data[:]
        # Calculate mean PlsrDAC transfer function
        mean_data = np.zeros(shape=(len(self.scan_parameter_steps),),
                             dtype=[(self.scan_parameter, np.int32),
                                    ('voltage_mean', np.float64),
                                    ('voltage_rms', np.float64)])
        for index, parameter in enumerate(self.scan_parameter_steps):
            mean_data[self.scan_parameter][index] = parameter
            mean_data['voltage_mean'][index] = data['voltage'][data[self.scan_parameter] == parameter].mean()
            mean_data['voltage_rms'][index] = data['voltage'][data[self.scan_parameter] == parameter].std()
        plt.errorbar(self.scan_parameter_steps, mean_data['voltage_mean'], mean_data['voltage_rms'])
        # Plot and fit result
        x, y, y_err = np.array(self.scan_parameter_steps), mean_data['voltage_mean'], mean_data['voltage_rms']
        fit = polyfit(x[np.logical_and(x >= self.fit_range[0], x <= self.fit_range[1])],
                      y[np.logical_and(x >= self.fit_range[0], x <= self.fit_range[1])], 1)
        fit_fn = poly1d(fit)
        plt.clf()
        plt.errorbar(x, y, y_err, label='data')
        plt.plot(x, fit_fn(x), '--k', label=str(fit_fn))
        plt.title(self.scan_parameter + ' calibration')
        plt.xlabel(self.scan_parameter)
        plt.ylabel('Voltage [V]')
        plt.grid(True)
        plt.legend(loc=0)
        plt.savefig(self.output_filename + '.pdf')
        # Store result in file
        self.register.calibration_parameters['Vcal_Coeff_0'] = fit[1] * 1000.  # store in mV
        self.register.calibration_parameters['Vcal_Coeff_1'] = fit[0] * 1000.  # store in mV/DAC
def scatterPlot(actual, predicted, dataset_name=''):
    '''
    Demonstrate Scatter Plot generation of the actual labels vs predicted labels
    This requires Matplotlib installed on the local system.

    Inputs:
    =======
    actual: (list) a list of actual values for a label column
    predicted: (list) a list of predicted values for a label column

    Outputs:
    ========
    Displays the scatter plot in a window
    '''
    try:
        import matplotlib, pylab
        from pylab import poly1d, polyfit, plot
    except ImportError:
        print('Matplotlib/Pylab does not exist, skipping Scatter Plot Demo')
        return
    if not actual or not predicted:
        return
    # Line of best fit
    fit = polyfit(actual, predicted, 1)
    fit_func = poly1d(fit)
    matplotlib.pyplot.scatter(actual, predicted, facecolors='none', edgecolors=COLOR_LIGHT_RED, s=50, linewidth=2)
    plot(actual, fit_func(actual), 'k')
    pylab.title('Scatter plot of Actual Vs Predicted values for dataset : {dataset_name}'.format(dataset_name=dataset_name), weight='bold')
    pylab.xlabel('Actual', weight='bold')
    pylab.ylabel('Predicted', weight='bold')
    pylab.show()
def __init__(self, data, name, polyfit_degree=7):
    self.data = data
    reflectance_curve = pylab.polyfit([wavelength for wavelength, reflectance in data],
                                      [reflectance for wavelength, reflectance in data],
                                      polyfit_degree)
    self.reflectance_curve = pylab.poly1d(reflectance_curve)
    self.name = name
def plot_variances(datasets, labels, xlabel=None, ylabel=None, title=None):
    fig, ax = plt.subplots()
    variances = [np.log(np.var(d)) for d in datasets]
    num_labels = [float(l) for l in labels]
    fit = pylab.polyfit(num_labels, variances, 1)
    fit_fn = pylab.poly1d(fit)
    lin_approx = fit_fn(num_labels)
    r_sq = r_squared(variances, lin_approx)
    secondary_num_labels = [num_labels[0] - num_labels[1]] + num_labels + [num_labels[-1] + num_labels[1]]
    ax.plot(num_labels, variances, 'yo',
            secondary_num_labels, fit_fn(secondary_num_labels), '--k')
    ax.annotate('$(r^2 = {0:.2f})$'.format(r_sq), (0.90, 0.495), xycoords='axes fraction')
    plt.xlim([secondary_num_labels[0], secondary_num_labels[-1]])
    if xlabel is not None:
        plt.xlabel(xlabel, labelpad=20)
    if ylabel is not None:
        plt.ylabel(ylabel, labelpad=20)
    if title is not None:
        plt.title(title)
    plt.show()
def main():
    # get list of files
    files = filelist("files")
    # run through each channel
    for ch in range(32):
        means = np.zeros(len(files) // 2)
        varis = np.zeros(len(files) // 2)
        # process pairs of images
        for i in range(0, len(files), 2):
            im1 = readfile(files[i])[:, ch * 64:ch * 64 + 64]
            im2 = readfile(files[i + 1])[:, ch * 64:ch * 64 + 64]
            print("processing: ", files[i], files[i + 1])
            # get sum of means
            som = summean(im1, im2)
            # get variance of difference
            vod = diffvar(im1, im2)
            means[i // 2] = som
            varis[i // 2] = vod
        print('means:', means)
        print('variances: ', varis)
        fit = pl.polyfit(varis, means, 1)
        fit_fn = pl.poly1d(fit)
        print("e-/ADU = ", fit[0])
        plt.plot(varis, means, 'o', varis, fit_fn(varis), '--')
        plt.legend(['Channel ' + str(ch), str(round(fit[0], 3)) + ' e-/ADU'], loc=9)
        plt.xlabel('Variance')
        plt.ylabel('Mean')
        plt.title('Conversion Gain')
        plt.savefig('ch' + str(ch).zfill(2) + '.png', bbox_inches='tight')
        plt.clf()
    return 0
def visualise(fileName, title="", linearFit=False):
    """
    Draw a graph representing the result. A bit verbose, as that seems to be
    the only way to make the borders gray.

    fileName = path and name of the pickled results
    """
    with open(fileName, "rb") as f:
        data = pickle.load(f)
    fig = plt.figure()
    p = fig.add_subplot(111)
    if not linearFit:
        p.plot(data[0], data[1], 'bo-', label="sentiment")
        p.plot([data[0][0], data[0][-1]], [data[1][0], data[1][-1]], 'g',
               label="straight line through first and last point")
    else:
        fit = polyfit(data[0], data[1], 1)
        fitFunc = poly1d(fit)
        p.plot(data[0], data[1], 'ro', label='sentiment')
        p.plot(data[0], fitFunc(data[0]), "--k", label="linear fit")
    p.legend(prop={'size': 10}, frameon=False)
    plt.ylabel("Average happiness")
    plt.xlabel("Rating")
    for e in ['bottom', 'top', 'left', 'right']:
        p.spines[e].set_color('gray')
    if title:
        plt.title(title)
    plt.show()
def qqplot(x, y, df, interval=0.5, ax=None):
    """
    Draw a QQ plot of x against y, with a confidence interval around the
    regression line.

    Parameters:
        x: Name of the x column
        y: Name of the y column
        df: DataFrame containing the values
        nb: Number of quantiles to compute in the range [0, 1.]
        interval: (default=0.5) [0-1.], confidence interval around the regression line
        ax: Axes to draw the qqplot on; a new one is created if omitted
    """
    if ax is None:
        ax = _new_default_axe('defaut')
    _x = df[x]
    _y = df[y]
    fit = plb.polyfit(_x, _y, 1)
    fit_fn = plb.poly1d(fit)  # fit_fn is now a function which takes in x and returns an estimate for y
    # Plot the points and the regression line
    fit_y = fit_fn(_x)
    plt.plot(_x, _y, 'yo', _x, fit_y, '--k', axes=ax)
    ax.set_xlabel(x)
    ax.set_ylabel(y)
    # Then draw the interval lines around the regression line
    plt.plot(_x, fit_y + fit_y * interval, '-r')
    plt.plot(_x, fit_y - fit_y * interval, '-r')
    return ax
def linear_reg_plotter(group, x, y):
    plt.axes(facecolor="#777777")  # 'axisbg' keyword; renamed to 'facecolor' in current matplotlib
    fit = pylab.polyfit(x, y, 1)
    fit_fn = pylab.poly1d(fit)
    plt.scatter(x, y, color=node_color[group])
    plt.plot(x, fit_fn(x), color=node_color[group])
def polyfit_anno(x, y, order=1, color='k'):
    # x and y are dicts; fit on their values (list() keeps this working on Python 3).
    # 'ax' is assumed to be a module-level Axes object.
    xv, yv = list(x.values()), list(y.values())
    fit = pl.polyfit(xv, yv, order)
    fit_fn = pl.poly1d(fit)
    pl.plot(xv, fit_fn(xv), color)
    midpoint = (min(xv) + max(xv)) / 2
    minpointy = min(yv) / 1.5
    # about arrow properties: http://matplotlib.org/1.3.1/users/annotations_guide.html
    ax.annotate(fit_fn, xy=(midpoint, fit_fn(midpoint)), xytext=(1.2 * midpoint, minpointy),
                arrowprops=dict(arrowstyle="->", ec=color))
def age_vs_matches():
    # opens excel file and allows for python to plot
    import matplotlib.pyplot as graph
    from pylab import polyfit
    from pylab import poly1d
    from xlrd import open_workbook
    from xlutils.copy import copy
    book = open_workbook('genecomparison2.xls', formatting_info=True)
    wbook = copy(book)
    sheet8 = wbook.get_sheet(7)
    genecompare = open('genecomparison.txt', 'r')
    ages_matches = []
    ages = []
    matches = []
    genecompare.readline()
    k = 1
    # determines the number of matches per sample, and creates a list of the
    # ages and number of matches for that age
    for line in genecompare:
        match = 0
        genes = line[2:]
        cols = line.split('\t')
        ages.append(int(cols[1]))
        for i in genes:
            if i == 'y':
                match = match + 1
        ages_matches.append([int(cols[1]), match])
        matches.append(match)
    print(ages, len(ages))
    print(matches, len(matches))
    print(ages_matches, len(matches))
    # plots a scatter plot of each age and total matches for that age in python
    graph.scatter(ages, matches)
    graph.show()
    # creates a trendline for a scatter plot
    fit = polyfit(ages, matches, 1)
    fit_fn = poly1d(fit)  # fit_fn is now a function which takes in an age and returns an estimate for matches
    graph.plot(ages, matches, 'yo', ages, fit_fn(ages), '--k')
    graph.show()
    data = (ages, matches)
    graph.boxplot(ages, matches, vert=False)
    graph.ylabel('Number of Cancer Gene Mutations')
    graph.xlabel('Age')
    graph.title('Total number of Cancer Gene Mutations per age')
    graph.plot(ages, fit_fn(ages), '--k')
    graph.show()
    sheet8.write(0, 0, 'age')
    sheet8.write(0, 1, 'total number of cancer gene mutations')
    # writes a list of ages and matches so we can also plot in excel and have tabulated data
    for i in ages_matches:
        sheet8.write(k, 0, i[0])
        sheet8.write(k, 1, i[1])
        k = k + 1
    wbook.save('genecomparison2.xls')
def vizualizationClusters(clusters, pr, vol, Name=0, picFormat="png", withLabels=False):
    numberOfClusters = len(clusters)
    # Cycle through 8 base colors, repeated so long cluster lists do not run out
    # (the original literal list had a missing comma that produced an invalid 'wr' entry)
    colors = ['r', 'b', 'g', 'c', 'k', 'm', 'y', 'w'] * 12
    fig = pl.figure()
    R2 = {}
    R2all = 0
    labels = []
    xall = []
    yall = []
    for i in range(numberOfClusters):
        xNoTest = []
        yNoTest = []
        xTest = []
        yTest = []
        for j in range(clusters[i].getLength()):
            dateAndHour = str(clusters[i].getElement(j))
            labels.append(dateAndHour)
            if clusters[i].points[j].isTest == 0:
                xNoTest.append(vol[dateAndHour])
                yNoTest.append(pr[dateAndHour])
            else:
                xTest.append(vol[dateAndHour])
                yTest.append(pr[dateAndHour])
        fit = pl.polyfit(xNoTest, yNoTest, 1)
        fit_fn = pl.poly1d(fit)
        R2[i] = evaluationOfR2(xTest, fit_fn, yTest)
        R2all = R2all + R2[i]
        pl.plot(xNoTest, yNoTest, 'yo', xNoTest, fit_fn(xNoTest), '--k', color=colors[i])
        pl.plot(xTest, yTest, 'y*', color=colors[i])
        pl.xlabel("Volume, MWh")
        pl.ylabel("Price, Rubles per MWh")
        xall = xall + xNoTest + xTest
        yall = yall + yNoTest + yTest
    # annotation
    if withLabels:
        for i, lab in enumerate(labels):
            pl.annotate(lab, xy=(xall[i], yall[i]), xytext=(-5, 5),
                        textcoords='offset points', ha='right', va='bottom', fontsize=5)
    pl.savefig("./pic/" + str(pathToPic) + "/" + str(Name) + "test" + str(i) + "." + picFormat, format=picFormat)
    pl.close(fig)
    return R2all
def create_curve(data, polyfit_degree=7):
    """This function finds a best-fit curve for a list of data points.

    This function accepts a list argument 'data' as input and an integer
    polynomial degree of fit. The 'data' argument is a list of 2-item tuples
    (x, y). The polynomial degree of fit defaults to a 7th-degree polynomial.
    The function returns the equation for the polynomial fit line."""
    reflectance_curve = pylab.polyfit([wavelength for wavelength, reflectance in data],
                                      [reflectance for wavelength, reflectance in data],
                                      polyfit_degree)
    return pylab.poly1d(reflectance_curve)
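# A short usage sketch for create_curve; the (wavelength, reflectance) samples
# below are invented for illustration only.
import pylab

samples = [(400 + 10 * i, 0.2 + 0.001 * i) for i in range(40)]  # hypothetical data: nm vs. reflectance

curve = create_curve(samples, polyfit_degree=3)  # a low degree suffices for smooth data
print(curve(550))  # estimated reflectance at 550 nm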
def plot_table_values(ax, values, color="k", pol=1):
    fit = pylab.polyfit(values["temperature"], values["values"], pol)
    fit_fn = pylab.poly1d(fit)
    plot = ax.plot(
        values["temperature"], fit_fn(values["temperature"]), "{}-".format(color),
        values["temperature"], values["values"], "{}o".format(color),
        markersize=5,
    )
    pylab.setp(plot[0], linewidth=2)
    return plot
def makeGraph(X, Y, xName, yName, name="NoName"):
    fig = plt.figure()
    ax = fig.add_subplot(111)
    superName = "Comparison of {} and {}".format(xName, yName)
    outname = "{} from {}.png".format(superName, name)
    fig.suptitle(superName)
    ax.scatter(X, Y)
    fit = polyfit(X, Y, 1)
    fit_fn = poly1d(fit)  # fit_fn is now a function which takes in x and returns an estimate for y
    ax.plot(X, Y, 'yo', X, fit_fn(X), '--k')
    ax.set_xlabel('Size of MCS found by {}'.format(xName))
    ax.set_ylabel('Size of MCS found by {}'.format(yName))
    ax.text(1, 1, "y = {}*x + {}".format(fit[0], fit[1]))
    fig.savefig(outname)
def plot_linear_regression_values(ax, values, color="k"):
    fit = pylab.polyfit(values["temperature"], values["values"], 1)
    print("m={} b={}".format(fit[0], fit[1]))
    fit_fn = pylab.poly1d(fit)
    plot = ax.plot(
        values["temperature"], fit_fn(values["temperature"]), "{}-".format(color),
        values["temperature"], values["values"], "{}o".format(color),
        markersize=5,
    )
    pylab.setp(plot[0], linewidth=2)
    return plot
def analyze(self):
    logging.info('Analyze and plot results')
    x = self.data[:, 0]
    y = self.data[:, 1]
    fit = polyfit(x[np.logical_and(x >= self.fit_range[0], x <= self.fit_range[1])],
                  y[np.logical_and(x >= self.fit_range[0], x <= self.fit_range[1])], 1)
    fit_fn = poly1d(fit)
    plt.plot(x, y, 'o-', label='data')
    plt.plot(x, fit_fn(x), '--k', label=str(fit_fn))
    plt.title(self.scan_parameter + ' calibration')
    plt.xlabel(self.scan_parameter)
    plt.ylabel('Voltage [V]')
    plt.grid(True)
    plt.legend(loc=0)
    plt.savefig(self.output_filename + '.pdf')
    # Store result in file
    self.register.calibration_parameters['Vcal_Coeff_0'] = fit[1] * 1000.  # store in mV
    self.register.calibration_parameters['Vcal_Coeff_1'] = fit[0] * 1000.  # store in mV/DAC
def make_plot(x, y):
    m1 = 10
    m2 = 100
    # Fit a power law by linear regression in log-log space
    fit = pyl.polyfit(np.log(x[m1:m2]), np.log(y[m1:m2]), 1)
    fit_fun = pyl.poly1d(fit)
    power_fit = lambda x: math.exp(fit_fun(math.log(x)))
    vec_power_fit = np.vectorize(power_fit)
    plt.plot(x, y, 'yo')
    plt.plot(x, vec_power_fit(x), 'b')
    ax = plt.axes()
    ax.set_yscale('log')
    ax.set_xscale('log')
    # fit[0] is the slope (the power-law exponent), fit[1] the intercept
    ax.text(0.3, 0.07, 'log(y(x)) = %.3f log(x) + %.3f' % (fit[0], fit[1]),
            fontsize=14, horizontalalignment='center', verticalalignment='center',
            transform=ax.transAxes)
    plt.show()
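# A minimal check of the log-log fitting idiom above on synthetic data; the
# exponent and prefactor are invented for the example. The recovered slope
# should match the exponent and exp(intercept) the prefactor.
import numpy as np
import pylab as pyl

x_chk = np.linspace(1.0, 100.0, 200)
y_chk = 2.5 * x_chk ** -1.7              # y = A * x**k with A = 2.5, k = -1.7

fit_chk = pyl.polyfit(np.log(x_chk), np.log(y_chk), 1)
print(fit_chk[0])          # ~ -1.7 -> exponent k
print(np.exp(fit_chk[1]))  # ~ 2.5  -> prefactor A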
def get_fit(which):
    f = open("final_position.txt", "r")
    data = pl.genfromtxt(f, comments="L")
    if which == "x":
        datnum = 2
    if which == "vx":
        datnum = 0
    x = pl.array([])
    y = pl.array([])
    for i, j in enumerate(data[:-7, datnum]):
        if i % 2 == 0:
            x = pl.append(x, data[i, 4])
            y = pl.append(y, j)
    fit = pl.polyfit(x, y, 2)
    fitted = pl.poly1d(fit)
    return fitted
def estimate_bo_params(p1=-0.05):
    data = np.loadtxt("bo.dat")
    data = data.transpose()
    r = data[0]
    bo = data[1]
    # Linearize bo = exp(p1 * (r / r0)**p2) by taking logs twice:
    # log(log(bo) / p1) = p2 * log(r) - p2 * log(r0)
    x = np.log(r)
    y = np.log(np.log(bo) / p1)
    fit = polyfit(x, y, 1)
    fit_fn = poly1d(fit)
    p2 = fit[0]
    r0 = math.exp(-fit[1] / p2)
    print("r0 = ", r0)
    print("pbo1 = ", p1)
    print("pbo2 = ", p2)
    plt.plot(x, y, x, fit_fn(x), '--k')
    plt.show()
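# A synthetic round-trip check of the linearization used above; the "true"
# parameter values here are invented for the example. Generating bond-order
# data from bo = exp(p1 * (r / r0)**p2) and refitting should recover p2 and r0.
import math
import numpy as np
from pylab import polyfit

p1_true, p2_true, r0_true = -0.05, 6.0, 1.4
r_chk = np.linspace(1.0, 2.0, 50)
bo_chk = np.exp(p1_true * (r_chk / r0_true) ** p2_true)

fit_chk = polyfit(np.log(r_chk), np.log(np.log(bo_chk) / p1_true), 1)
print(fit_chk[0])                              # ~ 6.0 -> p2
print(math.exp(-fit_chk[1] / fit_chk[0]))      # ~ 1.4 -> r0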
def plot_time_histogram(by_times, by_nodes, nodes, time, suffix):
    ranking = [by_times[time][v] for v in by_times[time]]
    ranking.sort()
    Y = ranking
    minY = min(Y)
    nBins = 10
    # Power-of-two (logarithmic) bin edges
    bins = [minY + int(numpy.power(2, i)) for i in range(nBins)]
    print(bins)
    N, tempbins, temppatches = pylab.hist(Y, bins)
    H = [[bins[i + 1], float(N[i]) / sum(N)] for i in range(len(N)) if N[i] > 0]
    print(N)
    pylab.close()
    X = [h[0] for h in H]
    Y1 = [h[1] for h in H]
    # Power-law fit in log-log space
    fit = pylab.polyfit(numpy.log(X), numpy.log(Y1), 1)
    fit_fn = pylab.poly1d(fit)
    Y2 = numpy.exp(fit_fn(numpy.log(X)))
    output_filename = constants.CHARTS_FOLDER_NAME + 'time_hist' + '_' + suffix
    pylab.figure(figsize=(5, 4))
    pylab.rcParams.update({'font.size': 20})
    pylab.xscale('log')
    pylab.yscale('log')
    pylab.scatter(X, Y1)
    pylab.plot(X, Y2, '--')
    # pylab.xlabel('# of edges')
    # pylab.ylabel('Probability')
    # pylab.title(output_filename)
    pylab.savefig(output_filename + '.pdf')
    pylab.close()
def start(self, solve=True, username="", password="", gameType="Prisoner Meeting"):
    """ Start the experiments and graph the result at the end.

        Parameters:
            solve    -- True if we should solve the policies here. False if we should instead load them.
            username -- The NEOS server username.
            password -- The NEOS server password.
            gameType -- The type of game: "Prisoner Meeting" or "Battle Meeting".
    """
    for numNodes in self.numControllerNodes:
        values = [list(), list(), list()]
        standardError = [list(), list(), list()]
        computeFinalValues = [list(), list(), list()]

        # For each slack term, create the MCC, solve it, and compute the value.
        for slack in self.slackValues:
            print("----- Starting Configuration [Num Nodes: %i, Slack %.1f] -----" % (numNodes, slack))

            # Create the MCC, FSCs, etc. Then solve the MCC. Then load the policies.
            mcc = MCC(gameType)
            aliceFSC = FSC(mcc, "Alice", numNodes)
            bobFSC = FSC(mcc, "Bob", numNodes)
            fscVector = FSCVector([aliceFSC, bobFSC])

            if solve:
                # Note: This overwrites the FSCs and re-saves them for later use, if you want.
                mccSolve = MCCSolve(mcc, fscVector, maxNumSteps=self.maxNumSteps, delta=slack)
                totalTime, individualTimes = mccSolve.solve(username, password, resolve=(slack == self.slackValues[0]))

                print("Individual Times: [R0: %.2fs, R1: %.2fs, R2: %.2fs]" % (individualTimes[0], individualTimes[1], individualTimes[2]))
                print("Total Time: %.2f seconds" % (totalTime))
                print("")

                # We can also use the output files to compute the actual values and make another graph!
                computeFinalValuesResult = self._compute_final_values()
                computeFinalValues[0] += [computeFinalValuesResult[0]]
                computeFinalValues[1] += [computeFinalValuesResult[1]]
                computeFinalValues[2] += [computeFinalValuesResult[2]]
            else:
                aliceFSC.load("%i_%i" % (numNodes, int(slack)))
                bobFSC.load("%i_%i" % (numNodes, int(slack)))

            # Compute the average value following this FSC policy.
            data = [list(), list(), list()]
            averages = np.array([0.0, 0.0, 0.0])

            for i in range(self.numTrials):
                belief = mcc.get_initial_belief()
                state = None
                currentValue = 0.0
                targetValue = random.random()
                for s in mcc.states:
                    try:
                        currentValue += belief[s]
                        if currentValue >= targetValue:
                            state = s
                            break
                    except Exception:
                        continue

                aliceState = aliceFSC.get_initial_state()
                bobState = bobFSC.get_initial_state()

                trialValues = [0.0, 0.0, 0.0]
                compoundedGamma = 1.0
                for t in range(self.horizon):
                    action = (aliceFSC.get_action(aliceState), bobFSC.get_action(bobState))

                    trialValues[0] += compoundedGamma * mcc.R0(state, action)
                    trialValues[1] += compoundedGamma * mcc.Ri("Alice", state, action)
                    trialValues[2] += compoundedGamma * mcc.Ri("Bob", state, action)
                    compoundedGamma *= mcc.gamma

                    successor = mcc.get_successor(state, action)
                    observation = mcc.get_observation(action, successor)

                    state = successor
                    aliceState = aliceFSC.get_successor(aliceState, action[0], observation[0])
                    bobState = bobFSC.get_successor(bobState, action[1], observation[1])

                for j in range(len(averages)):
                    data[j] += [trialValues[j]]
                    averages[j] = float(i * averages[j] + trialValues[j]) / float(i + 1.0)

            # Record the value and compute standard error.
            for i in range(len(values)):
                values[i] += [averages[i]]
                standardError[i] += [math.sqrt(sum([pow(data[i][j] - averages[i], 2)
                                                    for j in range(len(data[i]))]) / float(len(data[i]) - 1.0))]

        # Compute some final things and make adjustments.
        for i in range(len(values)):
            values[i] = np.array(values[i])

        if solve:
            for i in range(len(computeFinalValues)):
                computeFinalValues[i] = np.array(computeFinalValues[i])

        minV = min([min(v) for v in values])
        maxV = max([max(v) for v in values])

        if gameType == "Battle Meeting":
            minV = -2.0
            maxV = 35.0
        elif gameType == "Prisoner Meeting":
            minV = 0.0
            maxV = 50.0

        # Plot the result, providing beautiful paper-worthy labels.
        if self.graphRegionValues:
            labels = ["V0", "Vi Min", "Vi Max", "Vi Trend", "(V1+V2)/2"]
        else:
            labels = ["V0", "V1", "V2", "Trend", "(V1+V2)/2"]
        linestyles = ["-", "--", ":", "-", "-"]
        markers = ["o", "s", "^", "", ""]
        colors = ["r", "g", "b", "k", "k"]

        minSlack = min(self.slackValues)
        maxSlack = max(self.slackValues)

        pylab.rcParams.update({'font.size': 18})
        pylab.title("%s: ADR vs. Slack (Num Nodes = %i)" % (gameType, numNodes))
        pylab.hold(True)
        pylab.xlabel("Slack")
        pylab.xticks(np.arange(minSlack, maxSlack + 5.0, 5.0))
        pylab.xlim([minSlack, maxSlack])
        pylab.ylabel("Average Discounted Reward")
        pylab.yticks(np.arange(0.0, int(maxV) + 5.0, 5.0))
        pylab.ylim([0.0, int(maxV)])
        pylab.hlines(np.arange(0.0, int(maxV) + 1.0, 5.0), minSlack - 1.0, maxSlack + 1.0, colors=[(0.6, 0.6, 0.6)])

        if self.graphRegionValues:
            # Ensure values[1] holds the minimum and values[2] the maximum of the two agents.
            for i in range(len(self.slackValues)):
                if values[1][i] > values[2][i]:
                    values[1][i], values[2][i] = values[2][i], values[1][i]
                    standardError[1][i], standardError[2][i] = standardError[2][i], standardError[1][i]
            pylab.fill_between(self.slackValues, values[1], values[2], facecolor=(0.85, 0.85, 0.85))

        for i in range(len(values)):
            pylab.errorbar(self.slackValues, values[i], yerr=standardError[i],
                           linestyle=linestyles[i], linewidth=3,
                           marker=markers[i], markersize=18, color=colors[i])
            pylab.plot(self.slackValues, values[i], label=labels[i],
                       linestyle=linestyles[i], linewidth=8,
                       marker=markers[i], markersize=18, color=colors[i])

        # Special: Print a trend line for the individual objectives.
        if self.graphTrendLine:
            trendLineZ = pylab.polyfit(self.slackValues + self.slackValues,
                                       pylab.concatenate((values[1], values[2]), axis=0), 1)
            trendLinePoly = pylab.poly1d(trendLineZ)
            trendLineValues = [trendLinePoly(slackValue) for slackValue in self.slackValues]
            pylab.plot(self.slackValues, trendLineValues, label=labels[3],
                       linestyle=linestyles[3], linewidth=8,
                       marker=markers[3], markersize=18, color=colors[3])

        # Special: Print the average of the individual objectives.
        if self.graphAverageLine:
            pylab.plot(self.slackValues,
                       [(values[1][i] + values[2][i]) / 2.0 for i in range(len(self.slackValues))],
                       label=labels[4], linestyle=linestyles[4], linewidth=8,
                       marker=markers[4], markersize=18, color=colors[4])

        pylab.rcParams.update({'font.size': 14})
        if gameType == "Battle Meeting":
            pylab.legend(loc=1)  # Upper Right
        elif gameType == "Prisoner Meeting":
            pylab.legend(loc=3)  # Lower Left
        pylab.show()
        pylab.rcParams.update({'font.size': 18})

        # Special: If we just solved for these, then we have the actual values! Plot these results too!
        if solve:
            labels = ["V0", "V1", "V2"]
            linestyles = ["-", "--", ":"]
            markers = ["o", "s", "^"]
            colors = ["r", "g", "b"]

            minSlack = min(self.slackValues)
            maxSlack = max(self.slackValues)

            pylab.title("%s: Computed Values vs. Slack (Num Nodes = %i)" % (gameType, numNodes))
            pylab.hold(True)
            pylab.xlabel("Slack")
            pylab.xticks(np.arange(minSlack, maxSlack + 5.0, 5.0))
            pylab.xlim([minSlack - 0.1, maxSlack + 0.1])
            pylab.ylabel("Computed Values")
            pylab.yticks(np.arange(int(minV), int(maxV) + 5.0, 5.0))
            pylab.ylim([minV - 0.1, int(maxV) + 1.1])
            pylab.hlines(np.arange(int(minV) - 1.0, int(maxV) + 1.0, 5.0), minSlack - 1.0, maxSlack + 1.0, colors=[(0.6, 0.6, 0.6)])

            for i in range(len(computeFinalValues)):
                pylab.plot(self.slackValues, computeFinalValues[i], label=labels[i],
                           linestyle=linestyles[i], linewidth=8,
                           marker=markers[i], markersize=18, color=colors[i])

            pylab.rcParams.update({'font.size': 14})
            if gameType == "Prisoner Meeting":
                pylab.legend(loc=3)  # Lower Left
            elif gameType == "Battle Meeting":
                pylab.legend(loc=1)  # Upper Right
            pylab.show()
toM = 1e-5
toCM = 1e-1
toCM5 = 1e4

data = np.loadtxt("msd.dat")
data = data.transpose()

# Fit only the central 20%-80% of the trajectory, away from ballistic and noisy tails
start = int(0.2 * len(data[0]))
end = int(0.8 * len(data[0]))

x = data[0] * ts
y = data[1]
fx = x[start:end]
fy = y[start:end]

fit = polyfit(fx, fy, 1)
fit_fn = poly1d(fit)
print("%.4e" % (fit[0] / 6 * toM))
print("%.4e" % (fit[0] / 6 * toCM))
print("%.4e 10^-5cm^2/s" % (fit[0] / 6 * toCM5))

plt.plot(x, y)
plt.plot(fx, fit_fn(fx), '--k', lw=2)
plt.xlabel("Simulation Time (fs)")
plt.ylabel(r"MSD ($\AA^2$)")
plt.show()
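# The slope/6 above is the 3-D Einstein relation, D = lim_{t->inf} MSD(t) / (6t).
# A minimal synthetic sanity check (time range and diffusion constant invented):
import numpy as np
from pylab import polyfit

D_true = 2.0e-5                      # assumed diffusion constant, A^2/fs
t_chk = np.linspace(0.0, 1e6, 1000)  # time in fs
msd_chk = 6.0 * D_true * t_chk       # ideal 3-D diffusive MSD

slope = polyfit(t_chk, msd_chk, 1)[0]
print(slope / 6.0)                   # recovers ~2.0e-5 A^2/fs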
def plot_shifts(by_times, by_nodes, nodes, suffix):
    shifts = []
    times = sorted(by_times.keys())
    prev_state = None
    for time in times:
        # get rid of saturdays and sundays
        # if time % 7 == 1 or time % 7 == 2:
        # if time % 7 == 1:
        #     continue
        state = calc_network_state(by_times, nodes, time)
        if prev_state is not None:
            shift = diff_network(prev_state, state, len(by_nodes))
            shifts.append(shift)
        prev_state = state
    print(len(nodes))
    print(len(shifts))

    ##############################
    # ordered
    ##############################
    ### test
    const = numpy.median(shifts)
    shifts = [numpy.abs(s - const) for s in shifts]
    ###
    values = [s for s in shifts]
    values.sort(reverse=True)
    plot_values(values, 'XXX')

    ##############################
    # bins 15
    ##############################
    Y = shifts
    minY = min(Y)
    maxY = max(Y)
    nBins = 15
    binSize = (maxY - minY) / float(nBins)
    bins = [minY + binSize * float(i) for i in range(nBins)]
    print(bins)
    N, tempbins, temppatches = pylab.hist(Y, bins)
    pylab.close()
    N = [float(n) / float(sum(N)) for n in N]
    print(N)
    X = [bins[i + 1] for i in range(len(N)) if bins[i + 1] > 0 and N[i] > 0]
    Y1 = [N[i] for i in range(len(N)) if bins[i + 1] > 0 and N[i] > 0]
    fit = pylab.polyfit(numpy.log(X), numpy.log(Y1), 1)
    fit_fn = pylab.poly1d(fit)
    Y2 = numpy.exp(fit_fn(numpy.log(X)))
    output_filename = constants.CHARTS_FOLDER_NAME + 'shifts_bins_15' + '_' + suffix
    pylab.figure(figsize=(9, 4))
    pylab.xscale('log')
    pylab.yscale('log')
    pylab.scatter(X, Y1)
    pylab.plot(X, Y2, '--')
    # pylab.xlabel('Distance between following network states')
    # pylab.ylabel('Probability')
    # pylab.title(output_filename)
    pylab.savefig(output_filename + '.pdf')
    pylab.close()

    ##############################
    # bins 30
    ##############################
    Y = shifts
    minY = min(Y)
    maxY = max(Y)
    nBins = 30
    binSize = (maxY - minY) / float(nBins)
    bins = [minY + binSize * float(i) for i in range(nBins)]
    print(bins)
    N, tempbins, temppatches = pylab.hist(Y, bins)
    pylab.close()
    N = [float(n) / float(sum(N)) for n in N]
    print(N)
    X = [bins[i + 1] for i in range(len(N)) if bins[i + 1] > 0 and N[i] > 0]
    Y1 = [N[i] for i in range(len(N)) if bins[i + 1] > 0 and N[i] > 0]
    fit = pylab.polyfit(numpy.log(X), numpy.log(Y1), 1)
    fit_fn = pylab.poly1d(fit)
    Y2 = numpy.exp(fit_fn(numpy.log(X)))
    output_filename = constants.CHARTS_FOLDER_NAME + 'shifts_bins_30' + '_' + suffix
    pylab.figure(figsize=(9, 4))
    pylab.xscale('log')
    pylab.yscale('log')
    pylab.scatter(X, Y1)
    pylab.plot(X, Y2, '--')
    # pylab.xlabel('Distance between following network states')
    # pylab.ylabel('Probability')
    # pylab.title(output_filename)
    pylab.savefig(output_filename + '.pdf')
    pylab.close()
def analyze(self):
    logging.info('Analysing the PlsrDAC waveforms')
    with tb.open_file(self.output_filename + '.h5', 'r') as in_file_h5:
        data = in_file_h5.root.PlsrDACwaveforms[:]
        times = np.array(in_file_h5.root.PlsrDACwaveforms._v_attrs.times)
        scan_parameter_values = in_file_h5.root.PlsrDACwaveforms._v_attrs.scan_parameter_values
        trigger_levels = in_file_h5.root.PlsrDACwaveforms._v_attrs.trigger_levels
        fit_range = ast.literal_eval(in_file_h5.root.configuration.run_conf[:][np.where(in_file_h5.root.configuration.run_conf[:]['name'] == 'fit_range')]['value'][0])
        fit_range_step = ast.literal_eval(in_file_h5.root.configuration.run_conf[:][np.where(in_file_h5.root.configuration.run_conf[:]['name'] == 'fit_range_step')]['value'][0])
        progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ',
                                                        progressbar.Bar(marker='*', left='|', right='|'), ' ',
                                                        progressbar.AdaptiveETA()],
                                               maxval=data.shape[0], term_width=80)

        with tb.open_file(self.output_filename + '_interpreted.h5', 'w') as out_file_h5:
            description = [('PlsrDAC', np.uint32), ('voltage_step', np.float64)]  # output data table description
            data_array = np.zeros((data.shape[0],), dtype=description)
            data_table = out_file_h5.create_table(out_file_h5.root, name='plsr_dac_data',
                                                  description=np.zeros((1,), dtype=description).dtype,
                                                  title='Voltage steps from transient PlsrDAC calibration scan')
            with PdfPages(self.output_filename + '_interpreted.pdf') as output_pdf:
                progress_bar.start()
                for index in range(data.shape[0]):
                    voltages = data[index]
                    trigger_level = trigger_levels[index]
                    plsr_dac = scan_parameter_values[index]
                    if trigger_level < 0.005:
                        logging.warning('The trigger threshold for PlsrDAC %d is with %d mV too low. Thus this setting is omitted in the analysis!', plsr_dac, trigger_level * 1000.)
                        data_array['voltage_step'][index] = np.NaN
                        continue
                    step_index = np.where(np.abs(voltages - trigger_level) == np.amin(np.abs(voltages - trigger_level)))[0][0]
                    left_step_fit_range = (step_index + fit_range_step[0][0], step_index + fit_range_step[0][1])
                    right_step_fit_range = (step_index + fit_range_step[1][0], step_index + fit_range_step[1][1])
                    # Error handling if the selected fit range exceeds limits
                    if (left_step_fit_range[0] < 0 or left_step_fit_range[1] < 0 or
                            right_step_fit_range[0] >= data.shape[1] or right_step_fit_range[1] >= data.shape[1] or
                            left_step_fit_range[0] >= left_step_fit_range[1] or
                            right_step_fit_range[0] >= right_step_fit_range[1]):
                        logging.warning('The step fit limits for PlsrDAC %d are out of bounds. Omit this data!', plsr_dac)
                        data_array['voltage_step'][index] = np.NaN
                        continue
                    times_left_step, voltage_left_step = times[left_step_fit_range[0]:left_step_fit_range[1]], voltages[left_step_fit_range[0]:left_step_fit_range[1]]
                    times_right_step, voltage_right_step = times[right_step_fit_range[0]:right_step_fit_range[1]], voltages[right_step_fit_range[0]:right_step_fit_range[1]]
                    median_left_step = np.median(voltage_left_step)
                    median_right_step = np.median(voltage_right_step)
                    data_array['PlsrDAC'][index] = plsr_dac
                    data_array['voltage_step'][index] = median_left_step - median_right_step
                    # Plot waveform + fit
                    plt.clf()
                    plt.ylim(0, 1500)
                    plt.grid()
                    plt.plot(times * 1e9, voltages * 1e3, label='Data')
                    plt.plot(times * 1e9, np.repeat([trigger_level * 1e3], len(times)), '--', label='Trigger (%d mV)' % (trigger_level * 1000))
                    plt.plot(times_left_step * 1e9, np.repeat(median_left_step * 1e3, times_left_step.shape[0]), '-', linewidth=2, label='Left of step constant fit')
                    plt.plot(times_right_step * 1e9, np.repeat(median_right_step * 1e3, times_right_step.shape[0]), '-', linewidth=2, label='Right of step constant fit')
                    plt.title('PulserDAC %d waveform' % plsr_dac)
                    plt.xlabel('Time [ns]')
                    plt.ylabel('Voltage [mV]')
                    plt.legend(loc=0)
                    output_pdf.savefig()
                    progress_bar.update(index)
                data_table.append(data_array[np.isfinite(data_array['voltage_step'])])  # store valid data
                # Plot, fit and store linear PlsrDAC transfer function
                x = data_array[np.isfinite(data_array['voltage_step'])]['PlsrDAC']
                y = data_array[np.isfinite(data_array['voltage_step'])]['voltage_step']
                fit = polyfit(x[np.logical_and(x >= fit_range[0], x <= fit_range[1])],
                              y[np.logical_and(x >= fit_range[0], x <= fit_range[1])], 1)
                fit_fn = poly1d(fit)
                plt.clf()
                plt.plot(x, y, '.-', label='data')
                plt.plot(x, fit_fn(x), '--k', label=str(fit_fn))
                plt.title('PlsrDAC calibration')
                plt.xlabel('PlsrDAC')
                plt.ylabel('Voltage step [V]')
                plt.grid(True)
                plt.legend(loc=0)
                output_pdf.savefig()
                # Store result in file
                self.register.calibration_parameters['Vcal_Coeff_0'] = fit[1] * 1000.  # store in mV
                self.register.calibration_parameters['Vcal_Coeff_1'] = fit[0] * 1000.  # store in mV/DAC
                progress_bar.finish()
# Plot 5
# 'x' and 'step_times' are defined in the earlier (non-random) part of the script
csv = open("data/g19_lab09data_random.csv")
step_sum_random = 0.0
step_times_random = []
i = 1
for line in csv:
    values = line.split(",")
    step_sum_random += float(values[2])
    if i % 15 == 0:
        # average each block of 15 samples
        step_sum_random = step_sum_random / 15
        step_times_random.append(step_sum_random)
        step_sum_random = 0.0
    i += 1

step_line = pl.poly1d(pl.polyfit(x, np.array(step_times), 1))
step_line_random = pl.poly1d(pl.polyfit(x, np.array(step_times_random), 1))
pl.plot(x, np.array(step_times), "b.", x, step_line(x), "b-", label="Average Step Times")
pl.plot(x, np.array(step_times_random), "r.", x, step_line_random(x), "r-", label="Average Random Step Times")
def conversionGainFrame(imglist, quadrant):
    """Determine the conversion gain for a given quadrant.

    A spatial analysis is done, using all pixels in a quadrant. The
    mean-variance difference method is used, i.e. the differences between four
    pairs of flat images are used, each pair's exposure time being twice that
    of the previous pair.

    Variance = 2*Nread**2 + Signal1 + Signal2

    Will plot both linear and log plots including mean sum vs. variance
    difference, mean sum vs. variance difference - readnoise**2 and a fit to
    the slope for gain in e-/ADU.

    see http://www.noao.edu/kpno/manuals/whirc/WHIRC_VI_Mean-variance_101026.pdf
    by Dick Joyce, 2010

    Args:
        imglist (list): File names of image pairs of increasing exposure time
        quadrant (int): Quadrant being analyzed.

    Returns:
        gain, readnoise (float): Gain in e-/ADU and readnoise in e-

    Example:
        g, rn = detector.conversionGainFrame([q1_0s, q1_0s, q1_2s, q1_2s, q1_4s, q1_4s], 3)
    """
    # run through each channel
    means = np.zeros(len(imglist) // 2)
    varis = np.zeros(len(imglist) // 2)
    # process pairs of images
    for i in range(0, len(imglist), 2):
        im1 = imglist[i]
        im2 = imglist[i + 1]
        # get sum of means
        som = summean(im1, im2)
        # get variance of difference
        vod = diffvar(im1, im2)
        means[i // 2] = som
        varis[i // 2] = vod
    print('means:', means)
    print('variances: ', varis)
    fit = pl.polyfit(means, varis, 1)
    print(fit)
    fit_fn = pl.poly1d(fit)
    print('y intercept', fit_fn(0.0))
    eADU = 1.0 / fit[0]
    print("e-/ADU = {0}".format(str(eADU)))
    fig1 = plt.figure(figsize=(10, 10))
    plt.loglog(means, varis, 'o', means, fit_fn(means), '--')
    plt.legend(['Channel ' + str(quadrant), str(round(eADU, 3)) + ' e-/ADU'], loc=9)
    plt.xlabel('Mean')
    plt.ylabel('Variance')
    plt.title('Conversion Gain, Quadrant {0}'.format(str(quadrant)))
    plt.show()
    fig2 = plt.figure(figsize=(10, 10))
    plt.plot(means, varis, 'o', means, fit_fn(means), '--')
    plt.legend(['Channel ' + str(quadrant), str(round(eADU, 3)) + ' e-/ADU'], loc=9)
    plt.xlabel('Mean')
    plt.ylabel('Variance')
    plt.title('Conversion Gain, Quadrant {0}'.format(str(quadrant)))
    plt.show()
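# The docstring promises a (gain, readnoise) return that the body never emits.
# One plausible completion, derived from the docstring's own variance model
# expressed in ADU units (variance = mean_sum / g + 2 * readnoise_ADU**2):
# the fit slope is 1/g and the y-intercept is 2 * readnoise_ADU**2. This is a
# hedged sketch, not the author's confirmed formula.
import numpy as np

def gain_and_readnoise(fit):
    """Derive gain [e-/ADU] and read noise [e-] from a linear mean-variance fit."""
    gain = 1.0 / fit[0]                                      # slope is 1/gain
    intercept = fit[1]                                       # ADU^2, equals 2 * readnoise_ADU**2
    readnoise = gain * np.sqrt(max(intercept, 0.0) / 2.0)    # convert back to electrons
    return gain, readnoise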
def plot_model_model(dataframe, **kwargs):
    """
    Create the LIE model scatter plot
    """
    settings = {'tollerance': 5, 'color': 'red'}
    settings.update(kwargs)

    ax = settings.get('ax', None)
    combine_plots = False
    if ax:
        combine_plots = True

    # Plot the training set
    trainset = dataframe.trainset
    label = 'train'
    if combine_plots:
        label = None
    ax = trainset.plot(kind='scatter', x='ref_affinity', y='dg_calc',
                       color=settings['color'], label=label, s=25, ax=ax)
    ax.set_aspect('equal')

    # Plot datalabels if needed
    if settings.get('plot_labels', False):
        for i, point in trainset.iterrows():
            ax.text(point['ref_affinity'], point['dg_calc'], "{0:.0f}".format(point['case']), fontsize=8)

    # Force X and Y axis to have the same data range
    axis_min = 10 * round(min([trainset['ref_affinity'].min(), trainset['dg_calc'].min()]) / 10)
    axis_max = 10 * round(max([trainset['ref_affinity'].max(), trainset['dg_calc'].max()]) / 10)

    # Give it a bit more space
    ax.set_xlim(axis_min - 10, axis_max + 10)
    ax.set_ylim(axis_min - 10, axis_max + 10)

    # Plot the regression line
    if settings.get('plot_regline', False):
        ref = trainset['ref_affinity'].values
        fitx = polyfit(ref, trainset['dg_calc'].values, 1)
        fit_fnx = poly1d(fitx)
        ax.plot(ref, fit_fnx(ref), 'r-', label="fit", linewidth=0.5)

    # Add diagonal and error margins
    if not combine_plots:
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        ax.plot(xlim, ylim, 'k-', linewidth=0.5)
        ax.plot((xlim[0], xlim[1] - settings['tollerance']), (ylim[0] + settings['tollerance'], ylim[1]), 'k--')
        ax.plot((xlim[0] + settings['tollerance'], xlim[1]), (ylim[0], ylim[1] - settings['tollerance']), 'k--')

    # Plot the test set if any
    testset = dataframe.testset
    if not testset.empty and settings.get('plot_test', True):
        label = 'test'
        if combine_plots:
            label = None
        ax = testset.plot(kind='scatter', x='ref_affinity', y='dg_calc', label=label, s=20, ax=ax)

        # Plot datalabels if needed
        if settings.get('plot_labels', False):
            for i, point in testset.iterrows():
                ax.text(point['ref_affinity'], point['dg_calc'], "{0:.0f}".format(point['case']), fontsize=8)

    ax.set_xlabel(r'$\Delta$$G_{Ref}$ (kJ/mol)', fontsize=15)
    ax.set_ylabel(r'$\Delta$$G_{Calc}$ (kJ/mol)', fontsize=15)
    ax.legend(loc="best", frameon=False)

    return ax
import pylab
from scipy import stats

# our data
nosleep = [39, 18, 48, 24, 46, 35, 30, 34, 42]
mistakes = [6, 8, 13, 5, 17, 6, 15, 8, 2]

# calculate the regression equation
m, b, r, p, std_err = stats.linregress(nosleep, mistakes)
fit = pylab.polyfit(nosleep, mistakes, 1)
fit_fn = pylab.poly1d(fit)

# plot scatter and regression line
pylab.plot(nosleep, mistakes, 'o', nosleep, fit_fn(nosleep))

# add the (written) equation to the plot
equation = "y' = %.3f x %+.3f" % (m, b)
# pylab.figtext(0.5, 0.4, equation)

# adjust the graph's details
pylab.xlabel("Hours without sleep")
pylab.ylabel("Number of Mistakes")
pylab.xlim([10, 50])
pylab.ylim([0, 14])
pylab.show()
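# The snippet above computes the same line twice: stats.linregress and a
# degree-1 polyfit both solve the identical least-squares problem, so their
# slope and intercept agree to floating-point precision. A quick check:
import numpy as np
import pylab
from scipy import stats

x_chk = [39, 18, 48, 24, 46, 35, 30, 34, 42]
y_chk = [6, 8, 13, 5, 17, 6, 15, 8, 2]

m_chk, b_chk, r_chk, p_chk, se_chk = stats.linregress(x_chk, y_chk)
fit_chk = pylab.polyfit(x_chk, y_chk, 1)
assert np.allclose([m_chk, b_chk], fit_chk)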
def Elliptic():
    rho_list = pl.logspace(-1, 0, 2)
    Roell_list = pl.logspace(pl.log10(0.1), pl.log10(50), 20)
    markers = ["bo-", "gv-", "r>-"]

    # Elastic limit
    pl.plot([Roell_list[0], Roell_list[-1]], [1 / 3, 1 / 3], "k", linewidth=3.0, alpha=0.2)

    for i, rho in enumerate(rho_list):
        tf_list = []
        te_list = []
        for Roell in Roell_list:
            ell = 1 / Roell
            te, tf = find_data(["material.ell", "problem.rho", "problem.hsize"], [ell, rho, 1e-3])
            te_list.append(te)
            tf_list.append(tf)
        pl.savetxt("Rolist%s.txt" % i, Roell_list)
        pl.savetxt("tf_list%s.txt" % i, tf_list)
        pl.savetxt("te_list%s.txt" % i, te_list)
        pl.figure(1)
        pl.loglog(Roell_list, tf_list, markers[i], label=r"$\rho=%g$" % (rho))
        pl.savefig("elliptic_tf_%s.pdf" % i)
        pl.figure(2)
        pl.loglog(Roell_list, te_list, markers[i], label=r"$\rho=%g$" % (rho))
        pl.savefig("elliptic_te_%s.pdf" % i)
        if near(rho, 0.1):
            pl.figure(1)
            ind = 12
            coeff = pl.polyfit(pl.log(Roell_list[ind:]), pl.log(tf_list[ind:]), 1)
            poly = pl.poly1d(coeff)
            fit = pl.exp(poly(pl.log(Roell_list[ind:])))
            print("The slope = %.2e" % (coeff[0]))
            pl.loglog(Roell_list[ind:], fit, "k-", linewidth=3.0, alpha=0.2)

    pl.figure(1)
    pl.xlabel(r"Relative defect size $a/\ell$")
    pl.ylabel(r"$\sigma/\sigma_0$ at fracture")
    pl.legend(loc="best")
    pl.xlim([9e-2, 1e2])
    pl.grid(True)
    pl.savefig("elliptic_tf.pdf")

    pl.figure(2)
    pl.xlabel(r"Relative defect size $a/\ell$")
    pl.ylabel(r"$\sigma/\sigma_0$ at loss of elasticity")
    pl.legend(loc="best")
    pl.xlim([9e-2, 1e2])
    pl.grid(True)
    pl.savefig("elliptic_te.pdf")

    pl.figure(3)
    rho_list = pl.linspace(0.1, 1, 10)
    te_list = []
    for i, rho in enumerate(rho_list):
        te, tf = find_data(["material.ell", "problem.rho", "problem.hsize"], [1.0, rho, 1e-3])
        te_list.append(te)
    pl.plot(rho_list, te_list, "o", label="Num.")
    pl.savetxt("rho_list.txt", rho_list)
    pl.savetxt("rho_te_list.txt", te_list)
    rho_list = pl.linspace(0.1, 1, 1000)
    pl.plot(rho_list, rho_list / (rho_list + 2), "k", label="Theo.", linewidth=3.0, alpha=0.2)
    pl.xlabel(r"Ellipticity $\rho$")
    pl.ylabel(r"$\sigma/\sigma_0$ at loss of elasticity")
    pl.legend(loc="best")
    pl.grid(True)
    pl.savefig("elliptic_te_rho.pdf", bbox_inches='tight')
def asym_quantum_factor(J, b):
    """
    This takes the place of K^2 in calculating the energy levels for
    asymmetric rotators. Townes and Schawlow, Ch. 4.

    For J > 6 this returns an empty tuple.

    Note that it doesn't matter which version of b is used since
    b_prolate(kappa) = b_oblate(-kappa) and the equations are symmetric
    in b or depend on b**2.
    """
    roots = ()
    if J == 0:
        roots = (0,)
    elif J == 1:
        roots = (0., 1 + b, 1 - b)
    elif J == 2:
        roots = (4., 1 - 3 * b, 1 + 3 * b)
        p = poly1d([1, -4, -12 * b ** 2])
        roots = roots + tuple(p.r)
    elif J == 3:
        roots = (4.,)
        p = poly1d([1, -4, -60 * b ** 2])
        roots = roots + tuple(p.r)
        p = poly1d([1, -10 + 6 * b, 9 - 54 * b - 15 * b ** 2])
        roots = roots + tuple(p.r)
        p = poly1d([1, -10 - 6 * b, 9 + 54 * b - 15 * b ** 2])
        roots = roots + tuple(p.r)
    elif J == 4:
        p = poly1d([1, -10 * (1 - b), 9 - 90 * b - 63 * b ** 2])
        roots = tuple(p.r)
        p = poly1d([1, -10 * (1 + b), 9 + 90 * b - 63 * b ** 2])
        roots = roots + tuple(p.r)
        p = poly1d([1, -20, 64 - 28 * b ** 2])
        roots = roots + tuple(p.r)
        p = poly1d([1, -20, 64 - 208 * b ** 2, 2880 * b ** 2])
        roots = roots + tuple(p.r)
    elif J == 5:
        p = poly1d([1, -20, 64 - 108 * b ** 2])
        roots = tuple(p.r)
        p = poly1d([1, -20, 64 - 528 * b ** 2, 6720 * b ** 2])
        roots = roots + tuple(p.r)
        p = poly1d([1, -35 + 15 * b, 259 - 510 * b - 213 * b ** 2, -225 + 3375 * b + 4245 * b ** 2 - 675 * b ** 3])
        roots = roots + tuple(p.r)
        p = poly1d([1, -35 - 15 * b, 259 + 510 * b - 213 * b ** 2, -225 - 3375 * b + 4245 * b ** 2 + 675 * b ** 3])
        roots = roots + tuple(p.r)
    elif J == 6:
        p = poly1d([1, -35 + 21 * b, 259 - 714 * b - 525 * b ** 2, -225 + 4725 * b + 9165 * b ** 2 - 3465 * b ** 3])
        roots = tuple(p.r)
        p = poly1d([1, -35 - 21 * b, 259 + 714 * b - 525 * b ** 2, -225 - 4725 * b + 9165 * b ** 2 + 3465 * b ** 3])
        roots = roots + tuple(p.r)
        p = poly1d([1, -56, 784 - 336 * b ** 2, -2304 + 9984 * b ** 2])
        roots = roots + tuple(p.r)
        p = poly1d([1, -56, 784 - 1176 * b ** 2, -2304 + 53664 * b ** 2, -483840 * b ** 2 + 55440 * b ** 4])
        roots = roots + tuple(p.r)
    else:
        roots = ()
    return roots
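# A quick usage sketch: in the symmetric-top limit b = 0 the returned values
# should collapse to K**2 for each level, which gives an easy sanity check.
# (The poly1d import from pylab is assumed, matching the function body.)
from pylab import poly1d

vals = sorted(asym_quantum_factor(2, 0.0))
print(vals)  # [0.0, 1.0, 1.0, 4.0, 4.0] -> K = 0, +/-1, +/-2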
def tafel(cycle, base=None, limit_current_range=(0.15, 0.20), catalyst_mass=None,
          area_real=None, activity_potential=0.9, shift=1.5, rpm=1600,
          report='area mass', sweep_rate=20, graph=True, copy=False, verb=False,
          area_geometric=1, **kwargs):
    # unzip data
    iL_lower, iL_upper = limit_current_range
    potential = np.array(cycle[0], dtype=float)
    current = np.array(cycle[1], dtype=float)
    current /= area_geometric
    verb > 2 and print(f'  cycle <{cycle.shape}>')

    if graph > 1:
        plt.figure(f'ORR - Tafel - {rpm} - positive sweep')
        plt.plot(potential, current, label='Raw data')

    # cut for useful data
    # TODO: add another filter
    rang = iL_lower < potential
    potential = potential[rang]
    current = current[rang]
    verb > 2 and print(f'  cut down to <{len(current)}>')

    # TODO: interpolate base to ignore its size
    if base is not None:
        xB, yB = base
        # extra_data_before = len(yB) - len(rang)
        # yB = yB[extra_data_before:]
        yB = yB[rang]
        assert len(current) == len(yB), f'cycle<{len(current)}> and base<{len(base)}> have different length.'
    else:
        yB = np.empty(len(current), dtype=float)
        yB[:] = current[-1]
        # yB = array([current[-1] for i in range(len(current))])

    # remove baseline
    current -= yB
    if graph > 1:
        plt.plot(potential, current, label='Corrected data')

    # current to specific density [A / cm^2 Pt]
    current_density = current / area_real

    # get diffusion controlled current JL
    # TODO: auto cut
    JLrang = (potential > iL_lower) & (potential < iL_upper)
    JL = np.average(current[JLrang])
    verb > 2 and print(f'  JL <{JLrang.sum()}> = {JL}')
    if graph > 1:
        plt.plot(potential[JLrang], current[JLrang], label='Diffusion limited region')
        plt.plot([iL_lower, iL_upper], [JL, JL], 'k')
        plt.legend()

    # correction 2 get Jk, cut @ upper limit for noise
    # TODO: auto cut
    rang = (potential > iL_upper) & (current != JL) & (current != 0)
    potential = potential[rang]
    current = current[rang]
    verb > 2 and print(f'  cut down to <{len(current)} to match>')
    Jk = current * JL / (JL - current)
    # TODO: get Ik @ 0.9V for acts

    # tafel slopes calcs: to log scale
    logJk = np.log10(abs(Jk))

    # TODO: calc tafel ranges
    # keep points with negative slope
    negS = np.diff(logJk) < 0
    potential = potential[1:][negS]
    current = current[1:][negS]
    logJk = logJk[1:][negS]

    lowCh = logJk[-1] + shift  # arbitrary, TODO: calc
    lowRang = (logJk > lowCh) & (logJk < lowCh + 1)
    highRang = (logJk > lowCh + 1) & (logJk < lowCh + 2)
    # TOpatch: start ~0.92V
    verb > 2 and print(f'  negative slope <{len(current)}>')

    lowJk = logJk[lowRang]
    highJk = logJk[highRang]
    lowfit = polyfit(potential[lowRang], lowJk, 1)
    lowFit = poly1d(lowfit)
    highfit = polyfit(potential[highRang], highJk, 1)
    highFit = poly1d(highfit)

    factor_area = area_geometric
    factor_mass = area_geometric
    if area_real is not None:
        factor_area /= area_real
    if catalyst_mass is not None:
        factor_mass /= 1e-3 * catalyst_mass

    # get activities
    low_current = 10 ** lowFit(activity_potential)
    high_current = 10 ** highFit(activity_potential)

    # TODO: report slopes
    tafel_slope_low = 1 / lowfit[0]
    tafel_slope_high = 1 / highfit[0]
    act_low = Activities(mass_act=low_current * factor_mass,
                         area_act=low_current * factor_area,
                         tafel_slope=tafel_slope_low)
    act_high = Activities(mass_act=high_current * factor_mass,
                          area_act=high_current * factor_area,
                          tafel_slope=tafel_slope_high)

    # copy to excel
    if copy:
        d = OrderedDict([('potential', potential), ('log Jk', logJk)])
        df = pd.DataFrame(data=d)
        save_to_excel(df, 'results.xlsx', 'Tafel', index=False)
        d = OrderedDict([('potential', potential[lowRang]), ('low overpotential\nJk', lowJk)])
        df = pd.DataFrame(data=d)
        save_to_excel(df, 'results.xlsx', 'Tafel', 3, index=False)
        d = OrderedDict([('potential', potential[highRang]), ('high overpotential\nJk', highJk)])
        df = pd.DataFrame(data=d)
        save_to_excel(df, 'results.xlsx', 'Tafel', 5, index=False)

    # plot
    if graph:
        plt.figure('ORR - Tafel')
        plt.plot(potential, logJk, ":")
        plt.plot(potential[lowRang], lowFit(potential[lowRang]))
        # plt.plot(potential[highRang], highFit(potential[highRang]))
        # plt.plot(highJk, potential[highRang])
        plt.xlabel('Potential [V$_{NHE}$]')
        plt.ylabel('log J$_k$ [A/cm$^2_{Pt,Pd}$]')
        plt.title("Tafel")
        # plt.show()

    return act_low, act_high
def trendline(x, y, polyfit_degree):
    tl = np.polyfit(x, y, polyfit_degree)
    tlp = pl.poly1d(tl)
    return x, tlp(x)
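# Since trendline() returns an (x, fitted_y) pair, it can be unpacked straight
# into a plot call; the data below is invented for illustration.
import numpy as np
import pylab as pl
import matplotlib.pyplot as plt

x_demo = np.arange(10)
y_demo = x_demo ** 2 + np.random.default_rng(1).normal(size=10)

plt.scatter(x_demo, y_demo)
plt.plot(*trendline(x_demo, y_demo, 2), 'r--')  # quadratic trend line
plt.show()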
y = [4, 8, 7, 7, 7, 4, 11, 9, 10, 3]

# x tick labels
labels = [
    'Feb-19', 'Mar-19', 'Apr-19', 'Jun-19', 'Aug-19',
    'Sep-19', 'Oct-19', 'Nov-19', 'Dec-19', 'Feb-20'
]

fig = plt.figure()

# plotting the graph
plt.plot(x, y)
plt.scatter(x, y, color='black')
plt.xticks(x, labels, rotation=90)  # substitutes x tick labels for x numbers

# add regression line
from pylab import polyfit, poly1d
fit = polyfit(x, y, 2)
fit_fn = poly1d(fit)
plt.plot(x, fit_fn(x), color='red')

# naming the x axis
plt.xlabel('Month')
# naming the y axis
plt.ylabel('Volunteers')

# giving a title to my graph
plt.title('Volunteers per Month in 2019')

# function to show the plot
plt.show()
# fig.savefig('lineplot.pdf')
def plot(s, filepath):
    print("Plotting schedule.")
    print(s.to_pretty_string())
    for line in s.runs:
        plots = []
        runs = s.runs_by_date(line)
        fig = plt.figure()
        ax = fig.add_subplot(111)

        # To make a stacked bar graph, we need to create an array for each i batch
        num_batches_per_run = []
        for r in runs:
            num_batches_per_run.append(len(r.batches))
        max_batches = max(num_batches_per_run)

        batches_to_plot = []
        # init array with empty lists which will be filled with batch info
        for i in range(0, max_batches):
            batches_to_plot.append([])

        for i in range(0, max_batches):
            for r in runs:
                if i >= len(r.batches):
                    # pad with zeros if there is no entry
                    batches_to_plot[i].extend([0])
                    continue
                # add ith batch of r to ith row in batches_to_plot
                b = r.batches[i]
                qty = int(b.expected_quantity)
                # qty must be a list because extend tries to iterate over elements, and ints are not iterable
                batches_to_plot[i].extend([qty])

        # set up formatting
        N = len(runs)  # max days recorded per schedule
        x_pos = np.arange(N)  # column placing
        width = 0.6  # how wide the bars will appear
        cols = ['r', 'b', 'g', 'y', '#D4D4D4', '#551A8B', '#EE82EE', '#FF6103', '#FFCC11', '#CDD704']
        col = 1  # represents the colour of the batches
        xticks = dates_to_weekday(s, line)  # convert the dates to weekdays
        plt.xticks(x_pos + width / 2., np.asarray(xticks))
        plt.ylabel('Run Total')
        plt.xlabel('Date of Production')
        plt.title("Production Schedule for " + line + " -- " + s.date)
        bottom = np.zeros(N,)

        # build graph and print data
        print("Building graph (" + line + ")")
        bars = []
        batch_labels = [value for sublist in batches_to_plot for value in sublist]  # turns list of lists into a flat list of values
        for i, b in enumerate(batches_to_plot):
            bars.append(ax.bar(x_pos, np.asarray(b), width, bottom=bottom, color=cols[i % len(cols)]))
            # add the batch value to the offset of the bar positions
            bottom += b

        # add labels
        for j in range(len(bars)):
            for i, bar in enumerate(bars[j].get_children()):
                bl = bar.get_xy()
                x = 0.5 * bar.get_width() + bl[0]
                y = 0.5 * bar.get_height() + bl[1]
                if not batches_to_plot[j][i] == 0:
                    ax.text(x, y, "%d" % (batches_to_plot[j][i]), ha='center', va='center')

        # trend line
        x = x_pos
        y = np.array([r.expected_total for r in s.runs[line]])
        fit = polyfit(x, y, 1)
        fit_fn = poly1d(fit)
        trendline = plt.plot(x, y, 'ro', x, fit_fn(x), '--k', linewidth=2)

        # save figures to appropriate directories
        if not os.path.exists(filepath):
            os.makedirs(filepath)
        filename = line + '.pdf'
        if os.path.exists(filepath + filename):
            choice = input(filename + " already exists. Overwrite? (y/n) ")
            if choice == 'y':
                os.remove(filepath + filename)
                print("Overwriting...")
            else:
                continue
        plt.savefig(filepath + filename)
    print("Report complete ({0})".format(s.date))
def Circular():
    fig = pl.figure()
    ax = fig.add_subplot(111)
    ax.set_xscale("log")
    ax.set_yscale("log")
    ax.set_xlim([9e-2, 1e2])

    # Elastic limit
    Roell_list = pl.logspace(pl.log10(0.1), pl.log10(50), 20)
    ax.plot([Roell_list[0], Roell_list[-1]], [1 / 3, 1 / 3], "k", linewidth=3.0, alpha=0.2)

    # Numerical results
    tf_list = []
    te_list = []
    for Roell in Roell_list:
        ell = 1 / Roell
        te, tf = find_data(["material.ell", "problem.rho", "problem.hsize"], [ell, 1, 1e-3])
        te_list.append(te)
        tf_list.append(tf)
    ax.plot(Roell_list, tf_list, "bo-")  # label="Num."
    ind = int(len(Roell_list) / 2.5)
    coeff = pl.polyfit(pl.log(Roell_list[ind:-ind]), pl.log(tf_list[ind:-ind]), 1)
    poly = pl.poly1d(coeff)
    fit = pl.exp(poly(pl.log(Roell_list[ind:-ind])))
    print("The slope = %.2e" % (coeff[0]))
    pl.loglog(Roell_list[ind:-ind], fit, "k-", linewidth=3.0, alpha=0.2)

    # Experimental results
    E = 3e3
    sigc = 72
    Gc = 290e-3
    ell = 3 / 8 * Gc * E / sigc ** 2
    print(ell)
    ell = 26e-3
    data = pl.loadtxt("literature/exp.csv", delimiter=",")
    Roell_exp = data[:, 0] / (2 * ell)
    Roell_exp[0] = Roell_list[0]
    sig_center = (pl.amin(data[:, 1:], 1) + pl.amax(data[:, 1:], 1)) / 2
    err1 = -(pl.amin(data[:, 1:], 1) - sig_center) / sigc
    err2 = (pl.amax(data[:, 1:], 1) - sig_center) / sigc
    pl.errorbar(Roell_exp, sig_center / sigc, yerr=[err1, err2], label="Exp.", fmt="g.")

    # Numerical results from C. Kuhn
    # data = loadtxt("literature/Kuhn.csv", delimiter=",")
    # ell = 0.00885
    # Roell_Kuhn = data[:, 0]/ell
    # ax.plot(Roell_Kuhn, data[:, 1], "r>-", label="Kuhn")

    pl.ylim([1.0 / 4.0, 1.1])
    pl.xlabel(r"Relative hole size $R/\ell$")
    pl.ylabel(r"$\sigma/\sigma_0$ at fracture")
    pl.legend(loc="best")
    pl.savefig("circular_tf.pdf", bbox_inches='tight')
def plot3():
    """
    Compute and plot data for Plot 3.
    """
    global pool
    g = "grid2"
    X = numpy.arange(1e-12, 8.1, 1 if DEBUG else 0.1)
    E = []
    Eerr = []
    EL = []
    V = []
    VL = []
    S = []
    for d in X:
        # We will skip Eppstein on large values because it takes way too long
        if DEBUG and d >= 4:
            d = 5.1
        if d < 5.1:
            data = plot3F((g, "e:f", d))
            E.append(data[0])
            EL.append(data[2])
        else:
            E.append(float('inf'))
            EL.append(float('inf'))
        data = pool.map_async(plot3F, map(lambda _: (g, "r:f:c:0.15", d), range(RUNS))).get(99999999)
        v, suc, vl = zip(*data)
        VL.append(max(vl))
        V.append(numpy.mean(v))
        S.append(numpy.mean(suc))
    matplotlib.pyplot.clf()
    axTime = matplotlib.pyplot.subplots()[1]
    axLength = axTime.twinx()
    l1, = axTime.plot(X, E, "b-.")
    V = numpy.array(V)
    l2, = axTime.plot(X, V, "g--")
    ELi = EL.index(float('inf'))
    ELfit = pylab.poly1d(pylab.polyfit(X[:ELi], EL[:ELi], 1))(X)
    l3, = axLength.plot(X, EL, "m.")
    axLength.plot(X, ELfit, "m")
    VLfit = pylab.poly1d(pylab.polyfit(X, VL, 1))(X)
    l4, = axLength.plot(X, VL, "r+")
    axLength.plot(X, VLfit, "r")
    axTime.set_ylabel("Time (s)")
    axTime.set_ylim([0, 25])
    axLength.set_ylabel("Length")
    axLength.set_ylim([0, 7])
    axTime.set_xlabel("Minimum Diversity Required")
    matplotlib.pyplot.xlim([0, 8])
    matplotlib.pyplot.legend((l1, l2, l3, l4),
                             ('Eppstein time', 'Voss time', 'Eppstein max length', 'Voss max length'),
                             'upper left')
    matplotlib.pyplot.savefig("plot3.png")
    return
def main():
    # make sure we have a valid file to use
    if len(sys.argv) <= 1:
        print("usage: %s data.csv threads_per_block" % sys.argv[0])
        exit()
    try:
        dataFile = open(sys.argv[1])
    except IOError:
        print("Error opening file %s" % sys.argv[1])
        exit()

    title = ''
    averages = []
    pcs = []

    line = dataFile.readline().strip().split(',')
    title = line[0]
    blocks = list(map(int, line[1:-1]))
    threads = []
    for line in dataFile:
        data = line.strip().split(',')
        averages.append([])
        threads.append(int(data[0]))
        for val in data[1:-1]:
            averages[-1].append(np.float64(val))

    fit0_m = []
    fit0_b = []
    fit1_m = []
    fit1_b = []
    fit1_c = []
    fit2_m = []
    fit2_b = []
    fit2_c = []

    for threads_per_block_to_plot in range(64, 1024 + 1, 32):
        print("Fitting: %i" % (threads_per_block_to_plot))
        avg_idx = threads.index(threads_per_block_to_plot)

        # Linear fit for < drop
        drop_idx = next(idx for idx, val in enumerate(blocks) if val >= (2 ** (14.00)) / threads_per_block_to_plot)
        rise_idx = next(idx for idx, val in enumerate(blocks) if val >= (2 ** (16.56)) / threads_per_block_to_plot)
        fit0_x = blocks[:drop_idx]
        m0, b0 = polyfit(fit0_x, averages[avg_idx][:drop_idx], 1)
        fit0_y = poly1d((m0, b0))(fit0_x)

        # Log fit for drop < x < rise
        fit1_x = np.array([float(x) for x in blocks[drop_idx:rise_idx]])
        # generate weights for x
        weight1 = [(x if x > 1 else 1) for x in [(blocks[i] - blocks[i - 1]) / 10.0 for i in range(drop_idx, rise_idx)]]
        # fit to log
        C1 = drop_idx - 1
        B1 = 1
        fit1_x_log = [np.log(B1 * (x - C1)) for x in fit1_x]
        m1, b1 = polyfit(fit1_x_log, averages[avg_idx][drop_idx:rise_idx], 1, w=weight1)
        fit1_y = poly1d((m1, b1))(fit1_x_log)

        # Log fit for > rise
        fit2_x = np.array([float(x) for x in blocks[rise_idx:]])
        # generate weights for x
        weight2 = [(x if x > 1 else 1) for x in [(blocks[i] - blocks[i - 1]) / 10.0 for i in range(rise_idx, len(blocks))]]
        # fit to log
        C2 = rise_idx - 1
        B2 = 1
        fit2_x_log = [np.log(B2 * (x - C2)) for x in fit2_x]
        m2, b2 = polyfit(fit2_x_log, averages[avg_idx][rise_idx:], 1, w=weight2)
        fit2_y = poly1d((m2, b2))(fit2_x_log)

        # save fit data
        fit0_m.append(m0)
        fit0_b.append(b0)
        fit1_m.append(m1)
        fit1_b.append(b1)
        fit1_c.append(C1)
        fit2_m.append(m2)
        fit2_b.append(b2)
        fit2_c.append(C2)

        # create a new plot to use
        fig = plt.figure(figsize=figure_size)
        ax = fig.add_subplot(111)
        # line at 16*1024 threads
        ax.axvline(x=drop_idx, label='2^14 threads')
        ax.axvline(x=rise_idx, label='2^16.56 threads')
        # scatter plot
        ax.scatter(blocks, averages[avg_idx])
        # fits
        if show_eqs:
            ax.plot(fit0_x, fit0_y, label="16384 threads fit: %f*x+%f" % (m0, b0))
            ax.plot(fit1_x, fit1_y, label="Log fit: %f*ln(x-%f)+%f" % (m1, C1, b1))
            ax.plot(fit2_x, fit2_y, label="Log fit: %f*ln(x-%f)+%f" % (m2, C2, b2))
            ax.legend(loc='lower right')
        else:
            ax.plot(fit0_x, fit0_y)
            ax.plot(fit1_x, fit1_y)
            ax.plot(fit2_x, fit2_y)
        ax.xaxis.label.set_size(font_size)
        ax.yaxis.label.set_size(font_size)
        ax.legend(loc='lower right', prop={'size': font_size})
        ax.tick_params(axis='both', which='major', labelsize=font_size)
        ax.tick_params(axis='both', which='minor', labelsize=font_size)
        ax.legend(loc='lower right', prop={'size': 30})
        ax.set_xlabel('Number of Blocks')
        ax.set_ylabel('Average Sync Cost')
        ax.set_ylim([0, y_lim])  # ax.get_ylim()[1]
        ax.set_xlim([0, 2000])
        fig.savefig("%s_%i.png" % (sys.argv[1][:-4], threads_per_block_to_plot))
        # plt.show(block=False)
        # plt.pause(0.5)

    # Now we have the data for each fit, so fit that
    # Linear fit
    fig = plt.figure(figsize=figure_size)
    ax = fig.add_subplot(111)
    ax.scatter(threads, fit0_m)
    ax.set_xlabel('Number of Threads/Blocks')
    ax.set_ylabel('Slope for first linear fit')
    fig.savefig("%s_%s%i.png" % (sys.argv[1][:-4], "m", 0))

    fig = plt.figure(figsize=figure_size)
    ax = fig.add_subplot(111)
    ax.scatter(threads, fit0_b)
    ax.set_xlabel('Number of Threads/Blocks')
    ax.set_ylabel('Intercept for first linear fit')
    fig.savefig("%s_%s%i.png" % (sys.argv[1][:-4], "b", 0))

    # Log fit
    fig = plt.figure(figsize=figure_size)
    ax = fig.add_subplot(111)
    ax.scatter(threads, fit1_m)
    ax.set_xlabel('Number of Threads/Blocks')
    ax.set_ylabel('Slope for log fit')
    fig.savefig("%s_%s%i.png" % (sys.argv[1][:-4], "m", 1))

    fig = plt.figure(figsize=figure_size)
    ax = fig.add_subplot(111)
    ax.scatter(threads, fit1_b)
    ax.set_xlabel('Number of Threads/Blocks')
    ax.set_ylabel('Intercept for log fit')
    fig.savefig("%s_%s%i.png" % (sys.argv[1][:-4], "b", 1))

    fig = plt.figure(figsize=figure_size)
    ax = fig.add_subplot(111)
    ax.scatter(threads, fit1_c)
    ax.set_xlabel('Number of Threads/Blocks')
    ax.set_ylabel('L1size for log fit')

    # Log fit
    fig = plt.figure(figsize=figure_size)
    ax = fig.add_subplot(111)
    ax.scatter(threads, fit2_m)
    ax.set_xlabel('Number of Threads/Blocks')
    ax.set_ylabel('Slope for log fit')
    fig.savefig("%s_%s%i.png" % (sys.argv[1][:-4], "m", 2))

    fig = plt.figure(figsize=figure_size)
    ax = fig.add_subplot(111)
    ax.scatter(threads, fit2_b)
    ax.set_xlabel('Number of Threads/Blocks')
    ax.set_ylabel('Intercept for log fit')
    fig.savefig("%s_%s%i.png" % (sys.argv[1][:-4], "b", 2))

    fig = plt.figure(figsize=figure_size)
    ax = fig.add_subplot(111)
    ax.scatter(threads, fit2_c)
    ax.set_xlabel('Number of Threads/Blocks')
    ax.set_ylabel('L1size for log fit')
    fig.savefig("%s_%s%i.png" % (sys.argv[1][:-4], "c", 2))
    if k not in y_scores_recall:
        y_scores_recall[k] = []
    y_scores_recall[k].append(v.mean())
    for k, v in all_f1[key].items():
        if k not in y_scores_f1:
            y_scores_f1[k] = []
        y_scores_f1[k].append(v.mean())

x_entropy_all = x_entropy['all']
x_entropy.pop('all')
x_entropy_std.pop('all')

# Entropy and accuracy appear to be linearly correlated
fit = pylab.polyfit(x_entropy_all, y_scores_all, 1)
fit_fn = pylab.poly1d(fit)
print("Correlation coefficient between entropy and overall classification accuracy:",
      pearsonr(x_entropy_all, y_scores_all))

from scipy.optimize import curve_fit

def func(x, a, b):
    return a * np.log(b * x)

popt, pcov = curve_fit(func, x_size, x_entropy_all)
# y_fit = np.exp(popt[0]*x)

# Overall
plt.figure()
plt.subplot(132)
plt.plot(x_size, y_scores_all, 'o-')
plt.xlabel('Sample Size')
plt.ylabel('Total Accuracy')