def test_double_func(self):
    """
    Test that func2 doubles its input.
    """
    data = 2
    result = func2(data)
    # BUG FIX: the original asserted 6, which contradicts both the test
    # name and the docstring ("double the input"): doubling 2 gives 4.
    # NOTE(review): func2 is defined in another file - confirm it really
    # doubles before relying on this expected value.
    self.assertEqual(result, 4)
import functions as func# print(func.argstodic(name="subbu",age=50)) print(func.concatlist([1,2,3],[4,5,6],[7,8,9])) print(func.firstlast([14,2,4])) print(func.double_the_args("subbu", 87, 74, "maya")) #assume inputs are numbers and may be more than one print(func.two_times_all_args(1,4,7,8)) print(func.args_to_list("rt", 9, 3,"sf")) print(func.kwargs_to_dic(a=933, b="jdhfd", c=90, l=3323)) print(func.is_there_empty_list([1,2,3,4],[],[234])) print(func.func2()) func3 = func.func2() print(func3()) print(func.list_to_tuple([1,2,3,4,5])) # here built-in object is being passed print(func.name_of_object(object)) func.print_decimals(0,10,0.1,1)
#!/usr/bin/python from functions import func3 from functions import func2 print func2('Jane', 22)
import functions #functions.world() functions.func1() functions.func2()
def degreeProb(self, raw=False, binned=False, pRaw=False, pBinned=False,
               plotTheory=False, plotRatio=False, checkSum=False, plot=False,
               plotType1='o', plotType2='o-', plotType3='-', log=False):
    """ Calculates the probabilities of getting degree values k, p(k) for all
    graphs. Also log-bins the data.

    NOTE(review): this body was reconstructed from a whitespace-mangled
    source; the nesting of the array-conversion and checkSum sections under
    `if raw:` is inferred - verify against version control.

    Keyword flags:
        raw          -- compute raw (un-binned) degree probabilities.
        binned       -- compute log-binned probabilities via lb.log_bin.
        pRaw/pBinned -- select which data set(s) to plot (when plot=True).
        plotTheory   -- overlay the theoretical curve on the binned plot.
        plotRatio    -- plot binned/theoretical ratio instead of the probs.
        checkSum     -- print the sum of each raw probability array.
        plot         -- master switch for all plotting.
        plotType1/2/3 -- matplotlib style strings for the curves.
        log          -- passed through to pt.plot (log-log axes).

    Returns:
        (K, KProbs, KCounts, expectedCounts, graphsCentres, graphsProbs,
        theoryData) -- raw and binned results; containers stay empty for
        stages that were not requested.
    """
    K = []               # per-graph arrays of distinct degree values
    KProbs = []          # per-graph raw probabilities p(k)
    KCounts = []         # per-graph average occurrence counts per degree
    expectedCounts = []  # theoretical counts N * p_theory(k)
    graphsCentres = []   # log-bin centres per graph
    graphsProbs = []     # log-binned probabilities per graph
    theoryData = []      # theoretical probabilities at the bin centres
    if raw:  # Raw probs calculated only if specified
        for i in range(len(self.sortedDegrees)):
            k = []
            probs = []
            averageCounts = []
            # Getting the list of degrees for the current graph size
            degrees = np.sort(self.sortedDegrees[i])
            for x in degrees:  # Going in ascending order of the degrees list
                if x not in k:
                    k.append(x)
                    count = 0
                    for y in degrees:
                        if y == x:
                            # Counting how many times the current degree value
                            # occurs in the list
                            count += 1
                        elif count >= 1:
                            # Since the list is sorted in ascending order, if the
                            # count is >= 1 and isn't increasing further, there
                            # are no more degrees with this value
                            break
                        else:
                            # If the degree value hasn't been found yet, keep scanning
                            pass
                    if count == 0:
                        print "COUNT IS ZERO"
                    # Dividing the counted occurrences of all k values by the
                    # total number of degrees in the graph
                    prob = count / float(len(degrees))
                    # Calculating the average counts number per degree by
                    # dividing by the number of loops
                    averageCounts.append(count / self.loops)
                    probs.append(prob)
                else:
                    pass
            # Converting the lists to numpy arrays for easier manipulation
            k = np.asarray(k, dtype='int64')
            # NOTE(review): this lowercase array is never used afterwards -
            # KCounts below appends the plain `averageCounts` list; probably
            # intended to rebind `averageCounts`.
            averagecounts = np.asarray(averageCounts, dtype='float64')
            probs = np.asarray(probs, dtype='float64')
            K.append(k)
            KCounts.append(averageCounts)
            KProbs.append(probs)
            N = self.graphNs[i]
            # NOTE(review): the comprehension variable `k` leaks in Python 2
            # and rebinds the local `k` after this line.
            if self.random:
                theoryProbs = np.array([funcs.func2(float(k), float(self.graphMs[i]))
                                        for k in K[i]], dtype='float64')
            else:
                theoryProbs = np.array([funcs.func1(float(k), float(self.graphMs[i]))
                                        for k in K[i]], dtype='float64')
            # Calculating the theoretically expected degree counts
            theoryCounts = N * theoryProbs
            expectedCounts.append(theoryCounts)
        # Converting the lists to numpy arrays for easier manipulation
        K = np.asarray(K)
        KCounts = np.asarray(KCounts)
        expectedCounts = np.asarray(expectedCounts)
        KProbs = np.asarray(KProbs)
        if checkSum:
            # Checking to see if all the probabilities add up to 1 ie they are
            # normalised for all system sizes
            for probs in KProbs:
                summation = np.sum(probs)
                print "Summation: ", summation
    if binned:  # Binned probs calculated only if specified
        for i in range(len(self.sortedDegrees)):
            # Getting the list of avalanche sizes for the current system size
            degrees = self.sortedDegrees[i]
            # Calculating the log-binned probs using the log_bin_CN_2016.py module
            centres, binProbs = lb.log_bin(degrees, bin_start=float(self.graphMs[i]),
                                           a=self.a, datatype='integer')
            graphsCentres.append(centres)
            graphsProbs.append(binProbs)
        for x in range(len(graphsCentres)):
            # Calculating the theoretical probability values
            if self.random:
                theoryValues = np.array([funcs.func2(float(k), float(self.graphMs[x]))
                                         for k in graphsCentres[x]], dtype='float64')
            else:
                theoryValues = np.array([funcs.func1(float(k), float(self.graphMs[x]))
                                         for k in graphsCentres[x]], dtype='float64')
            theoryData.append(theoryValues)
    if plot:
        # Plotting the probs p(k) against degree values k on log-log plots
        # (if specified)
        xLab = r"$k$"
        title = "Degree Size Probability vs Degree Size"
        if pRaw and not pBinned:  # Plotting just the raw data
            yLab = r"$p(k)$"
            legend = ['Raw for m = ' + str(i) for i in self.graphMs]
            # When the graphs differ in N rather than m, label by N instead
            if self.graphNs[0] != self.graphNs[-1]:
                legend = ['Raw for N = ' + str(i) for i in self.graphNs]
            plotTypes = [plotType1 for i in range(len(KProbs))]
            pt.plot(xData=K, yData=KProbs, plotType=plotTypes, xLab=xLab,
                    yLab=yLab, title=title, legend=legend, multiX=True,
                    multiY=True, loc=1, log=log)
        elif pBinned and not pRaw:  # Plotting just the binned data
            # NOTE(review): "\tildep" is probably meant to be "\tilde p";
            # left unchanged here since it is a runtime string.
            yLab = r"$\tildep(k)$"
            legend = ['Binned for m = ' + str(i) for i in self.graphMs]
            if self.graphNs[0] != self.graphNs[-1]:
                legend = ['Binned for N = ' + str(i) for i in self.graphNs]
            plotTypes = [plotType1 for i in range(len(graphsProbs))]
            if plotTheory:
                # Two curve sets: binned data plus the theoretical overlay
                legend1 = ['Binned for m = ' + str(i) for i in self.graphMs]
                legend2 = ['Theoretical for m = ' + str(i) for i in self.graphMs]
                if self.graphNs[0] != self.graphNs[-1]:
                    legend1 = ['Binned for N = ' + str(i) for i in self.graphNs]
                    legend2 = ['Theoretical for N = ' + str(i) for i in self.graphNs]
                plotTypes1 = [plotType1 for i in range(len(graphsProbs))]
                plotTypes2 = [plotType3 for i in range(len(graphsProbs))]
                plt.figure(figsize=(12, 10))
                # figure=False so both pt.plot calls draw on the same figure
                pt.plot(xData=graphsCentres, yData=graphsProbs,
                        plotType=plotTypes1, xLab=xLab, yLab=yLab, title=title,
                        legend=legend1, multiX=True, multiY=True, loc=1,
                        log=log, figure=False)
                pt.plot(xData=graphsCentres, yData=theoryData,
                        plotType=plotTypes2, xLab=xLab, yLab=yLab, title=title,
                        legend=legend2, multiX=True, multiY=True, loc=1,
                        log=log, figure=False)
                plt.grid()
            elif plotRatio:  # Plotting the ratio of raw to theory probs
                yLab = r"$p_{d}(k)/p_{t}(k)$"
                legend = ['Binned for m = ' + str(i) for i in self.graphMs]
                if self.graphNs[0] != self.graphNs[-1]:
                    legend = ['Binned for N = ' + str(i) for i in self.graphNs]
                plotTypes = [plotType2 for i in range(len(graphsProbs))]
                ratios = []
                for i in range(len(graphsProbs)):
                    ratio = graphsProbs[i] / theoryData[i]
                    ratios.append(ratio)
                plt.figure(figsize=(12, 10))
                # Horizontal reference line at ratio == 1 (perfect agreement)
                plt.axhline(y=1, linewidth=2, color='k')
                pt.plot(xData=graphsCentres, yData=ratios, plotType=plotTypes,
                        xLab=xLab, yLab=yLab, title=title, legend=legend,
                        multiX=True, multiY=True, loc=1, log=log, figure=False)
            else:
                legend = ['Binned for m = ' + str(i) for i in self.graphMs]
                if self.graphNs[0] != self.graphNs[-1]:
                    legend = ['Binned for N = ' + str(i) for i in self.graphNs]
                plotTypes = [plotType2 for i in range(len(graphsProbs))]
                pt.plot(xData=graphsCentres, yData=graphsProbs,
                        plotType=plotTypes, xLab=xLab, yLab=yLab, title=title,
                        legend=legend, multiX=True, multiY=True, loc=1, log=log)
        elif pRaw and pBinned:  # Plotting both the raw data and binned data
            # Only the largest system (index -1) is shown in this mode
            yLab = r"$p(k)$"
            legend = ['Raw for m = ' + str(self.graphMs[-1]),
                      'Binned for m = ' + str(self.graphMs[-1])]
            plotTypes = [plotType1, plotType2]
            pt.plot(xData=[K[-1], graphsCentres[-1]],
                    yData=[KProbs[-1], graphsProbs[-1]], plotType=plotTypes,
                    xLab=xLab, yLab=yLab, title=title, legend=legend,
                    multiX=True, multiY=True, loc=1, log=log)
    # Returning both the raw and binned data
    return K, KProbs, KCounts, expectedCounts, graphsCentres, graphsProbs, theoryData
def dataCollapse(self, graphCentres, graphProbs, plotType='o-', log=False):
    """ Completely collapses the probabilities calculated by rescaling the
    vertical and horizontal axis.

    Parameters:
        graphCentres -- per-graph sequences of log-bin centres k.
        graphProbs   -- per-graph sequences of binned probabilities p(k).
        plotType     -- matplotlib style string used for every curve.
        log          -- passed through to pt.plot (log-log axes).

    Produces two plots via pt.plot: a partial collapse (k on the x-axis)
    and a full collapse (k / k1 on the x-axis). Returns None.
    """
    scaledGraphCentres = []  # k / k1 per graph
    scaledGraphProbs = []    # p_data(k) / p_theory(k) per graph
    for i in range(len(graphCentres)):
        centres = graphCentres[i]
        probs = graphProbs[i]
        # Calculating the reciprocal of the theoretical probs
        if self.random:
            factors = np.array([1. / (funcs.func2(float(k), float(self.graphMs[i])))
                                for k in centres], dtype='float64')
        else:
            factors = np.array([1. / (funcs.func1(float(k), float(self.graphMs[i])))
                                for k in centres], dtype='float64')
        m = float(self.graphMs[i])
        N = float(self.graphNs[i])
        # Calculating the theoretical k1 value (largest expected degree)
        if self.random:
            scaling = funcs.func4(N, m)
        else:
            scaling = funcs.func3(N, m)
        scaledCentres = centres / scaling  # Scaling the horizontal axis as k / k1
        scaledProbs = probs * factors      # Scaling the vertical axis as p_data / p_theory
        scaledGraphCentres.append(scaledCentres)
        scaledGraphProbs.append(scaledProbs)
    # Plotting the collapsed data
    xLab = r"$k / k_{1}$"
    xLab2 = r"$k$"
    yLab = r"$p_{data}(k) / p_{theory}(k)$"
    title = "Scaled Degree Size Probability vs Scaled Degree Size (DATA COLLAPSE)"
    legend = ['Scaled Binned for N = ' + str(i) for i in self.graphNs]
    # BUG FIX: the original sized this list with len(scaledProbs) - the
    # number of bins in the *last* graph (the comprehension-leaked inner
    # array) - instead of one plot style per graph.
    plotTypes = [plotType for i in range(len(scaledGraphProbs))]
    # Partially collapsing (unscaled x-axis)
    pt.plot(xData=graphCentres, yData=scaledGraphProbs, plotType=plotTypes,
            xLab=xLab2, yLab=yLab, title=title, legend=legend, multiX=True,
            multiY=True, loc=1, log=log)
    # Fully collapsing (x-axis rescaled by k1)
    pt.plot(xData=scaledGraphCentres, yData=scaledGraphProbs, plotType=plotTypes,
            xLab=xLab, yLab=yLab, title=title, legend=legend, multiX=True,
            multiY=True, loc=1, log=log)
def std(self, optimize=False, plot=False, plotScaled=False, plotType1='o',
        plotType2='-', log=False):
    """ Calculates the STD of the height of the pile once in the steady state
    for all systems. Also produces an optimised fit through the points.

    Keyword flags:
        optimize   -- fit a0 * L**D through the STDs (two smallest sizes
                      skipped) and compute L**D-scaled STDs.
        plot       -- plot STDs vs system size L (with the fit if optimize).
        plotScaled -- plot the scaled STDs vs L.
        plotType1/2 -- matplotlib style strings for data / fit curves.
        log        -- passed through to pt.plot (log-log axes).

    Returns:
        (STDs, scaledSTDs, a0, D) when optimize is True, else just STDs.

    NOTE(review): `scaledSTDs` (used by plotScaled) and `a0`/`D` (used by
    the optimize branch of plot) are only bound when optimize=True;
    calling with plot/plotScaled but optimize=False raises NameError.
    """
    STDs = []
    t0 = int(self.tMax - self.N)
    # Setting the number of data points to use for calculating the stds
    T = self.tMax - t0
    lowerLimit = t0 + 1  # The lower index for the heights list to scan
    upperLimit = t0 + T  # The upper index for the heights list to scan
    for i in range(len(self.systemHeights)):
        heights = self.systemHeights[i]
        # Steady-state window of T samples (inclusive upper bound)
        steady = heights[lowerLimit:upperLimit + 1]
        firstTerm = np.sum(steady**2) / float(T)     # Calculating <h^2>
        secondTerm = (np.sum(steady) / float(T))**2  # Calculating <h>^2
        # Subtracting the two terms and taking the sqrt
        STD = np.sqrt(firstTerm - secondTerm)
        STDs.append(STD)
    # Converting the list to numpy array for easier manipulation
    STDs = np.asarray(STDs, dtype='float64')
    if optimize:
        # The STDs are optimised using a fit function "a0 * (L**D)" (two
        # smallest system sizes ignored to avoid corrections to scaling)
        popt, pcov = curve_fit(funcs.func2, self.systemSizes[2:], STDs[2:])
        # Getting the parameters from the optimised fit
        a0, D = popt[0], popt[1]
        # Scaling the STDs by dividing by L^D
        scaledSTDs = np.array(STDs / (self.systemSizes**float(D)),
                              dtype='float64')
    if plot:  # Plotting STDs vs L
        xLab = r"$L$"
        yLab = r"$\sigma_{h}(L)$"
        title = "Average Height STD of Pile in Steady State vs System Size"
        legend = "Average Height STDs"
        if optimize:
            # Also plotting the optimised fit curve through the numerical
            # data points
            legend = ["Average Height STDs",
                      "Optimized Fit Function " r"$a_0L^D$"]
            plotTypes = [plotType1, plotType2]
            # Dense range of L values so the fit curve is smooth
            optimizeData = [i for i in range(self.systemSizes[0],
                                             self.systemSizes[-1] + 1)]
            pt.plot(xData=[self.systemSizes, optimizeData],
                    yData=[STDs, funcs.func2(optimizeData, a0, D)],
                    plotType=plotTypes, xLab=xLab, yLab=yLab,
                    title=title, legend=legend, multiX=True, multiY=True,
                    log=log)
        else:
            pt.plot(xData=self.systemSizes, yData=STDs, plotType='o-',
                    xLab=xLab, yLab=yLab, title=title, legend=legend, log=log)
    if plotScaled:  # Plotting scaled STDs vs L
        xLab = r"$L$"
        yLab = r"$\sigma_{h}(L)/L^{D}$"
        title = "(Average Height STD)/" r"$L^{D}$" " vs System Size"
        legend = "Scaled Average Height STDs"
        pt.plot(xData=self.systemSizes, yData=scaledSTDs, plotType='o-',
                xLab=xLab, yLab=yLab, title=title, legend=legend, log=log)
    if optimize:
        return STDs, scaledSTDs, a0, D  # Also returning fit parameters
    else:
        return STDs  # Only returning calculated STDs