def seng_greet():
    import subprocess as s
    print('Welcome, Michael.')
    s.call('date')
    # Menu
    print('-' * 30)
    print('   M A I N - M E N U')
    print('-' * 30)
    print('1.Source')
    print('2.Item list')
    print('3.The Overseer')
    print('-' * 30)
    # Receive user input
    user_input = input('Enter your choice [1-3]: ')
    # Check the input and run the matching option.
    # input() returns a string, so compare against string literals.
    import source as src
    if user_input == '1':
        src.source()
    elif user_input == '2':
        s.call(['cat', 'items.txt'])
    elif user_input == '3':
        import cont_menu as cntrl
        cntrl.cntrl_Menu()
    elif user_input == 'q':
        print('Exiting..')
    else:
        print('THAT IS NOT AN OPTION!')


seng_greet()
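# The menu above imports a source module and a cont_menu module that are not
# shown here. A minimal sketch of stand-in stubs, assuming each module exposes
# the single function the menu calls (stub bodies are hypothetical):

# source.py -- stub, assuming the real module exposes source()
def source():
    print('Source selected (stub).')

# cont_menu.py -- stub, assuming the real module exposes cntrl_Menu()
def cntrl_Menu():
    print('Overseer menu selected (stub).')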
def setUp(self):
    self.mm = ManagerMock()
    self.mserv = self.mm.meta.getServer(1)
    testconfig = config.Config(None, source.source.default_config)
    testconfig.source.database = ":memory:"

    # As it is hard to create the read-only config structure by
    # hand, use a spare one to steal from
    spare = config.Config(None, source.source.default_config)
    testconfig.__dict__['game:tf'] = spare.generic
    testconfig.__dict__['game:tf'].name = "Team Fortress 2"
    testconfig.__dict__['game:tf'].teams = ["Lobby", "Spectator", "Blue", "Red"]
    testconfig.__dict__['game:tf'].serverregex = re.compile(r"^\[A-1:123\]$")
    testconfig.__dict__['game:tf'].servername = "Test %(game)s %(server)s"

    self.s = source.source("source", self.mm, testconfig)
    self.mm.s = self.s

    # Since we don't want to run threaded if we don't have to,
    # emulate startup to the derived class function
    self.s.onStart()
    self.s.connected()

    # Critical test assumption
    self.assertEqual(self.mm.metaCB['callback'], self.s)
    self.assertEqual(self.mm.serverCB['callback'], self.s)
def testDefaultConfig(self):
    self.resetState()
    mm = ManagerMock()
    INVALIDFORCEDEFAULT = ""
    s = source.source("source", mm, INVALIDFORCEDEFAULT)
    self.assertNotEqual(s.cfg(), None)
def setUp(self):
    self.mm = ManagerMock()
    self.mserv = self.mm.meta.getServer(1)
    testconfig = config.Config(None, source.source.default_config)
    testconfig.source.database = ":memory:"

    # As it is hard to create the read-only config structure by
    # hand, use a spare one to steal from
    spare = config.Config(None, source.source.default_config)
    testconfig.__dict__['game:tf'] = spare.generic
    testconfig.__dict__['game:tf'].name = "Team Fortress 2"
    testconfig.__dict__['game:tf'].teams = ["Lobby", "Spectator", "Blue", "Red"]
    testconfig.__dict__['game:tf'].serverregex = re.compile(r"^\[A-1:123\]$")
    testconfig.__dict__['game:tf'].servername = "Test %(game)s %(server)s"

    self.s = source.source("source", self.mm, testconfig)
    self.mm.s = self.s

    # Since we don't want to run threaded if we don't have to,
    # emulate startup to the derived class function
    self.s.onStart()
    self.s.connected()

    # Critical test assumption
    self.assertEqual(self.mm.metaCB['callback'], self.s)
    self.assertEqual(self.mm.serverCB['callback'], self.s)
def selection_attribute(req, selectionId, attribute, attributeId):
    get_object_or_404(Selection, pk=selectionId)
    if attribute in selection_tags:
        return tag(req, attributeId)
    elif attribute == 'source':
        return source(req, attributeId)
    elif attribute == 'quotations':
        return quotation(req, attributeId)
    else:
        raise Http404
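# A minimal sketch of how this dispatcher view might be routed, assuming a
# Django 2+ URLconf; the URL pattern, module layout, and converter names are
# hypothetical, chosen only to match the view's parameter names.

# urls.py -- hypothetical routing for selection_attribute above
from django.urls import path
from . import views

urlpatterns = [
    path('selections/<int:selectionId>/<str:attribute>/<int:attributeId>/',
         views.selection_attribute),
]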
def __init__(self, filename):
    self.source_list = list()
    try:
        for line in open(filename, 'r'):
            data = line.strip().lower().split()
            ngram = '{0} {1}'.format(data[0], data[1])
            year = data[2]
            match_count = data[3]
            page_count = data[4]
            src = source.source(ngram, year, match_count, page_count)
            self.source_list.append(src)
    except IOError as e:
        # file not found
        print "I/O error({0}): {1}".format(e.errno, e.strerror)
        sys.exit()
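# A minimal sketch of how this constructor might be exercised, assuming each
# input line holds a two-word ngram followed by year, match count, and page
# count, as the loop above expects. The file name and the NgramLoader class
# name are hypothetical.
with open('ngrams.txt', 'w') as f:
    f.write('hello world 1995 42 17\n')
    f.write('hello world 1996 58 21\n')

loader = NgramLoader('ngrams.txt')   # hypothetical class wrapping __init__ above
print(len(loader.source_list))       # -> 2, one source.source per input line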
def onStartConfig(self, event):
    numOfNodes = 20
    for i in range(numOfNodes):  # initialize nodes
        argv = {}
        argv['ID'] = i
        argv['src'] = i
        argv['des'] = numOfNodes - 1
        n = source(argv)
        self.nodes.append(n)
    for i in range(numOfNodes - 1):  # the last node as the sink
        for t in pacGenerator(100 * 20, 1, math.ceil(self.nodes[i].getPacInterval() / 3)):
            e = initPacket(t, i, numOfNodes)
            self.eventList.append(e)
    print 'Start Config'
def build(self, setsources, setcategs):
    self.vb.call("hist", "build", [self], "Building the histograms.")
    self.sources = [source.source(self.mypaf, s) for s in setsources]
    self.categs = setcategs
    self.h = []
    for s in self.sources:
        appender = []
        for c in self.categs:
            hc = copy.deepcopy(self.parent)
            hc.SetName(s.name + ":=" + c + ":=" + self.name)
            appender.append(hc)
        self.h.append(appender)
    self.built = True
def onStartConfig(self, event):
    numOfNodes = 20
    for i in range(numOfNodes):  # initialize nodes
        argv = {}
        argv['ID'] = i
        argv['src'] = i
        argv['des'] = numOfNodes - 1
        n = source(argv)
        self.nodes.append(n)
    for i in range(numOfNodes - 1):  # the last node as the sink
        for t in pacGenerator(100 * 20, 1,
                              math.ceil(self.nodes[i].getPacInterval() / 3)):
            e = initPacket(t, i, numOfNodes)
            self.eventList.append(e)
    print 'Start Config'
def applyArgs(self, pad=0):
    self.vb.call("hist", "applyArgs", [self, pad], "Applying the arguments of this instance.")

    ## reset source (for hists in schemes, they only have one hist and source)
    if self.alist.has("source") and len(self.sources) == 1:
        self.sources = None
        self.sources = [source.source(self.mypaf, self.alist.get("source"))]

    ## load default style if given
    if self.alist.has("style"):
        self.defaults = self.db.getRow("hstyles", "name == '" + self.alist.get("style") + "'")
        #self.dlist.set("

    ## apply style stuff
    self.applyGrid()
    self.applyLog(pad)
    self.applyErrors()
    self.applyNorm()
    self.applyDigits()
def build(self, setsources, setcategs):
    self.vb.call("hist", "build", [self], "Building the histograms.")
    self.sources = [source.source(self.mypaf, s) for s in setsources]
    self.categs = setcategs
    self.h = []
    for s in self.sources:
        appender = []
        for c in self.categs:
            hc = copy.deepcopy(self.parent)
            #hc = rstuff.copyTH1(self.parent)
            hc.SetName(s.name + ":=" + c + ":=" + self.name)
            appender.append(hc)
        self.h.append(appender)
    self.built = True
    self.normalized = [[False for cidx in range(len(self.categs))] for sidx in range(len(self.sources))]
    self.drawn = [[False for cidx in range(len(self.categs))] for sidx in range(len(self.sources))]
def gamma(ai, ciso, di, dn, dt, edep, edei, eden, engi, engp, engn, frac,
          fracis, gdoti, gdotn, grphi, grphii, grphin, grplo, grploi, grplon,
          gdot, gtime, gcal, hz, i1, ifun, ndei, nden, ngroup, ngrpi, ngrpn,
          ntrani, rdt, siz, sn, sor, totali, totaln, trani, peak):
    cay = 1.0
    dt = .000000001
    dt2 = dt * 0.5
    gcal = gcal / siz
    total = 0.0
    if (i1 <= 1) or (i1 > 2):  # GO TO (10, 160), I1 - stops at 160
        if ifun == 2:  # 10: GO TO (30, 20, 30, 30), IFUN - Colonel Fee mentioned IFUN will always be 1
            ifun2 = ifun
            ifun = 1  # 20
            # GO TO 40
        # if (ncal == 1):  # 30 - Colonel Fee mentioned IFUN will always be 1, so jump to 40
        i = 2  # 40
        while i < 250:  # ends at 50
            t1 = i * dt  # original Fortran had T * DT; presumably meant I * DT
            t2 = (i - 1) * dt
            s1 = source.source(t1)
            s2 = source.source(t2)
            i += 1
        if (s1 - s2) >= 0:  # go to 50 if 0 or positive, go to 60 if negative
            # 50: CONTINUE
            sys.exit("Error occurred. ifun is: {0}.".format(ifun))  # print an error, then "STOP"
        peak = s2  # 60
        cay = gcal / peak
        if (ifun2 - 2) == 0:
            peak2 = peak  # 70
            ifun = ifun2
        if ncal == 2:
            cay = 1.0  # 75
            i = 1
            while i < 250:  # 80
                t = (i - 0.5) * dt
                total = total + source.source(t) * dt
                i += 1
            cay = gcal / total  # 90
        # 100 -- array indices shifted by -1 throughout: Fortran arrays start at 1
        ig = 1
        i = 1
        t = 1 * dt2
        t1 = i * dt
        gtime[i - 1] = t1                # Fortran: gtime(i)
        gdot[i - 1] = source.source(t1)  # Fortran: gdot(i)
        total = source.source(t) * dt
        i = 2
        while i < 250:
            inn = i
            t = (i - 0.5) * dt
            t1 = i * dt
            gtime[i - 1] = t1
            gdot[i - 1] = source.source(t1)
            total = total + source.source(t) * dt
            if (ig == 1) or (ig < 1) or (ig > 2):
                if (gdot[i - 1] - gdot[i - 2]) < 0:  # 110; Fortran: gdot(i) - gdot(i-1)
                    peak = gdot[i - 2]  # 120; Fortran: gdot(i-1)
                    ig = 2
            i += 1
        # 130
        ndot = inn
        # PRINT 420, PEAK, TOTAL
        print("Peak: ", peak, "Total: ", total)
        i = 1
        while i < 50:
            n1 = i
            n2 = i + 50
            n3 = i + 100
            n4 = i + 150  # was "1 + 150"; i + 150 matches the parallel print loops below
            n5 = i + 200
            print(i, gdot[n1 - 1], gdot[n2 - 1], gdot[n3 - 1], gdot[n4 - 1], gdot[n5 - 1])  # Fortran: gdot(n1) etc.
            i += 1
        # 140
        k = 1
        while k < ngroup:
            n1 = 2 * k
            n2 = 2 * k + 1
            engp[n1 - 1] = grplo[k - 1]
            engp[n2 - 1] = grphi[k - 1]
            edep[n1 - 1] = frac[k - 1] / (grphi[k - 1] - grplo[k - 1])
            edep[n2 - 1] = edep[n1 - 1]
            k += 1
        # 150
        engp[0] = grplo[0]  # Fortran: engp(1) = grplo(1)
        edep[0] = 0.0       # Fortran: edep(1)
        ndep = 2 * ngroup + 2
        engp[ndep - 1] = grphi[ngroup - 1]
        edep[ndep - 1] = 0.0
    # GO TO (170, 310), I2
    if i2 == 1:  # 160
        rdt = math.exp(tablin.tablin(rad, 2, nrad, hob, 1) / stdrho)  # 170
        rdt = 2.0 / (rdt * dt)
        m = 1
        while m < ngrpn:
            sor[m - 1] = dn[m - 1]  # 180; Fortran: sor(m) = dn(m)
            m += 1
        i = 1
        while i < 250:
            taut = i * dt
            # if negative go to 210, if 0 or positive go to 190
            if (taut - tspeak) >= 0:
                sourcen.sourcen()  # 190: CALL SOURCEN
                k = 1
                while k < ngroup:
                    gdotn[i - 1] = sn[k - 1] + gdotn[i - 1]  # 200
                    k += 1
            # 210
            i += 1
        rdt = 0.02 * rdt
        summ = 0.0
        m = 1
        while m < ngrpn - 1:
            dn[m - 1] = sor[m - 1]  # 220
            m += 1
        sourcen.sourcen()  # CALL SOURCEN
        m = 1
        while m < ngrpn - 1:
            dn[m - 1] = (sor[m - 1] + dn[m - 1]) * 0.5
            k = 1
            while (k - 1) < (ngroup - 1):
                summ = summ + hz[k - 1][m - 1] * dn[m - 1] * 0.00000005  # 230
                k += 1
            m += 1
        # 240
        i = 1
        while i < 1000:
            sourcen.sourcen()  # CALL SOURCEN
            k = 1
            while (k - 1) < (ngroup - 1):
                summ = summ + sn[k - 1] * 0.00000005  # 250; Fortran: sn(k)
                k += 1
            i += 1
        # 260
        totalg = summ
        m = 1
        while m < ngrpn:
            dn[m - 1] = sor[m - 1]
            sor[m - 1] = 0.0  # 270
            m += 1
        summ = 0.0
        m = 1
        while m < ngrpn:
            summ = summ + (grphin[m - 1] + grplon[m - 1]) * 0.5 * dn[m - 1]  # 280
            m += 1
        totaln = summ
        # PRINT 440, TOTALN, TOTALG
        print("totaln is: ", totaln, ". totalg is: ", totalg)
        i = 1
        while i < 50:
            n1 = i
            n2 = i + 50
            n3 = i + 100
            n4 = i + 150
            n5 = i + 200
            print(i, gdotn[n1], gdotn[n2], gdotn[n3], gdotn[n4], gdotn[n5])
            i += 1
        # 290
        m = 1
        while m < ngrpn:
            n1 = 2 * m
            n2 = n1 + 1
            engn[n1 - 1] = grplon[m - 1]
            engn[n2 - 1] = grphin[m - 1]
            eden[n1 - 1] = (grplon[m - 1] + grphin[m - 1]) * 0.5 * dn[m] / \
                           ((grphin[m - 1] - grplon[m - 1]) + totaln)
            eden[n2 - 1] = eden[n1 - 1]
            m += 1
        # 300
        engn[0] = grplon[0]  # Fortran: engn(1) = grplon(1)
        eden[0] = 0.0        # Fortran: eden(1)
        nden = 2 * ngrpn + 2
        engn[nden - 1] = grphin[ngrpn - 1]  # Fortran: engn(nden) = grphin(ngrpn)
        eden[nden - 1] = 0.0                # Fortran: eden(nden)
    # GO TO (320, 400), I3  # 310
    if (i3 < 2) or (i3 > 2):  # go to end of code if none of these fit
        i = 1
        while i < 250:  # 320
            t = i * dt
            tiso = t - ciso
            if (t - ciso) >= 0:
                summ = 0.0  # 330
                l = 1
                while l < niso:
                    summ = summ + ai[l - 1] * math.exp(-tiso / di[0]) * \
                           tablin(trani, 2, ntrani, tiso, 2)  # 340; Fortran: di(1)
                    gdoti[i - 1] = summ  # Fortran: gdoti(i)
                    l += 1
            i += 1
        # 350
        dt = 0.00000001
        summ = 0.0
        i = 1
        while i < 1000:
            t = (i - 0.5) * dt
            l = 1
            while l < niso:
                summ = summ + ai[l - 1] * dt * math.exp(-t / di[l - 1]) * \
                       tablin(trani, 2, ntrani, t, 2)  # 360; Fortran: ai(l), di(l)
                l += 1
            i += 1
        # 370
        totali = summ
        # PRINT 450, TOTALI
        print("totali is: ", totali)
        i = 1
        while i < 50:
            n1 = i
            n2 = i + 50
            n3 = i + 100
            n4 = i + 150
            n5 = i + 200
            print(i, gdoti[n1 - 1], gdoti[n2 - 1], gdoti[n3 - 1], gdoti[n4 - 1], gdoti[n5 - 1])  # Fortran: gdoti(n1) etc.
            i += 1
        # 380
        k = 1
        while k < ngrphi:
            n1 = 2 * k
            n2 = n1 + 1
            engi[n1 - 1] = grploi[k - 1]  # Fortran: engi(n1) = grploi(k)
            engi[n2 - 1] = grphii[k - 1]
            edei[n1 - 1] = fracis[k - 1] / (grphii[k - 1] - grploi[k - 1])  # was "graphii"; parameter is grphii
            edei[n2 - 1] = edei[n1 - 1]
            k += 1
        # 390
        engi[0] = grploi[0]  # was "engi[0] - grploi[0]"; assignment intended, as at 150 and 300
        edei[0] = 0.0        # Fortran: edei(1)
        ndei = 2 * ngrpi + 2
        engi[ndei - 1] = grphii[ngrphi - 1]
        edei[ndei - 1] = 0.0
def _extractSources(self, in_filename, in_FITS_im):
    '''
    run sExtractor on images and check source output.

    returns a list of source instances (w/ unpopulated reference catalogue fields).
    '''
    sExCat = sExCatalogue(self.err, self.logger)
    catdata = sExCat.query(in_filename, self.params['resPath'],
                           self.params['sExConfFile'],
                           ccdSizeX=int(self.params['CCDSizeX']),
                           ccdSizeY=int(self.params['CCDSizeY']),
                           fieldMargin=int(self.params['fieldMargin']),
                           appendToCat=False, hard=True)

    # ...and check the output
    ## check number of catalogue sources != 0 (i.e. empty catalogue returned)
    if catdata is None:
        self.err.setError(9)
        self.err.handleError()
        return None

    ## check number of catalogue sources
    numExtSources = catdata.nrows
    self.logger.info("(pipeline._extractSources) " + str(numExtSources) +
                     " legit sources in image (" + str(int(self.params['minSources'])) + ")")
    if numExtSources < int(self.params['minSources']):
        self.err.setError(4)
        self.err.handleError()
        return None

    ## check maximum elongation of sources
    elongation = round(max(catdata["ELONGATION"].tonumpy()), 2)
    self.logger.info("(pipeline._extractSources) Max elongation in image is " +
                     str(elongation) + " (" + str(self.params['maxElongation']) + ")")
    if elongation > float(self.params['maxElongation']):
        self.err.setError(5)
        self.err.handleError()
        return None

    ## check excess kurtosis of object angle
    exKurtosis = round(stats.kurtosis(catdata["THETA_IMAGE"].tonumpy()), 2)
    self.logger.info("(pipeline._extractSources) Kurtosis of object angle is " +
                     str(exKurtosis) + " (" + str(float(self.params['maxExKurtosis'])) + ")")
    if exKurtosis > float(self.params['maxExKurtosis']):
        self.err.setError(6)
        self.err.handleError()
        return None

    ## combined check of kurtosis and elongation
    sourcesCombCheck = 0
    for idx in range(catdata.nrows):
        if catdata['ELONGATION'][idx] > float(self.params['maxCombElongation']) and \
           catdata['KURTOSIS'][idx] > float(self.params['maxCombExKurtosis']):
            sourcesCombCheck += 1
    self.logger.info("(pipeline._extractSources) Number of objects failing combined "
                     "elongation/kurtosis constraint is " + str(sourcesCombCheck) +
                     " (" + str(float(self.params['maxSourcesCombCheck'])) + ")")
    if sourcesCombCheck > float(self.params['maxSourcesCombCheck']):
        self.err.setError(7)
        self.err.handleError()
        return None

    ## check maximum flux
    flux = max(catdata['FLUX_MAX'].tonumpy())
    self.logger.info("(pipeline._extractSources) Maximum flux in catalogue is " +
                     str(flux) + " (" + str(self.params['maxFlux']) + ")")
    if flux > float(self.params['maxFlux']):
        self.err.setError(8)
        self.err.handleError()
        return None

    # parse sExtractor catalogue
    sExCat = sExCatalogue(self.err, self.logger)
    sExCat.read(in_filename)

    sources = []
    # create a list of source instances from each sExtracted source
    # with Nonetype cross-match catalogue variables
    for i in range(len(sExCat.RA)):
        sources.append(source(in_filename, sExCat.RA[i], sExCat.DEC[i],
                              sExCat.X_IMAGE[i], sExCat.Y_IMAGE[i],
                              sExCat.FLUX_AUTO[i], sExCat.FLUXERR_AUTO[i],
                              sExCat.MAG_AUTO[i], sExCat.MAGERR_AUTO[i],
                              sExCat.BACKGROUND[i], sExCat.ISOAREA_WORLD[i],
                              sExCat.FLAGS[i], sExCat.FWHM_WORLD[i],
                              sExCat.ELONGATION[i], sExCat.ELLIPTICITY[i],
                              sExCat.THETA_IMAGE[i]))
    return sources
# Dictionary Program
import sys
import time
import source

# Check for Python 2 and 3
if sys.version_info > (3, 0):
    sys.stdout.write("Sorry, Python 2.7 or lower is required for software to run \n")
    print("Exiting in Two(2) Seconds")
    time.sleep(1)
    sys.exit(2)


def home():
    print('''
    //  /  //  ////   ~  tt         ii
    //  /  //         ~  tttttt
    //  /  //  //     ~    tt   ii  o  o
    //  /  //  //     ~    t    ii  o  o
    ////  //  ////    ~    tttt ii  o  o
    ''')


home()
source.source()
def run(N, step, modifier):
    row_tot, col_tot = int(np.round(np.sqrt(N/10)/step)), \
                       int(np.round(np.sqrt(N/10)/step))
    #row_tot, col_tot = 8,8
    Dx, Dy = row_tot * step, col_tot * step
    box_tot = row_tot * col_tot

    k = 2 * np.pi / (step * row_tot)
    P = int(np.ceil(k * np.sqrt(2) * step)) * modifier  # proportional to box diameter * k
    Q = 2 * P + 1

    # source creation
    box_list = [[] for i in range(box_tot)]
    src_list = []
    #src_list = [source.source(0.3, 0.5, 1),source.source(3.4,3.5,1)]#source.source(3.5, 3.3, 1)]
    for i in range(N):
        src_list.append(source.source(Dx * np.random.random(),
                                      Dy * np.random.random(),
                                      np.random.random()))
        # Map src to nearest lower left grid pnt
        src_list[i].grid = np.array([int(np.floor(src_list[i].x/step)),
                                     int(np.floor(src_list[i].y/step))])
        src_list[i].idx = utils.coord2idx(src_list[i].grid[0],
                                          src_list[i].grid[1], col_tot)
        # compute c2m vector in cyl
        src_list[i].rho, src_list[i].theta = utils.cart2cyl(
            (src_list[i].grid[0] + 0.5)*step - src_list[i].x,
            (src_list[i].grid[1] + 0.5)*step - src_list[i].y)
        # contains source idxs in each box
        box_list[src_list[i].idx].append(i)

    interactions = interaction.interaction(box_tot, col_tot, row_tot,
                                           src_list, box_list)
    interactions.fill_lists()

    fast = time.clock()  # fast time

    # Calculate Multipoles
    alpha_list = np.array([i for i in np.arange(0, 2 * np.pi, 2 * np.pi / Q)])
    C2M_list = [[] for i in range(box_tot)]
    for box_idx in range(box_tot):
        for i, alpha in enumerate(alpha_list):
            val = 0
            for src_idx in box_list[box_idx]:
                src = src_list[src_idx]
                val += np.exp(np.complex(0, 1) * k * src.rho *
                              np.cos(alpha - src.theta)) * src.weight
            C2M_list[box_idx].append(val)

    # M2L
    M2L_list = [[0 for i in range(box_tot)] for i in range(box_tot)]
    for obs_box_idx in range(box_tot):
        obs_x, obs_y = np.array(utils.idx2coord(obs_box_idx, col_tot)) * step
        for src_box_idx in interactions.list[obs_box_idx]:
            vals = []
            for alpha in alpha_list:
                src_x, src_y = np.array(utils.idx2coord(src_box_idx, col_tot)) * step
                x, y = obs_x - src_x, obs_y - src_y
                sep_rho, sep_theta = utils.cart2cyl(x, y)
                val = 0
                for p in np.arange(-P, P + 1, 1):
                    val += funcs.hankel1(p, k*sep_rho) * \
                           np.exp(-np.complex(0, 1) * p * (sep_theta - alpha - np.pi/2))
                vals.append(val)
            M2L_list[obs_box_idx][src_box_idx] += np.array(vals)

    # L2O
    L2O_list = [[] for i in range(N)]
    for i, src in enumerate(src_list):
        for alpha in alpha_list:
            L2O_list[i].append(np.exp(np.complex(0, 1) * k * src.rho *
                                      np.cos(alpha - (src.theta + np.pi))))

    # interactions
    pot = np.array([np.complex(0, 0) for i in range(N)])
    for obs_box_idx in range(box_tot):
        C2L_list = []
        for i, src_box_idx in enumerate(interactions.list[obs_box_idx]):
            # translates from sources to local multipole
            C2L_list.append(M2L_list[obs_box_idx][src_box_idx] *
                            C2M_list[src_box_idx])
        for i, obs_idx in enumerate(box_list[obs_box_idx]):
            C2O_list = [L2O_list[obs_idx] * C2L for C2L in C2L_list]
            pot[obs_idx] = np.sum(C2O_list) / Q
        # near interactions
        near_pot = interactions.compute_near(obs_box_idx, k)
        for i, obs_idx in enumerate(box_list[obs_box_idx]):
            pot[obs_idx] += near_pot[i]

    fast = time.clock() - fast

    # TESTING
    slow = time.clock()
    src_idxs = [i for i in range(N)]
    G = interactions.build_G(src_idxs, src_idxs, k)
    weights = np.array([src.weight for src in src_list])
    # slow = time.clock()
    test_pot = np.dot(G, weights)
    slow = time.clock() - slow

    error = (lg.norm(pot) - lg.norm(test_pot)) / lg.norm(test_pot)

    print('N: ', N, 'Modifier: ', modifier, 'Step: ', step)
    print('error: ', error)
    print('Slow Time: ', slow)
    print('Fast Time: ', fast)
    return (error, slow, fast)
import typer
from source import source
from datetime import datetime
from output import output_dir

# Start a typer app
# https://github.com/tiangolo/typer
app = typer.Typer()

# Imports list of posts from worklog file.
posts = source()


def lines_start_with(substring):
    # Return list of lines that start with a substring
    # return [l for l in lines if l.startswith(substring)]
    lines_found = []
    for post in posts:
        for line in post:
            if line.startswith(substring):
                lines_found.append(line)
    return lines_found


def concat(list):
    # Return string of concatenated list str objects
    return ''.join(list)


@app.command()
def to_do(sort_due: bool = typer.Option(False,
def foodSecurity(data):
    f.food(data.getBibliography(), data.getGeneralCitizen())
    s.source(data.getGeneralCitizen(), data.getLocalLeaders())
    cc.corralCrop(data.getLocalLeaders(), data.getFarmyardCrop())
    con.continuity(data.getLocalLeaders(), data.getComunalServices())
def runSimulation(dataRate):
    numOfNodes = 41
    nodes = []
    for i in range(numOfNodes):  # initialize nodes
        argv = {}
        argv['ID'] = i
        argv['src'] = i
        argv['des'] = numOfNodes - 1
        n = source(argv)
        nodes.append(n)

    eventList = []
    #for i in range(numOfNodes-1):
    #    nodes[i].setPacInterval(dataRate)
    '''
    for i in range(numOfNodes-1): # the last node as the sink
        if i < 5:
            for t in pacGenerator(math.ceil(nodes[i].getPacInterval()),1,2000):
                print t
                e = initPacket(t,i,numOfNodes)
                eventList.append(e)
        else:
            for t in pacGenerator(math.ceil(nodes[i].getPacInterval()),1,21000,20000):
                print t
                e = initPacket(t,i,numOfNodes)
                eventList.append(e)
    '''
    for i in range(numOfNodes-1):
        if i < 10:
            t = random.randint(1800, 2200)
        else:
            t = random.randint(300000, 310000)
        e = initPacket(t, i, numOfNodes)
        eventList.append(e)

    min_t = 0
    data = []
    for i in range(numOfNodes):
        dataEach = []
        data.append(dataEach)
    time = []

    while True:
        #print min_t
        if min_t % 100 < 0.15:
            print min_t
            time.append(min_t)
            for i in range(numOfNodes-1):
                data[i].append(nodes[i].getPacInterval())
        if not eventList:
            break
        elif min_t > nodes[0].getPacInterval()*200:
            break
        else:
            min_index, min_t = min(enumerate(e.time for e in eventList),
                                   key=operator.itemgetter(1))
            newList = action(eventList[min_index], nodes)
            eventList.pop(min_index)
            for n in newList:
                eventList.append(n)

    #for d in data:
    #    writeResult(d,'result.csv')

    statSuc = []
    statAll = []
    statDelay = []
    statEnergy = []
    aveH = []
    #print 'Average Packet Delay for each node. (Unit: ms).'
    for i in range(numOfNodes-1):
        #nodes[i].setPacInterval((60+j*20)*20)
        # For Figure 1
        yes, num = nodes[i].getPacStat()
        statSuc.append(yes)
        statAll.append(num)
        statDelay.append(nodes[i].getDelayStat())
        statEnergy.append(nodes[i].getEnergyStat())
        # End
    # For Figure 2
    d = data
    e = time
    # End
    #print nodes[i].getDelayStat()*4/250000.0*1000
    #nodes[i].printEnergyStat()
    #print nodes[i].getChannelIndicators()
    #print nodes[i].getPacInterval()
    #print nodes[1].getPacInterval()
    #print sum(statSuc)/float(sum(statAll))
    #h = float(nodes[i].getPacInterval())/20.0
    #aveH.append(h)
    #print h

    #print 'Average Packet Sucessful Rate for each node. (%)'
    #for i in range(numOfNodes-1):
    #    yes,num = nodes[i].getPacStat()
    #    print yes/float(num)*100
    #print numpy.mean(aveH)
    #print nodes[1].getDelayStat()
    #print sum(statSuc)/float(sum(statAll))/(nodes[1].getEnergyStat()/h)**8/h # 10000 is just to amplify the num

    # the following are for Figure 1
    a = sum(statSuc)/float(sum(statAll))/nodes[1].getPacInterval()
    b = sum(statDelay)/float(numOfNodes-1)
    c = sum(statEnergy)/float(numOfNodes-1)
    # End
    # the following are for Figure 2
    # End
    return d, e
def getsource(imgname):
    pixiv_url = 'https://www.pixiv.net/member_illust.php?mode=medium&illust_id='
    twitter_url = 'https://www.twitter.com'
    danbooru_url = 'https://danbooru.donmai.us/posts'

    # Try for a Pixiv match (ex: 28022143_p0.jpg)
    m = re.match(r'(\d{7,9})_?p(\d{1,3})', imgname)
    if m:
        img_id = m.group(1)
        url = pixiv_url + img_id
        page = m.group(2)
        return source('pixiv', url=url, img_id=img_id, img_name=imgname, page=page)

    # Try for a mobile Pixiv match (ex: illust_51503473_20170620_040102.jpg)
    m = re.match(r'illust_(\d{7,9})_\d{8}_\d{6}', imgname)
    if m:
        img_id = m.group(1)
        url = pixiv_url + img_id
        return source('pixiv', url=url, img_id=img_id, img_name=imgname, page='?')

    # Try for a Twitter match (ex. twitter-HitenKei-885477467594530816)
    m = re.match(r'twitter-(\w+)-(\d+)', imgname)
    if m:
        artist = m.group(1)
        img_id = m.group(2)
        url = '{}/{}/status/{}'.format(twitter_url, artist, img_id)
        return source('twitter', url=url, artist=artist, img_id=img_id, img_name=imgname)

    # Try for a Danbooru match (ex. danbooru-none-2789021)
    m = re.match(r'danbooru-(\w+)-(\w+)', imgname)
    if m:
        artist = m.group(1)
        img_id = m.group(2)
        url = '{}/{}'.format(danbooru_url, img_id)
        return source('danbooru', url=url, artist=artist, img_id=img_id, img_name=imgname)

    # Try for an 'other' match (ex. other-website-artist-111111)
    m = re.match(r'other-(\w+)-(\w+)-(\w+)', imgname)
    if m:
        return source('other',
                      url=m.group(1),
                      artist=m.group(2),
                      img_id=m.group(3),
                      img_name=imgname)

    # if there is no match, then the file doesn't have a recorded source
    return source(None, img_name=imgname)
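# A quick sketch of how getsource classifies filenames, using the example names
# from the comments above. Assumes the source class accepts the keyword
# arguments shown in getsource; the final filename is made up to show the
# fallback branch.
getsource('28022143_p0.jpg')                        # -> pixiv entry, page 0
getsource('illust_51503473_20170620_040102.jpg')    # -> mobile pixiv entry
getsource('twitter-HitenKei-885477467594530816')    # -> twitter status URL
getsource('danbooru-none-2789021')                  # -> danbooru post URL
getsource('unrecognised_name.png')                  # -> source with no recorded origin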
def run(level_cnt, grid_step, N, eps):
    grid_dim = 2**(level_cnt-1)  # Should remain power of two for easy life
    src_list = []
    for i in range(N):
        src_list.append(source.source(grid_dim * np.random.random(),
                                      grid_dim * np.random.random(),
                                      np.random.random()))
        # Map src to nearest lower left grid pnt
        src_list[i].grid = (int(np.floor(src_list[i].x/grid_step)),
                            int(np.floor(src_list[i].y/grid_step)))

    print("Building Tree...")
    my_tree = tree.tree(src_list, level_cnt)
    my_tree.build()

    print("Filling Interaction Lists...")
    interactions = interaction.interaction(level_cnt, my_tree)
    interactions.fill_list()

    leaf_start = 2**(2*(level_cnt-1))
    leaf_end = 2*leaf_start
    for obs_idx in range(leaf_start, leaf_end):
        for src_idx in range(leaf_start, leaf_end):
            G = interactions.build_G(my_tree.tree[obs_idx],
                                     my_tree.tree[src_idx])
            if (my_tree.tree[src_idx] == []) or (my_tree.tree[obs_idx] == []):
                U, V = np.array([]), np.array([])
            else:
                U, V = utils.uv_decompose(G, eps)
            srcs = np.array([src_list[i] for i in my_tree.tree[src_idx]])
            obs_ids = my_tree.tree[obs_idx]
            src_vec = np.array([src.weight for src in srcs])
            interactions.src_vecs[obs_idx][src_idx] = src_vec
            interactions.obs_vecs[obs_idx] = obs_ids
            interactions.uv_list[obs_idx][src_idx] = (U, V)

    print('Computing UV Decompositions...')
    for lvl in range(level_cnt-2, 1, -1):
        lb = 2**(2*lvl)
        ub = 2*lb
        for obs_idx in range(lb, ub):
            for src_idx in interactions.list[obs_idx]:
            # for src_idx in range(lb,ub):
                n = my_tree.get_children(obs_idx, lvl)  # rows of merging
                m = my_tree.get_children(src_idx, lvl)  # cols of merging
                uv = [[0, 0], [0, 0]]  # index as [row][col]
                for i in range(2):
                    for j in range(2):
                        U1, V1 = interactions.uv_list[n[2*i]][m[2*j]]
                        U2, V2 = interactions.uv_list[n[2*i+1]][m[2*j]]
                        U3, V3 = interactions.uv_list[n[2*i]][m[2*j+1]]
                        U4, V4 = interactions.uv_list[n[2*i+1]][m[2*j+1]]

                        U12, V12 = utils.merge(U1, V1, U2, V2, eps)
                        U34, V34 = utils.merge(U3, V3, U4, V4, eps)
                        # Horizontal merge
                        uv[i][j] = utils.merge(U12, V12, U34, V34, eps, 1)

                Um1, Vm1 = utils.merge(uv[0][0][0], uv[0][0][1],
                                       uv[1][0][0], uv[1][0][1], eps)
                Um2, Vm2 = utils.merge(uv[0][1][0], uv[0][1][1],
                                       uv[1][1][0], uv[1][1][1], eps)

                src_vec = np.array([])
                obs_ids = []
                for box_idx in m:
                    srcs = np.array([src_list[i] for i in my_tree.tree[box_idx]])
                    src_vec = np.hstack((src_vec, np.array([src.weight
                                                            for src in srcs])))
                for box_idx in n:
                    obss = np.array([src_list[i] for i in my_tree.tree[box_idx]])
                    obs_ids = obs_ids + my_tree.tree[box_idx]

                U, V = utils.merge(Um1, Vm1, Um2, Vm2, eps, 1)
                interactions.src_vecs[obs_idx][src_idx] = src_vec
                interactions.obs_vecs[obs_idx] = obs_ids
                interactions.uv_list[obs_idx][src_idx] = (U, V)

    fast_time = 0
    print("Computing Fast Interactions...")
    for obs_box_idx in range(len(interactions.list)):
        obs_srcs_near = my_tree.tree[obs_box_idx]
        obs_srcs_far = interactions.obs_vecs[obs_box_idx]
        obs_pot_near = np.zeros(len(obs_srcs_near))
        obs_pot_far = np.zeros(len(obs_srcs_far))
        for src_box_idx in interactions.list[obs_box_idx]:
            # src_srcs = my_tree.tree[src_box_idx]
            src_vec = interactions.src_vecs[obs_box_idx][src_box_idx]
            # src_vec = np.array([src_list[idx].weight for idx in src_srcs])
            U, V = interactions.uv_list[obs_box_idx][src_box_idx]
            if np.size(U) != 0:
                s = time.clock()
                obs_pot_far += np.dot(U, np.dot(V, src_vec))
                fast_time += time.clock() - s
        # near field interactions
        obs_pot_near += interactions.compute_near(obs_box_idx)
        for i, obs in enumerate(obs_srcs_near):
            s = time.clock()
            interactions.potentials[obs] += obs_pot_near[i]
            fast_time += time.clock() - s
        for i, obs in enumerate(obs_srcs_far):
            s = time.clock()
            interactions.potentials[obs] += obs_pot_far[i]
            fast_time += time.clock() - s

    # Direct Computation
    print("Computing Direct Interactions...")
    idxs = [i for i in range(N)]
    G = interactions.build_G(idxs, idxs)
    src_vec = np.array([src.weight for src in src_list])
    s = time.clock()
    direct_potentials = np.dot(G, src_vec)
    slow_time = time.clock() - s

    error = (lg.norm(interactions.potentials) - lg.norm(direct_potentials)) \
            / lg.norm(direct_potentials)
    print('Error: ', error)
    print('Fast Time: ', fast_time)
    print('Slow Time: ', slow_time)
    return (fast_time, slow_time, error)


## old testing code but saving it just in case ##
#lvl = 2
#obs_idx = 16
#src_idx = 25
#n = my_tree.get_children(obs_idx,lvl) #rows of merging
#m = my_tree.get_children(src_idx,lvl) #cols of merging
#rank = 1
#uv = [[0,0],[0,0]] # index as [row][col]
#for i in range(2):
#    for j in range(2):
#        print(i,j)
#        U1, V1 = interactions.uv_list[n[2*i]][m[2*j]]
#        U2, V2 = interactions.uv_list[n[2*i+1]][m[2*j]]
#        U3, V3 = interactions.uv_list[n[2*i]][m[2*j+1]]
#        U4, V4 = interactions.uv_list[n[2*i+1]][m[2*j+1]]
#
#        U12,V12 = utils.merge(U1, V1, U2, V2, eps)
#        U34,V34 = utils.merge(U3, V3, U4, V4, eps)
#        # Horizontal merge
#        uv[i][j] = utils.merge(U12, V12, U34, V34, eps, 1)
#
#Um1,Vm1 = utils.merge(uv[0][0][0], uv[0][0][1],\
#                      uv[1][0][0], uv[1][0][1], eps)
#Um2,Vm2 = utils.merge(uv[0][1][0], uv[0][1][1], \
#                      uv[1][1][0], uv[1][1][1], eps)
#
#U,V = utils.merge(Um1, Vm1, Um2, Vm2, eps, 1)